author    Michaël Zasso <targos@protonmail.com>    2020-07-14 10:12:25 +0200
committer Myles Borins <mylesborins@github.com>    2020-07-16 17:09:14 -0400
commit   9cd523d148dcefa6dd86cb7ef6448520aad5c574 (patch)
tree     6174c6535ccff03341e9536f318ae82e76db6fda
parent   58dfeac1338308b47d53b185fd311a364fca557a (diff)
download node-new-9cd523d148dcefa6dd86cb7ef6448520aad5c574.tar.gz
deps: update V8 to 8.4.371.19
Backport-PR-URL: https://github.com/nodejs/node/pull/34356
PR-URL: https://github.com/nodejs/node/pull/33579
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
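
As a quick sanity check after building Node.js with this change, querying process.versions.v8 should report a version string beginning with 8.4.371.19 (the trailing Node-specific suffix varies by build):

    $ node -p process.versions.v8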
-rw-r--r--deps/v8/.gitignore2
-rw-r--r--deps/v8/AUTHORS3
-rw-r--r--deps/v8/BUILD.gn309
-rw-r--r--deps/v8/COMMON_OWNERS2
-rw-r--r--deps/v8/DEPS44
-rw-r--r--deps/v8/INTL_OWNERS1
-rw-r--r--deps/v8/WATCHLISTS7
-rw-r--r--deps/v8/build_overrides/build.gni8
-rw-r--r--deps/v8/gni/v8.gni9
-rw-r--r--deps/v8/include/DEPS1
-rw-r--r--deps/v8/include/cppgc/DEPS7
-rw-r--r--deps/v8/include/cppgc/allocation.h106
-rw-r--r--deps/v8/include/cppgc/common.h26
-rw-r--r--deps/v8/include/cppgc/custom-space.h62
-rw-r--r--deps/v8/include/cppgc/garbage-collected.h177
-rw-r--r--deps/v8/include/cppgc/heap.h36
-rw-r--r--deps/v8/include/cppgc/internal/accessors.h26
-rw-r--r--deps/v8/include/cppgc/internal/api-constants.h (renamed from deps/v8/include/cppgc/internals.h)27
-rw-r--r--deps/v8/include/cppgc/internal/compiler-specific.h26
-rw-r--r--deps/v8/include/cppgc/internal/finalizer-trait.h (renamed from deps/v8/include/cppgc/finalizer-trait.h)8
-rw-r--r--deps/v8/include/cppgc/internal/gc-info.h (renamed from deps/v8/include/cppgc/gc-info.h)10
-rw-r--r--deps/v8/include/cppgc/internal/logging.h50
-rw-r--r--deps/v8/include/cppgc/internal/persistent-node.h109
-rw-r--r--deps/v8/include/cppgc/internal/pointer-policies.h133
-rw-r--r--deps/v8/include/cppgc/internal/prefinalizer-handler.h31
-rw-r--r--deps/v8/include/cppgc/liveness-broker.h50
-rw-r--r--deps/v8/include/cppgc/macros.h26
-rw-r--r--deps/v8/include/cppgc/member.h206
-rw-r--r--deps/v8/include/cppgc/persistent.h304
-rw-r--r--deps/v8/include/cppgc/platform.h4
-rw-r--r--deps/v8/include/cppgc/prefinalizer.h54
-rw-r--r--deps/v8/include/cppgc/source-location.h59
-rw-r--r--deps/v8/include/cppgc/trace-trait.h67
-rw-r--r--deps/v8/include/cppgc/type-traits.h109
-rw-r--r--deps/v8/include/cppgc/visitor.h138
-rw-r--r--deps/v8/include/js_protocol.pdl30
-rw-r--r--deps/v8/include/libplatform/libplatform.h4
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h56
-rw-r--r--deps/v8/include/v8-fast-api-calls.h2
-rw-r--r--deps/v8/include/v8-inspector-protocol.h8
-rw-r--r--deps/v8/include/v8-inspector.h2
-rw-r--r--deps/v8/include/v8-internal.h67
-rw-r--r--deps/v8/include/v8-platform.h163
-rw-r--r--deps/v8/include/v8-profiler.h2
-rw-r--r--deps/v8/include/v8-util.h2
-rw-r--r--deps/v8/include/v8-version-string.h2
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8-wasm-trap-handler-posix.h2
-rw-r--r--deps/v8/include/v8-wasm-trap-handler-win.h2
-rw-r--r--deps/v8/include/v8.h161
-rw-r--r--deps/v8/include/v8config.h9
-rw-r--r--deps/v8/infra/mb/mb_config.pyl26
-rw-r--r--deps/v8/infra/testing/builders.pyl45
-rw-r--r--deps/v8/src/DEPS10
-rw-r--r--deps/v8/src/api/api-arguments.h8
-rw-r--r--deps/v8/src/api/api-inl.h1
-rw-r--r--deps/v8/src/api/api.cc341
-rw-r--r--deps/v8/src/api/api.h15
-rw-r--r--deps/v8/src/asmjs/asm-js.cc12
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc2
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc14
-rw-r--r--deps/v8/src/ast/ast-value-factory.h12
-rw-r--r--deps/v8/src/ast/ast.h20
-rw-r--r--deps/v8/src/ast/scopes.cc18
-rw-r--r--deps/v8/src/ast/scopes.h3
-rw-r--r--deps/v8/src/ast/variables.h5
-rw-r--r--deps/v8/src/base/address-region.h8
-rw-r--r--deps/v8/src/base/cpu.cc6
-rw-r--r--deps/v8/src/base/platform/mutex.h3
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc29
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc4
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc18
-rw-r--r--deps/v8/src/base/platform/time.cc17
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc471
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc445
-rw-r--r--deps/v8/src/builtins/array-copywithin.tq141
-rw-r--r--deps/v8/src/builtins/array-every.tq239
-rw-r--r--deps/v8/src/builtins/array-filter.tq343
-rw-r--r--deps/v8/src/builtins/array-find.tq263
-rw-r--r--deps/v8/src/builtins/array-findindex.tq265
-rw-r--r--deps/v8/src/builtins/array-foreach.tq217
-rw-r--r--deps/v8/src/builtins/array-from.tq323
-rw-r--r--deps/v8/src/builtins/array-isarray.tq32
-rw-r--r--deps/v8/src/builtins/array-join.tq1100
-rw-r--r--deps/v8/src/builtins/array-lastindexof.tq243
-rw-r--r--deps/v8/src/builtins/array-map.tq458
-rw-r--r--deps/v8/src/builtins/array-of.tq84
-rw-r--r--deps/v8/src/builtins/array-reduce-right.tq323
-rw-r--r--deps/v8/src/builtins/array-reduce.tq323
-rw-r--r--deps/v8/src/builtins/array-reverse.tq301
-rw-r--r--deps/v8/src/builtins/array-shift.tq175
-rw-r--r--deps/v8/src/builtins/array-slice.tq379
-rw-r--r--deps/v8/src/builtins/array-some.tq239
-rw-r--r--deps/v8/src/builtins/array-splice.tq708
-rw-r--r--deps/v8/src/builtins/array-unshift.tq146
-rw-r--r--deps/v8/src/builtins/array.tq129
-rw-r--r--deps/v8/src/builtins/base.tq85
-rw-r--r--deps/v8/src/builtins/bigint.tq427
-rw-r--r--deps/v8/src/builtins/boolean.tq66
-rw-r--r--deps/v8/src/builtins/builtins-array.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc39
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h39
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc34
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc18
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc6
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc1
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h45
-rw-r--r--deps/v8/src/builtins/builtins-error.cc67
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc35
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc82
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc109
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h20
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc66
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc24
-rw-r--r--deps/v8/src/builtins/builtins-promise.h12
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc22
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-string.tq385
-rw-r--r--deps/v8/src/builtins/builtins-trace.cc91
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc191
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.h35
-rw-r--r--deps/v8/src/builtins/builtins-weak-refs.cc57
-rw-r--r--deps/v8/src/builtins/cast.tq81
-rw-r--r--deps/v8/src/builtins/collections.tq87
-rw-r--r--deps/v8/src/builtins/console.tq20
-rw-r--r--deps/v8/src/builtins/convert.tq7
-rw-r--r--deps/v8/src/builtins/data-view.tq1489
-rw-r--r--deps/v8/src/builtins/finalization-registry.tq105
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.cc4
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.h1
-rw-r--r--deps/v8/src/builtins/growable-fixed-array.tq76
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc434
-rw-r--r--deps/v8/src/builtins/ic-callable.tq183
-rw-r--r--deps/v8/src/builtins/ic.tq59
-rw-r--r--deps/v8/src/builtins/internal-coverage.tq57
-rw-r--r--deps/v8/src/builtins/iterator.tq177
-rw-r--r--deps/v8/src/builtins/math.tq764
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc34
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc35
-rw-r--r--deps/v8/src/builtins/number.tq108
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq102
-rw-r--r--deps/v8/src/builtins/object.tq336
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc492
-rw-r--r--deps/v8/src/builtins/promise-abstract-operations.tq928
-rw-r--r--deps/v8/src/builtins/promise-all-element-closure.tq326
-rw-r--r--deps/v8/src/builtins/promise-all.tq659
-rw-r--r--deps/v8/src/builtins/promise-any.tq372
-rw-r--r--deps/v8/src/builtins/promise-constructor.tq162
-rw-r--r--deps/v8/src/builtins/promise-finally.tq348
-rw-r--r--deps/v8/src/builtins/promise-jobs.tq120
-rw-r--r--deps/v8/src/builtins/promise-misc.tq431
-rw-r--r--deps/v8/src/builtins/promise-race.tq212
-rw-r--r--deps/v8/src/builtins/promise-reaction-job.tq180
-rw-r--r--deps/v8/src/builtins/promise-resolve.tq316
-rw-r--r--deps/v8/src/builtins/promise-then.tq117
-rw-r--r--deps/v8/src/builtins/proxy-constructor.tq81
-rw-r--r--deps/v8/src/builtins/proxy-delete-property.tq104
-rw-r--r--deps/v8/src/builtins/proxy-get-property.tq97
-rw-r--r--deps/v8/src/builtins/proxy-get-prototype-of.tq101
-rw-r--r--deps/v8/src/builtins/proxy-has-property.tq90
-rw-r--r--deps/v8/src/builtins/proxy-is-extensible.tq90
-rw-r--r--deps/v8/src/builtins/proxy-prevent-extensions.tq92
-rw-r--r--deps/v8/src/builtins/proxy-revocable.tq73
-rw-r--r--deps/v8/src/builtins/proxy-revoke.tq44
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq136
-rw-r--r--deps/v8/src/builtins/proxy-set-prototype-of.tq112
-rw-r--r--deps/v8/src/builtins/proxy.tq32
-rw-r--r--deps/v8/src/builtins/reflect.tq154
-rw-r--r--deps/v8/src/builtins/regexp-exec.tq71
-rw-r--r--deps/v8/src/builtins/regexp-match-all.tq410
-rw-r--r--deps/v8/src/builtins/regexp-match.tq267
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq459
-rw-r--r--deps/v8/src/builtins/regexp-search.tq168
-rw-r--r--deps/v8/src/builtins/regexp-source.tq30
-rw-r--r--deps/v8/src/builtins/regexp-split.tq102
-rw-r--r--deps/v8/src/builtins/regexp-test.tq45
-rw-r--r--deps/v8/src/builtins/regexp.tq716
-rw-r--r--deps/v8/src/builtins/string-endswith.tq119
-rw-r--r--deps/v8/src/builtins/string-html.tq215
-rw-r--r--deps/v8/src/builtins/string-iterator.tq72
-rw-r--r--deps/v8/src/builtins/string-pad.tq172
-rw-r--r--deps/v8/src/builtins/string-repeat.tq106
-rw-r--r--deps/v8/src/builtins/string-replaceall.tq374
-rw-r--r--deps/v8/src/builtins/string-slice.tq47
-rw-r--r--deps/v8/src/builtins/string-startswith.tq103
-rw-r--r--deps/v8/src/builtins/string-substr.tq61
-rw-r--r--deps/v8/src/builtins/string-substring.tq40
-rw-r--r--deps/v8/src/builtins/symbol.tq74
-rw-r--r--deps/v8/src/builtins/torque-internal.tq309
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq767
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq81
-rw-r--r--deps/v8/src/builtins/typed-array-filter.tq130
-rw-r--r--deps/v8/src/builtins/typed-array-find.tq81
-rw-r--r--deps/v8/src/builtins/typed-array-findindex.tq86
-rw-r--r--deps/v8/src/builtins/typed-array-foreach.tq79
-rw-r--r--deps/v8/src/builtins/typed-array-from.tq329
-rw-r--r--deps/v8/src/builtins/typed-array-of.tq90
-rw-r--r--deps/v8/src/builtins/typed-array-reduce.tq98
-rw-r--r--deps/v8/src/builtins/typed-array-reduceright.tq104
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq573
-rw-r--r--deps/v8/src/builtins/typed-array-slice.tq177
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq81
-rw-r--r--deps/v8/src/builtins/typed-array-sort.tq227
-rw-r--r--deps/v8/src/builtins/typed-array-subarray.tq112
-rw-r--r--deps/v8/src/builtins/typed-array.tq464
-rw-r--r--deps/v8/src/builtins/wasm.tq273
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc53
-rw-r--r--deps/v8/src/codegen/arm/constants-arm.h17
-rw-r--r--deps/v8/src/codegen/arm/cpu-arm.cc13
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm.cc24
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc33
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h18
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h2
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h1
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc4
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.cc13
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.h5
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc24
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h2
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc66
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h13
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h9
-rw-r--r--deps/v8/src/codegen/assembler.cc1
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc409
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h225
-rw-r--r--deps/v8/src/codegen/compiler.cc1142
-rw-r--r--deps/v8/src/codegen/compiler.h116
-rw-r--r--deps/v8/src/codegen/external-reference-encoder.cc (renamed from deps/v8/src/snapshot/serializer-common.cc)81
-rw-r--r--deps/v8/src/codegen/external-reference-encoder.h60
-rw-r--r--deps/v8/src/codegen/external-reference.cc4
-rw-r--r--deps/v8/src/codegen/external-reference.h2
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc43
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h11
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc40
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc27
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h15
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h2
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc28
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h122
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips.cc24
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc2
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h2
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc76
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc24
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc6
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h32
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc4
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h7
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h83
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc28
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h43
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h33
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc24
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc275
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h129
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h2
-rw-r--r--deps/v8/src/codegen/reloc-info.cc3
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390.cc24
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc2
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h2
-rw-r--r--deps/v8/src/codegen/tnode.h6
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc2
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.cc11
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.h37
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc18
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h3
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64.cc35
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc11
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h10
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h2
-rw-r--r--deps/v8/src/common/external-pointer-inl.h32
-rw-r--r--deps/v8/src/common/external-pointer.h33
-rw-r--r--deps/v8/src/common/globals.h24
-rw-r--r--deps/v8/src/common/message-template.h12
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h10
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc3
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.h1
-rw-r--r--deps/v8/src/compiler/OWNERS3
-rw-r--r--deps/v8/src/compiler/access-builder.cc16
-rw-r--r--deps/v8/src/compiler/access-info.cc10
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc29
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc31
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc58
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h6
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc6
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc56
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc8
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h17
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc106
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h8
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc8
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc47
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc36
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc4
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc292
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h3
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc3
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc27
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc206
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h22
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc20
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc98
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc65
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h21
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc7
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc4
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc292
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h9
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc9
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc58
-rw-r--r--deps/v8/src/compiler/code-assembler.cc18
-rw-r--r--deps/v8/src/compiler/code-assembler.h11
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc3
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc9
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc74
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc96
-rw-r--r--deps/v8/src/compiler/graph-assembler.h14
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc11
-rw-r--r--deps/v8/src/compiler/graph-reducer.h4
-rw-r--r--deps/v8/src/compiler/heap-refs.h86
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc67
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc244
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h9
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc77
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc22
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h4
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc81
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc7
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc72
-rw-r--r--deps/v8/src/compiler/linkage.cc35
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc350
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h17
-rw-r--r--deps/v8/src/compiler/machine-operator.cc1053
-rw-r--r--deps/v8/src/compiler/machine-operator.h54
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc39
-rw-r--r--deps/v8/src/compiler/memory-lowering.h7
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc18
-rw-r--r--deps/v8/src/compiler/node-matchers.h3
-rw-r--r--deps/v8/src/compiler/node-properties.cc15
-rw-r--r--deps/v8/src/compiler/node-properties.h5
-rw-r--r--deps/v8/src/compiler/node.cc32
-rw-r--r--deps/v8/src/compiler/node.h5
-rw-r--r--deps/v8/src/compiler/opcodes.h6
-rw-r--r--deps/v8/src/compiler/operation-typer.cc39
-rw-r--r--deps/v8/src/compiler/operator-properties.cc2
-rw-r--r--deps/v8/src/compiler/operator.h2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc17
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h6
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc12
-rw-r--r--deps/v8/src/compiler/representation-change.cc16
-rw-r--r--deps/v8/src/compiler/schedule.cc24
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc20
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc44
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc1946
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc94
-rw-r--r--deps/v8/src/compiler/simplified-operator.h27
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc48
-rw-r--r--deps/v8/src/compiler/typed-optimization.h2
-rw-r--r--deps/v8/src/compiler/types.cc9
-rw-r--r--deps/v8/src/compiler/types.h8
-rw-r--r--deps/v8/src/compiler/verifier.cc2
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc991
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h46
-rw-r--r--deps/v8/src/d8/d8-platforms.cc14
-rw-r--r--deps/v8/src/d8/d8-platforms.h8
-rw-r--r--deps/v8/src/d8/d8.cc141
-rw-r--r--deps/v8/src/d8/d8.h4
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc8
-rw-r--r--deps/v8/src/debug/debug-frames.cc9
-rw-r--r--deps/v8/src/debug/debug-frames.h3
-rw-r--r--deps/v8/src/debug/debug-interface.h32
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.cc41
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.h6
-rw-r--r--deps/v8/src/debug/debug-scopes.cc104
-rw-r--r--deps/v8/src/debug/debug-scopes.h7
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc33
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.h2
-rw-r--r--deps/v8/src/debug/debug.cc68
-rw-r--r--deps/v8/src/debug/debug.h5
-rw-r--r--deps/v8/src/debug/liveedit.cc17
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc3
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/OWNERS3
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.cc104
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.h72
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc2
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h4
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server.cc400
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server.h166
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/packet.cc364
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/packet.h105
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/session.cc108
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/session.h32
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/target.cc636
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/target.h87
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/transport.cc101
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/transport.h102
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/util.h27
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc388
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h105
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc303
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h32
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc23
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.cc21
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc14
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc17
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc53
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc169
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc43
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc9
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc4
-rw-r--r--deps/v8/src/execution/frame-constants.h4
-rw-r--r--deps/v8/src/execution/frames-inl.h8
-rw-r--r--deps/v8/src/execution/frames.cc237
-rw-r--r--deps/v8/src/execution/frames.h127
-rw-r--r--deps/v8/src/execution/futex-emulation.cc14
-rw-r--r--deps/v8/src/execution/isolate.cc128
-rw-r--r--deps/v8/src/execution/isolate.h33
-rw-r--r--deps/v8/src/execution/messages.cc95
-rw-r--r--deps/v8/src/execution/messages.h12
-rw-r--r--deps/v8/src/execution/off-thread-isolate-inl.h22
-rw-r--r--deps/v8/src/execution/off-thread-isolate.cc129
-rw-r--r--deps/v8/src/execution/off-thread-isolate.h95
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h4
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc4
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.h2
-rw-r--r--deps/v8/src/execution/protectors-inl.h9
-rw-r--r--deps/v8/src/execution/protectors.cc33
-rw-r--r--deps/v8/src/execution/protectors.h15
-rw-r--r--deps/v8/src/extensions/gc-extension.cc13
-rw-r--r--deps/v8/src/flags/flag-definitions.h112
-rw-r--r--deps/v8/src/handles/global-handles.cc3
-rw-r--r--deps/v8/src/handles/handles.h2
-rw-r--r--deps/v8/src/handles/persistent-handles.cc122
-rw-r--r--deps/v8/src/handles/persistent-handles.h88
-rw-r--r--deps/v8/src/heap/OWNERS1
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc1
-rw-r--r--deps/v8/src/heap/code-stats.cc1
-rw-r--r--deps/v8/src/heap/concurrent-allocator-inl.h86
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc43
-rw-r--r--deps/v8/src/heap/concurrent-allocator.h57
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc1
-rw-r--r--deps/v8/src/heap/concurrent-marking.h1
-rw-r--r--deps/v8/src/heap/cppgc/allocation.cc11
-rw-r--r--deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc39
-rw-r--r--deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc52
-rw-r--r--deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S32
-rw-r--r--deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc53
-rw-r--r--deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S48
-rw-r--r--deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc48
-rw-r--r--deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc48
-rw-r--r--deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc94
-rw-r--r--deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc35
-rw-r--r--deps/v8/src/heap/cppgc/asm/x64/push_registers.S52
-rw-r--r--deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc94
-rw-r--r--deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S (renamed from deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S)0
-rw-r--r--deps/v8/src/heap/cppgc/free-list.cc190
-rw-r--r--deps/v8/src/heap/cppgc/free-list.h62
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.cc2
-rw-r--r--deps/v8/src/heap/cppgc/gc-info-table.h2
-rw-r--r--deps/v8/src/heap/cppgc/gc-info.cc2
-rw-r--r--deps/v8/src/heap/cppgc/globals.h9
-rw-r--r--deps/v8/src/heap/cppgc/heap-inl.h35
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header-inl.h15
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.cc4
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h14
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.cc201
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.h181
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.cc58
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.h127
-rw-r--r--deps/v8/src/heap/cppgc/heap-visitor.h88
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc122
-rw-r--r--deps/v8/src/heap/cppgc/heap.h123
-rw-r--r--deps/v8/src/heap/cppgc/liveness-broker.cc15
-rw-r--r--deps/v8/src/heap/cppgc/logging.cc29
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc152
-rw-r--r--deps/v8/src/heap/cppgc/marker.h121
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc143
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.h70
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator-inl.h74
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc87
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h40
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap-inl.h95
-rw-r--r--deps/v8/src/heap/cppgc/object-start-bitmap.h80
-rw-r--r--deps/v8/src/heap/cppgc/page-memory-inl.h57
-rw-r--r--deps/v8/src/heap/cppgc/page-memory.cc211
-rw-r--r--deps/v8/src/heap/cppgc/page-memory.h237
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc60
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc35
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.cc66
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.h44
-rw-r--r--deps/v8/src/heap/cppgc/raw-heap.cc32
-rw-r--r--deps/v8/src/heap/cppgc/raw-heap.h106
-rw-r--r--deps/v8/src/heap/cppgc/sanitizers.h39
-rw-r--r--deps/v8/src/heap/cppgc/source-location.cc16
-rw-r--r--deps/v8/src/heap/cppgc/stack.cc49
-rw-r--r--deps/v8/src/heap/cppgc/stack.h12
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc213
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.h38
-rw-r--r--deps/v8/src/heap/cppgc/visitor.h23
-rw-r--r--deps/v8/src/heap/cppgc/worklist.h473
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc12
-rw-r--r--deps/v8/src/heap/embedder-tracing.h5
-rw-r--r--deps/v8/src/heap/factory-base.cc5
-rw-r--r--deps/v8/src/heap/factory.cc129
-rw-r--r--deps/v8/src/heap/factory.h45
-rw-r--r--deps/v8/src/heap/finalization-registry-cleanup-task.cc14
-rw-r--r--deps/v8/src/heap/gc-tracer.cc28
-rw-r--r--deps/v8/src/heap/gc-tracer.h11
-rw-r--r--deps/v8/src/heap/heap-inl.h4
-rw-r--r--deps/v8/src/heap/heap.cc896
-rw-r--r--deps/v8/src/heap/heap.h117
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc17
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h9
-rw-r--r--deps/v8/src/heap/incremental-marking.cc136
-rw-r--r--deps/v8/src/heap/incremental-marking.h9
-rw-r--r--deps/v8/src/heap/invalidated-slots.cc2
-rw-r--r--deps/v8/src/heap/large-spaces.cc547
-rw-r--r--deps/v8/src/heap/large-spaces.h232
-rw-r--r--deps/v8/src/heap/list.h (renamed from deps/v8/src/base/list.h)24
-rw-r--r--deps/v8/src/heap/local-allocator-inl.h42
-rw-r--r--deps/v8/src/heap/local-allocator.h15
-rw-r--r--deps/v8/src/heap/local-heap.cc46
-rw-r--r--deps/v8/src/heap/local-heap.h28
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h5
-rw-r--r--deps/v8/src/heap/mark-compact.cc68
-rw-r--r--deps/v8/src/heap/mark-compact.h1
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h37
-rw-r--r--deps/v8/src/heap/marking-visitor.h1
-rw-r--r--deps/v8/src/heap/marking.cc16
-rw-r--r--deps/v8/src/heap/marking.h16
-rw-r--r--deps/v8/src/heap/memory-chunk-inl.h50
-rw-r--r--deps/v8/src/heap/memory-chunk.cc157
-rw-r--r--deps/v8/src/heap/memory-chunk.h471
-rw-r--r--deps/v8/src/heap/object-stats.cc5
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h16
-rw-r--r--deps/v8/src/heap/objects-visiting.h12
-rw-r--r--deps/v8/src/heap/off-thread-factory.cc192
-rw-r--r--deps/v8/src/heap/off-thread-factory.h17
-rw-r--r--deps/v8/src/heap/off-thread-heap.cc241
-rw-r--r--deps/v8/src/heap/off-thread-heap.h52
-rw-r--r--deps/v8/src/heap/read-only-heap.cc138
-rw-r--r--deps/v8/src/heap/read-only-heap.h42
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc172
-rw-r--r--deps/v8/src/heap/read-only-spaces.h125
-rw-r--r--deps/v8/src/heap/remembered-set-inl.h (renamed from deps/v8/src/heap/remembered-set.h)56
-rw-r--r--deps/v8/src/heap/safepoint.cc41
-rw-r--r--deps/v8/src/heap/safepoint.h26
-rw-r--r--deps/v8/src/heap/scavenge-job.cc5
-rw-r--r--deps/v8/src/heap/scavenger-inl.h4
-rw-r--r--deps/v8/src/heap/scavenger.cc55
-rw-r--r--deps/v8/src/heap/scavenger.h7
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc172
-rw-r--r--deps/v8/src/heap/slot-set.h32
-rw-r--r--deps/v8/src/heap/spaces-inl.h55
-rw-r--r--deps/v8/src/heap/spaces.cc948
-rw-r--r--deps/v8/src/heap/spaces.h784
-rw-r--r--deps/v8/src/heap/sweeper.cc221
-rw-r--r--deps/v8/src/heap/sweeper.h35
-rw-r--r--deps/v8/src/heap/third-party/heap-api-stub.cc53
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc7
-rw-r--r--deps/v8/src/ic/ic.cc46
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc30
-rw-r--r--deps/v8/src/init/bootstrapper.cc547
-rw-r--r--deps/v8/src/init/bootstrapper.h14
-rw-r--r--deps/v8/src/init/heap-symbols.h4
-rw-r--r--deps/v8/src/init/v8.cc3
-rw-r--r--deps/v8/src/inspector/BUILD.gn29
-rw-r--r--deps/v8/src/inspector/search-util.cc4
-rw-r--r--deps/v8/src/inspector/string-16.h2
-rw-r--r--deps/v8/src/inspector/string-util.cc86
-rw-r--r--deps/v8/src/inspector/string-util.h11
-rw-r--r--deps/v8/src/inspector/v8-console.cc36
-rw-r--r--deps/v8/src/inspector/v8-console.h2
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc94
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h5
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc15
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h3
-rw-r--r--deps/v8/src/inspector/value-mirror.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc85
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc168
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc14
-rw-r--r--deps/v8/src/interpreter/interpreter.cc17
-rw-r--r--deps/v8/src/libplatform/default-job.cc150
-rw-r--r--deps/v8/src/libplatform/default-job.h126
-rw-r--r--deps/v8/src/libplatform/default-platform.cc59
-rw-r--r--deps/v8/src/libplatform/default-platform.h14
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.cc168
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.h46
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc201
-rw-r--r--deps/v8/src/logging/counters-definitions.h8
-rw-r--r--deps/v8/src/logging/counters.h4
-rw-r--r--deps/v8/src/logging/log.cc9
-rw-r--r--deps/v8/src/logging/log.h2
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h3
-rw-r--r--deps/v8/src/objects/allocation-site.h12
-rw-r--r--deps/v8/src/objects/allocation-site.tq2
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h23
-rw-r--r--deps/v8/src/objects/api-callbacks.h35
-rw-r--r--deps/v8/src/objects/api-callbacks.tq18
-rw-r--r--deps/v8/src/objects/arguments.h3
-rw-r--r--deps/v8/src/objects/arguments.tq293
-rw-r--r--deps/v8/src/objects/backing-store.cc56
-rw-r--r--deps/v8/src/objects/backing-store.h12
-rw-r--r--deps/v8/src/objects/cell.h3
-rw-r--r--deps/v8/src/objects/cell.tq1
-rw-r--r--deps/v8/src/objects/class-definitions-tq-deps-inl.h44
-rw-r--r--deps/v8/src/objects/code-inl.h29
-rw-r--r--deps/v8/src/objects/code.cc8
-rw-r--r--deps/v8/src/objects/code.h42
-rw-r--r--deps/v8/src/objects/contexts-inl.h14
-rw-r--r--deps/v8/src/objects/contexts.h54
-rw-r--r--deps/v8/src/objects/contexts.tq19
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h3
-rw-r--r--deps/v8/src/objects/debug-objects.h11
-rw-r--r--deps/v8/src/objects/debug-objects.tq13
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h5
-rw-r--r--deps/v8/src/objects/descriptor-array.h11
-rw-r--r--deps/v8/src/objects/descriptor-array.tq1
-rw-r--r--deps/v8/src/objects/elements-kind.h8
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h22
-rw-r--r--deps/v8/src/objects/embedder-data-slot.h12
-rw-r--r--deps/v8/src/objects/feedback-vector.cc20
-rw-r--r--deps/v8/src/objects/feedback-vector.h2
-rw-r--r--deps/v8/src/objects/feedback-vector.tq4
-rw-r--r--deps/v8/src/objects/field-type.cc9
-rw-r--r--deps/v8/src/objects/field-type.h1
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h93
-rw-r--r--deps/v8/src/objects/fixed-array.h121
-rw-r--r--deps/v8/src/objects/fixed-array.tq36
-rw-r--r--deps/v8/src/objects/foreign-inl.h18
-rw-r--r--deps/v8/src/objects/foreign.h20
-rw-r--r--deps/v8/src/objects/foreign.tq3
-rw-r--r--deps/v8/src/objects/frame-array-inl.h14
-rw-r--r--deps/v8/src/objects/frame-array.h17
-rw-r--r--deps/v8/src/objects/free-space-inl.h3
-rw-r--r--deps/v8/src/objects/free-space.h13
-rw-r--r--deps/v8/src/objects/free-space.tq1
-rw-r--r--deps/v8/src/objects/function-kind.h4
-rw-r--r--deps/v8/src/objects/heap-number-inl.h10
-rw-r--r--deps/v8/src/objects/heap-number.h13
-rw-r--r--deps/v8/src/objects/heap-number.tq5
-rw-r--r--deps/v8/src/objects/heap-object.h2
-rw-r--r--deps/v8/src/objects/intl-objects.cc17
-rw-r--r--deps/v8/src/objects/intl-objects.h33
-rw-r--r--deps/v8/src/objects/intl-objects.tq11
-rw-r--r--deps/v8/src/objects/js-aggregate-error-inl.h25
-rw-r--r--deps/v8/src/objects/js-aggregate-error.h27
-rw-r--r--deps/v8/src/objects/js-aggregate-error.tq81
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h104
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc13
-rw-r--r--deps/v8/src/objects/js-array-buffer.h119
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq16
-rw-r--r--deps/v8/src/objects/js-array.tq3
-rw-r--r--deps/v8/src/objects/js-break-iterator-inl.h10
-rw-r--r--deps/v8/src/objects/js-break-iterator.cc4
-rw-r--r--deps/v8/src/objects/js-break-iterator.h20
-rw-r--r--deps/v8/src/objects/js-collator-inl.h5
-rw-r--r--deps/v8/src/objects/js-collator.cc25
-rw-r--r--deps/v8/src/objects/js-collator.h11
-rw-r--r--deps/v8/src/objects/js-collection.h2
-rw-r--r--deps/v8/src/objects/js-date-time-format-inl.h7
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc214
-rw-r--r--deps/v8/src/objects/js-date-time-format.h14
-rw-r--r--deps/v8/src/objects/js-display-names-inl.h7
-rw-r--r--deps/v8/src/objects/js-display-names.cc29
-rw-r--r--deps/v8/src/objects/js-display-names.h15
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h6
-rw-r--r--deps/v8/src/objects/js-list-format.cc3
-rw-r--r--deps/v8/src/objects/js-list-format.h16
-rw-r--r--deps/v8/src/objects/js-locale-inl.h4
-rw-r--r--deps/v8/src/objects/js-locale.cc61
-rw-r--r--deps/v8/src/objects/js-locale.h18
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h6
-rw-r--r--deps/v8/src/objects/js-number-format.cc45
-rw-r--r--deps/v8/src/objects/js-number-format.h14
-rw-r--r--deps/v8/src/objects/js-objects-inl.h6
-rw-r--r--deps/v8/src/objects/js-objects.cc57
-rw-r--r--deps/v8/src/objects/js-objects.h14
-rw-r--r--deps/v8/src/objects/js-objects.tq3
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h6
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc1
-rw-r--r--deps/v8/src/objects/js-plural-rules.h13
-rw-r--r--deps/v8/src/objects/js-promise-inl.h4
-rw-r--r--deps/v8/src/objects/js-promise.h12
-rw-r--r--deps/v8/src/objects/js-promise.tq26
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h7
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h6
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator.h5
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator.tq8
-rw-r--r--deps/v8/src/objects/js-regexp.h11
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h7
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h20
-rw-r--r--deps/v8/src/objects/js-segment-iterator-inl.h6
-rw-r--r--deps/v8/src/objects/js-segment-iterator.h15
-rw-r--r--deps/v8/src/objects/js-segmenter-inl.h6
-rw-r--r--deps/v8/src/objects/js-segmenter.cc4
-rw-r--r--deps/v8/src/objects/js-segmenter.h17
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h72
-rw-r--r--deps/v8/src/objects/js-weak-refs.h40
-rw-r--r--deps/v8/src/objects/js-weak-refs.tq17
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h1
-rw-r--r--deps/v8/src/objects/lookup.cc52
-rw-r--r--deps/v8/src/objects/managed.h5
-rw-r--r--deps/v8/src/objects/map-inl.h3
-rw-r--r--deps/v8/src/objects/map.cc69
-rw-r--r--deps/v8/src/objects/map.h38
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h31
-rw-r--r--deps/v8/src/objects/maybe-object.h5
-rw-r--r--deps/v8/src/objects/object-list-macros.h8
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h99
-rw-r--r--deps/v8/src/objects/objects-body-descriptors.h89
-rw-r--r--deps/v8/src/objects/objects-definitions.h12
-rw-r--r--deps/v8/src/objects/objects.cc212
-rw-r--r--deps/v8/src/objects/objects.h1
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc21
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h7
-rw-r--r--deps/v8/src/objects/promise.tq2
-rw-r--r--deps/v8/src/objects/script.h6
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h2
-rw-r--r--deps/v8/src/objects/source-text-module.cc10
-rw-r--r--deps/v8/src/objects/source-text-module.h5
-rw-r--r--deps/v8/src/objects/string-inl.h105
-rw-r--r--deps/v8/src/objects/string-table.h2
-rw-r--r--deps/v8/src/objects/string.cc12
-rw-r--r--deps/v8/src/objects/string.h46
-rw-r--r--deps/v8/src/objects/string.tq11
-rw-r--r--deps/v8/src/objects/visitors.h46
-rw-r--r--deps/v8/src/parsing/expression-scope.h37
-rw-r--r--deps/v8/src/parsing/parse-info.cc360
-rw-r--r--deps/v8/src/parsing/parse-info.h434
-rw-r--r--deps/v8/src/parsing/parser-base.h127
-rw-r--r--deps/v8/src/parsing/parser.cc192
-rw-r--r--deps/v8/src/parsing/parser.h35
-rw-r--r--deps/v8/src/parsing/parsing.cc72
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc111
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h54
-rw-r--r--deps/v8/src/parsing/preparse-data-impl.h9
-rw-r--r--deps/v8/src/parsing/preparse-data.cc14
-rw-r--r--deps/v8/src/parsing/preparse-data.h7
-rw-r--r--deps/v8/src/parsing/preparser.cc9
-rw-r--r--deps/v8/src/parsing/preparser.h11
-rw-r--r--deps/v8/src/parsing/rewriter.cc19
-rw-r--r--deps/v8/src/parsing/scanner-inl.h16
-rw-r--r--deps/v8/src/parsing/scanner.cc15
-rw-r--r--deps/v8/src/parsing/scanner.h23
-rw-r--r--deps/v8/src/parsing/token.h10
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc13
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h43
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc19
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc6
-rw-r--r--deps/v8/src/profiler/profile-generator.cc1
-rw-r--r--deps/v8/src/profiler/profile-generator.h1
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc8
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc1
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc19
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h9
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc22
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h9
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc19
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h9
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc19
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h9
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc19
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h9
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc56
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h36
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h105
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc2
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc68
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h18
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc134
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h26
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc18
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h20
-rw-r--r--deps/v8/src/regexp/regexp-nodes.h9
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc5
-rw-r--r--deps/v8/src/regexp/regexp.cc160
-rw-r--r--deps/v8/src/regexp/regexp.h2
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc20
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h8
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc19
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h9
-rw-r--r--deps/v8/src/roots/roots-inl.h32
-rw-r--r--deps/v8/src/roots/roots.cc21
-rw-r--r--deps/v8/src/roots/roots.h85
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc25
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc1
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc34
-rw-r--r--deps/v8/src/runtime/runtime-module.cc2
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc4
-rw-r--r--deps/v8/src/runtime/runtime-object.cc13
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc57
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc18
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc35
-rw-r--r--deps/v8/src/runtime/runtime-test.cc266
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc238
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc30
-rw-r--r--deps/v8/src/runtime/runtime.cc1
-rw-r--r--deps/v8/src/runtime/runtime.h66
-rw-r--r--deps/v8/src/snapshot/DEPS2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc13
-rw-r--r--deps/v8/src/snapshot/code-serializer.h6
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc (renamed from deps/v8/src/snapshot/partial-deserializer.cc)25
-rw-r--r--deps/v8/src/snapshot/context-deserializer.h (renamed from deps/v8/src/snapshot/partial-deserializer.h)12
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc (renamed from deps/v8/src/snapshot/partial-serializer.cc)134
-rw-r--r--deps/v8/src/snapshot/context-serializer.h (renamed from deps/v8/src/snapshot/partial-serializer.h)22
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc4
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h3
-rw-r--r--deps/v8/src/snapshot/deserializer.cc112
-rw-r--r--deps/v8/src/snapshot/deserializer.h10
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc6
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc10
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h3
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc43
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h2
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc16
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h4
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc4
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h2
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc8
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc4
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc8
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.h2
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc5
-rw-r--r--deps/v8/src/snapshot/roots-serializer.h3
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.h3
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.cc61
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h (renamed from deps/v8/src/snapshot/serializer-common.h)206
-rw-r--r--deps/v8/src/snapshot/serializer.cc174
-rw-r--r--deps/v8/src/snapshot/serializer.h33
-rw-r--r--deps/v8/src/snapshot/snapshot-compression.cc2
-rw-r--r--deps/v8/src/snapshot/snapshot-compression.h4
-rw-r--r--deps/v8/src/snapshot/snapshot-data.cc80
-rw-r--r--deps/v8/src/snapshot/snapshot-data.h129
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h20
-rw-r--r--deps/v8/src/snapshot/snapshot-utils.cc25
-rw-r--r--deps/v8/src/snapshot/snapshot-utils.h18
-rw-r--r--deps/v8/src/snapshot/snapshot.cc (renamed from deps/v8/src/snapshot/snapshot-common.cc)401
-rw-r--r--deps/v8/src/snapshot/snapshot.h197
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc16
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc94
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h14
-rw-r--r--deps/v8/src/strings/uri.cc2
-rw-r--r--deps/v8/src/torque/ast.h26
-rw-r--r--deps/v8/src/torque/class-debug-reader-generator.cc2
-rw-r--r--deps/v8/src/torque/constants.h3
-rw-r--r--deps/v8/src/torque/csa-generator.cc159
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc11
-rw-r--r--deps/v8/src/torque/earley-parser.h5
-rw-r--r--deps/v8/src/torque/global-context.cc7
-rw-r--r--deps/v8/src/torque/global-context.h13
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc707
-rw-r--r--deps/v8/src/torque/implementation-visitor.h30
-rw-r--r--deps/v8/src/torque/instance-type-generator.cc75
-rw-r--r--deps/v8/src/torque/instructions.h16
-rw-r--r--deps/v8/src/torque/torque-compiler.cc2
-rw-r--r--deps/v8/src/torque/torque-parser.cc100
-rw-r--r--deps/v8/src/torque/type-oracle.h8
-rw-r--r--deps/v8/src/torque/type-visitor.cc15
-rw-r--r--deps/v8/src/torque/type-visitor.h4
-rw-r--r--deps/v8/src/torque/types.cc188
-rw-r--r--deps/v8/src/torque/types.h70
-rw-r--r--deps/v8/src/torque/utils.h2
-rw-r--r--deps/v8/src/tracing/DEPS4
-rw-r--r--deps/v8/src/tracing/trace-categories.cc9
-rw-r--r--deps/v8/src/tracing/trace-categories.h58
-rw-r--r--deps/v8/src/tracing/trace-event.cc2
-rw-r--r--deps/v8/src/tracing/trace-event.h47
-rw-r--r--deps/v8/src/tracing/traced-value.cc15
-rw-r--r--deps/v8/src/tracing/traced-value.h13
-rw-r--r--deps/v8/src/utils/ostreams.h4
-rw-r--r--deps/v8/src/utils/pointer-with-payload.h2
-rw-r--r--deps/v8/src/utils/vector.h39
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h1559
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h637
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h1401
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc213
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h426
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc1268
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h5
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h20
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h869
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h1077
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h554
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h554
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h1479
-rw-r--r--deps/v8/src/wasm/c-api.cc4
-rw-r--r--deps/v8/src/wasm/compilation-environment.h13
-rw-r--r--deps/v8/src/wasm/decoder.h35
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h750
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc3
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h4
-rw-r--r--deps/v8/src/wasm/function-compiler.cc35
-rw-r--r--deps/v8/src/wasm/function-compiler.h7
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc84
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.cc13
-rw-r--r--deps/v8/src/wasm/module-compiler.cc321
-rw-r--r--deps/v8/src/wasm/module-compiler.h9
-rw-r--r--deps/v8/src/wasm/module-decoder.cc204
-rw-r--r--deps/v8/src/wasm/module-decoder.h8
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc66
-rw-r--r--deps/v8/src/wasm/struct-types.h116
-rw-r--r--deps/v8/src/wasm/value-type.h133
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc346
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h170
-rw-r--r--deps/v8/src/wasm/wasm-constants.h16
-rw-r--r--deps/v8/src/wasm/wasm-debug-evaluate.cc190
-rw-r--r--deps/v8/src/wasm/wasm-debug-evaluate.h2
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc935
-rw-r--r--deps/v8/src/wasm/wasm-debug.h19
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc258
-rw-r--r--deps/v8/src/wasm/wasm-engine.h21
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h8
-rw-r--r--deps/v8/src/wasm/wasm-features.cc3
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc263
-rw-r--r--deps/v8/src/wasm/wasm-js.cc31
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc121
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h41
-rw-r--r--deps/v8/src/wasm/wasm-module.cc37
-rw-r--r--deps/v8/src/wasm/wasm-module.h103
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h51
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc129
-rw-r--r--deps/v8/src/wasm/wasm-objects.h119
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq12
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc45
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h464
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc12
-rw-r--r--deps/v8/src/wasm/wasm-tier.h6
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status5
-rw-r--r--deps/v8/test/cctest/BUILD.gn25
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.h6
-rw-r--r--deps/v8/test/cctest/cctest.status32
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc28
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h24
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc100
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc81
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden42
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden87
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden136
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden120
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden240
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden31
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden84
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc36
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc33
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc451
-rw-r--r--deps/v8/test/cctest/parsing/test-preparser.cc23
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc6
-rw-r--r--deps/v8/test/cctest/test-api-icu.cc57
-rw-r--r--deps/v8/test/cctest/test-api-wasm.cc34
-rw-r--r--deps/v8/test/cctest/test-api.cc150
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc299
-rw-r--r--deps/v8/test/cctest/test-constantpool.cc25
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc49
-rw-r--r--deps/v8/test/cctest/test-debug-helper.cc1
-rw-r--r--deps/v8/test/cctest/test-debug.cc22
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc7
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc21
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc149
-rw-r--r--deps/v8/test/cctest/test-flags.cc1
-rw-r--r--deps/v8/test/cctest/test-inspector.cc109
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc139
-rw-r--r--deps/v8/test/cctest/test-log.cc6
-rw-r--r--deps/v8/test/cctest/test-parsing.cc269
-rw-r--r--deps/v8/test/cctest/test-persistent-handles.cc114
-rw-r--r--deps/v8/test/cctest/test-regexp.cc49
-rw-r--r--deps/v8/test/cctest/test-roots.cc2
-rw-r--r--deps/v8/test/cctest/test-serialize.cc219
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc44
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc311
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc37
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc31
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc58
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc18
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc84
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc35
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc47
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc219
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc46
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc111
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc260
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc18
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc3
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc4
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc65
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h55
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h148
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc3
-rw-r--r--deps/v8/test/debugger/debug/debug-liveedit-patch-positions-replace.js2
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/promise-any-caught.js39
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/promise-any-uncaught.js67
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-10319.js46
-rw-r--r--deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js5
-rw-r--r--deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js13
-rw-r--r--deps/v8/test/debugger/debug/wasm/debug-step-into-wasm.js8
-rw-r--r--deps/v8/test/debugger/debugger.status29
-rw-r--r--deps/v8/test/debugging/debugging.status9
-rw-r--r--deps/v8/test/debugging/testcfg.py3
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/OWNERS3
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/breakpoints.py58
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/connect.py34
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/float.py71
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py137
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/memory.py96
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/status.py109
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/stepping.py56
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/__init__.py0
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test.js33
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.js31
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.py23
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.js31
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.py21
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js48
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.py43
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.js31
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.py28
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/trap.py37
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status5
-rw-r--r--deps/v8/test/fuzzer/parser.cc6
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc143
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc11
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt20
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js9
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt14
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js2
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-anyref-global.js11
-rw-r--r--deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt11
-rw-r--r--deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart.js63
-rw-r--r--deps/v8/test/inspector/debugger/wasm-debug-evaluate-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/wasm-debug-evaluate.js155
-rw-r--r--deps/v8/test/inspector/debugger/wasm-global-names.js12
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt160
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js20
-rw-r--r--deps/v8/test/inspector/debugger/wasm-memory-names-expected.txt19
-rw-r--r--deps/v8/test/inspector/debugger/wasm-memory-names.js117
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt105
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts-expected.txt80
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts.js101
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt111
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt56
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap.js102
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt84
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js6
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt87
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js3
-rw-r--r--deps/v8/test/inspector/inspector-test.cc1
-rw-r--r--deps/v8/test/inspector/inspector.status22
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/query-objects-expected.txt3
-rw-r--r--deps/v8/test/inspector/runtime/query-objects.js28
-rw-r--r--deps/v8/test/inspector/runtime/regress-1075763-expected.txt26
-rw-r--r--deps/v8/test/inspector/runtime/regress-1075763.js14
-rw-r--r--deps/v8/test/inspector/runtime/regress-1078205-expected.txt157
-rw-r--r--deps/v8/test/inspector/runtime/regress-1078205.js34
-rw-r--r--deps/v8/test/inspector/runtime/regress-986051-expected.txt76
-rw-r--r--deps/v8/test/inspector/runtime/regress-986051.js25
-rw-r--r--deps/v8/test/inspector/runtime/remote-object-expected.txt2
-rw-r--r--deps/v8/test/intl/date-format/check-calendar.js2
-rw-r--r--deps/v8/test/intl/date-format/check-numbering-system.js2
-rw-r--r--deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js1
-rw-r--r--deps/v8/test/intl/date-format/property-override.js1
-rw-r--r--deps/v8/test/intl/date-format/related-year.js1
-rw-r--r--deps/v8/test/intl/displaynames/constructor-order.js2
-rw-r--r--deps/v8/test/intl/displaynames/constructor.js1
-rw-r--r--deps/v8/test/intl/displaynames/resolved-options.js2
-rw-r--r--deps/v8/test/intl/displaynames/supported-locale.js2
-rw-r--r--deps/v8/test/intl/intl.status13
-rw-r--r--deps/v8/test/intl/locale/locale-constructor.js5
-rw-r--r--deps/v8/test/intl/number-format/check-numbering-system.js2
-rw-r--r--deps/v8/test/intl/number-format/constructor-numberingSytem-order.js2
-rw-r--r--deps/v8/test/intl/number-format/unified/sign-display.js2
-rw-r--r--deps/v8/test/intl/regress-10437.js18
-rw-r--r--deps/v8/test/intl/regress-10438.js51
-rw-r--r--deps/v8/test/intl/regress-1074578.js45
-rw-r--r--deps/v8/test/intl/regress-364374.js60
-rw-r--r--deps/v8/test/intl/regress-966285.js2
-rw-r--r--deps/v8/test/intl/regress-9786.js2
-rw-r--r--deps/v8/test/intl/regress-9787.js2
-rw-r--r--deps/v8/test/intl/regress-9788.js2
-rw-r--r--deps/v8/test/intl/regress-9887.js2
-rw-r--r--deps/v8/test/intl/relative-time-format/resolved-options-nu-extended.js38
-rw-r--r--deps/v8/test/js-perf-test/JSTests1.json16
-rw-r--r--deps/v8/test/js-perf-test/Operators/abstract-equality.js79
-rw-r--r--deps/v8/test/js-perf-test/Operators/run.js28
-rw-r--r--deps/v8/test/message/fail/spread-call-4.js5
-rw-r--r--deps/v8/test/message/fail/spread-call-4.out5
-rw-r--r--deps/v8/test/message/message.status9
-rw-r--r--deps/v8/test/message/wasm-trace-memory-interpreted.js8
-rw-r--r--deps/v8/test/message/wasm-trace-memory-interpreted.out14
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.js3
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.out2
-rw-r--r--deps/v8/test/mjsunit/asm/load-elimination.js2
-rw-r--r--deps/v8/test/mjsunit/call-intrinsic-fuzzing.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js53
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-infinite.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/redundancy-elimination.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1065737.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1067544.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1068494.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1070892.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1071743.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1074736.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1082704.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1084820.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1092650.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1094132.js78
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking-2.js225
-rw-r--r--deps/v8/test/mjsunit/es6/array-copywithin.js2
-rw-r--r--deps/v8/test/mjsunit/es6/promise-all-resolve-not-callable.js30
-rw-r--r--deps/v8/test/mjsunit/es6/promise-allsettled-resolve-not-callable.js30
-rw-r--r--deps/v8/test/mjsunit/es6/promise-race-resolve-not-callable.js30
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-constructor.js16
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-revocable.js6
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-construct.js3
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-411237.js17
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-from-optional-arguments.js32
-rw-r--r--deps/v8/test/mjsunit/harmony/aggregate-error.js213
-rw-r--r--deps/v8/test/mjsunit/harmony/logical-assignment.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/optional-chaining.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-any-overflow-1.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-any-overflow-2.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-any-resolve-not-callable.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-any.js94
-rw-r--r--deps/v8/test/mjsunit/harmony/string-matchAll.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics-cleanupsome.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js93
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js)33
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js (renamed from deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js)22
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js50
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js8
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status113
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js29
-rw-r--r--deps/v8/test/mjsunit/object-tostring-builtins.js (renamed from deps/v8/test/mjsunit/class-of-builtins.js)4
-rw-r--r--deps/v8/test/mjsunit/regress-crbug-1078825.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-10508.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1069964.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1071190.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1076569.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1077804.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1078913.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1365.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-447756.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-491481.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1053939-1.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1055138-1.js64
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1055138-2.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1055138-3.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1060023.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1063796.js (renamed from deps/v8/test/mjsunit/regress/regress-347542.js)13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1065741.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1067757.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1070560.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1074737.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1077508.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-754177.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-10484-1.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-10484-2.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-10513.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10309.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1054466.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1055692.js35
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1065599.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1065635.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1065852.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1067621.js82
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1070078.js39
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1073553.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1074586-b.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1074586.js94
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1075953.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1079449.js37
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1081030.js25
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7049.js53
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-715216a.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-715216b.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-719175.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-766003.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-771243.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-772332.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-778917.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-831463.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834624.js29
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js26
-rw-r--r--deps/v8/test/mjsunit/serialize-deserialize-now.js17
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js113
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-management.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-global-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js51
-rw-r--r--deps/v8/test/mjsunit/wasm/futex-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js222
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js569
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-value-interpreter.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/nullref-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js29
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-interpreter.js68
-rw-r--r--deps/v8/test/mozilla/mozilla.status5
-rw-r--r--deps/v8/test/test262/BUILD.gn1
-rw-r--r--deps/v8/test/test262/harness-ishtmldda.js5
-rw-r--r--deps/v8/test/test262/test262.status159
-rw-r--r--deps/v8/test/test262/testcfg.py13
-rw-r--r--deps/v8/test/torque/test-torque.tq2063
-rw-r--r--deps/v8/test/unittests/BUILD.gn36
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc95
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc235
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc251
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc14
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h7
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc40
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/bitmap-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc42
-rw-r--r--deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc130
-rw-r--r--deps/v8/test/unittests/heap/cppgc/finalizer-trait-unittest.cc (renamed from deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc)4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc187
-rw-r--r--deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc143
-rw-r--r--deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc26
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc (renamed from deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc)2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc (renamed from deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc)0
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc274
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-unittest.cc115
-rw-r--r--deps/v8/test/unittests/heap/cppgc/logging-unittest.cc79
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc188
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc285
-rw-r--r--deps/v8/test/unittests/heap/cppgc/member-unittest.cc304
-rw-r--r--deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc189
-rw-r--r--deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc308
-rw-r--r--deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc658
-rw-r--r--deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc199
-rw-r--r--deps/v8/test/unittests/heap/cppgc/source-location-unittest.cc61
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stack-unittest.cc357
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stack_unittest.cc256
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc230
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.cc17
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h25
-rw-r--r--deps/v8/test/unittests/heap/cppgc/visitor-unittest.cc232
-rw-r--r--deps/v8/test/unittests/heap/cppgc/worklist-unittest.cc346
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc48
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/list-unittest.cc (renamed from deps/v8/test/unittests/base/list-unittest.cc)12
-rw-r--r--deps/v8/test/unittests/heap/off-thread-factory-unittest.cc140
-rw-r--r--deps/v8/test/unittests/heap/safepoint-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc36
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc27
-rw-r--r--deps/v8/test/unittests/libplatform/default-job-unittest.cc233
-rw-r--r--deps/v8/test/unittests/libplatform/default-platform-unittest.cc10
-rw-r--r--deps/v8/test/unittests/objects/backing-store-unittest.cc22
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc5
-rw-r--r--deps/v8/test/unittests/test-helpers.cc10
-rw-r--r--deps/v8/test/unittests/test-helpers.h3
-rw-r--r--deps/v8/test/unittests/test-utils.cc65
-rw-r--r--deps/v8/test/unittests/test-utils.h216
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc46
-rw-r--r--deps/v8/test/unittests/unittests.status5
-rw-r--r--deps/v8/test/unittests/utils/vector-unittest.cc54
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc40
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc65
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc16
-rw-r--r--deps/v8/test/unittests/wasm/wasm-gdbserver-unittest.cc275
-rw-r--r--deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc35
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc4
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-tests.status6
-rw-r--r--deps/v8/test/wasm-js/testcfg.py4
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status9
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py8
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status19
-rw-r--r--deps/v8/test/webkit/webkit.status6
-rw-r--r--deps/v8/testing/gmock/BUILD.gn9
-rw-r--r--deps/v8/testing/gmock/include/gmock/gmock-generated-function-mockers.h10
-rw-r--r--deps/v8/third_party/googletest/BUILD.gn22
-rw-r--r--deps/v8/third_party/jinja2/README.chromium1
-rw-r--r--deps/v8/third_party/jsoncpp/BUILD.gn50
-rw-r--r--deps/v8/third_party/jsoncpp/LICENSE55
-rw-r--r--deps/v8/third_party/jsoncpp/README.chromium16
-rw-r--r--deps/v8/third_party/jsoncpp/generated/version.h22
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq2374
-rw-r--r--deps/v8/third_party/zlib/BUILD.gn81
-rw-r--r--deps/v8/third_party/zlib/DEPS3
-rw-r--r--deps/v8/third_party/zlib/README.chromium1
-rw-r--r--deps/v8/third_party/zlib/chromeconf.h4
-rw-r--r--deps/v8/third_party/zlib/contrib/optimizations/insert_string.h59
-rw-r--r--deps/v8/third_party/zlib/cpu_features.c7
-rw-r--r--deps/v8/third_party/zlib/cpu_features.h1
-rw-r--r--deps/v8/third_party/zlib/crc32.c6
-rw-r--r--deps/v8/third_party/zlib/crc_folding.c6
-rw-r--r--deps/v8/third_party/zlib/deflate.c4
-rw-r--r--deps/v8/third_party/zlib/fill_window_sse.c7
-rw-r--r--deps/v8/third_party/zlib/google/OWNERS1
-rw-r--r--deps/v8/third_party/zlib/google/compression_utils_portable.cc2
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.cc1
-rw-r--r--deps/v8/third_party/zlib/patches/0005-infcover-gtest.patch405
-rw-r--r--deps/v8/tools/callstats.html148
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/v8_commands.py1
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock.js30
-rw-r--r--deps/v8/tools/clusterfuzz/v8_sanity_checks.js13
-rw-r--r--deps/v8/tools/csvparser.js8
-rw-r--r--deps/v8/tools/debug_helper/BUILD.gn5
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc14
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py1
-rw-r--r--deps/v8/tools/logreader.js3
-rw-r--r--deps/v8/tools/map-processor.html126
-rw-r--r--deps/v8/tools/map-processor.js139
-rw-r--r--deps/v8/tools/testrunner/base_runner.py6
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py3
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py113
-rw-r--r--deps/v8/tools/testrunner/testproc/util.py39
-rw-r--r--deps/v8/tools/testrunner/testproc/util_unittest.py61
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py10
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py6
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json229
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results2.json144
-rw-r--r--deps/v8/tools/v8heapconst.py423
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh2
-rw-r--r--deps/v8/tools/whitespace.txt6
1404 files changed, 73174 insertions, 39729 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 1afbd765d3..6d2cf1077a 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -61,6 +61,8 @@
!/third_party/antlr4
!/third_party/binutils
!/third_party/inspector_protocol
+!/third_party/jsoncpp
+/third_party/jsoncpp/source
!/third_party/colorama
/third_party/colorama/src
!/third_party/googletest
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 7036ecd42b..47a83c5ff1 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -43,6 +43,7 @@ Julia Computing, Inc. <*@juliacomputing.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
+Adam Kallai <kadam@inf.u-szeged.hu>
Akinori MUSHA <knu@FreeBSD.org>
Alessandro Pignotti <alessandro@leaningtech.com>
Alex Kodat <akodat@rocketsoftware.com>
@@ -112,6 +113,7 @@ James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Jiawen Geng <technicalcute@gmail.com>
+Jiaxun Yang <jiaxun.yang@flygoat.com>
Joel Stanley <joel@jms.id.au>
Johan Bergstrƶm <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
@@ -211,3 +213,4 @@ Zhao Jiazhong <kyslie3100@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
ęŸ³č£äø€ <admin@web-tinker.com>
Yanbo Li <lybvinci@gmail.com>
+Gilang Mentari Hamidy <gilang@hamidy.net>
\ No newline at end of file
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index b2dde3f9d7..167e63503c 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -218,6 +218,17 @@ declare_args() {
# Enable control-flow integrity features, such as pointer authentication for
# ARM64.
v8_control_flow_integrity = false
+
+ # Enable object names in cppgc for debug purposes.
+ cppgc_enable_object_names = false
+
+ # Enable V8 heap sandbox experimental feature.
+ # Sets -DV8_HEAP_SANDBOX.
+ v8_enable_heap_sandbox = ""
+
+ # Experimental support for native context independent code.
+ # https://crbug.com/v8/8888
+ v8_enable_nci_code = false
}
# Derived defaults.
@@ -254,7 +265,9 @@ if (v8_enable_pointer_compression == "") {
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
}
-
+if (v8_enable_heap_sandbox == "") {
+ v8_enable_heap_sandbox = false
+}
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
@@ -284,6 +297,9 @@ assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
+assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
+ "V8 Heap Sandbox requires pointer compression")
+
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -294,8 +310,11 @@ v8_toolset_for_shell = "host"
config("internal_config_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
+ configs = [ ":v8_tracing_config" ]
+
include_dirs = [
".",
+ "include",
"$target_gen_dir",
]
}
@@ -308,7 +327,6 @@ config("internal_config") {
"//build/config/compiler:wexit_time_destructors",
":internal_config_base",
":v8_header_features",
- ":v8_tracing_config",
]
if (is_component_build) {
@@ -346,6 +364,14 @@ config("libbase_config") {
}
}
+# This config should be applied to code using the cppgc_base.
+config("cppgc_base_config") {
+ defines = []
+ if (cppgc_enable_object_names) {
+ defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ]
+ }
+}
+
# This config should be applied to code using the libsampler.
config("libsampler_config") {
include_dirs = [ "include" ]
@@ -389,6 +415,9 @@ config("v8_header_features") {
if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
}
+ if (v8_enable_heap_sandbox) {
+ defines += [ "V8_HEAP_SANDBOX" ]
+ }
if (v8_deprecation_warnings) {
defines += [ "V8_DEPRECATION_WARNINGS" ]
}
@@ -428,10 +457,6 @@ config("features") {
}
if (v8_enable_lite_mode) {
defines += [ "V8_LITE_MODE" ]
-
- # TODO(v8:7777): Remove the define once the --jitless runtime flag does
- # everything we need.
- defines += [ "V8_JITLESS_MODE" ]
}
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
@@ -501,10 +526,6 @@ config("features") {
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
-
- # TODO(v8:8519): Remove the define once all use-sites in
- # the code are removed/fixed
- defines += [ "V8_EMBEDDED_BUILTINS" ]
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
}
@@ -532,6 +553,9 @@ config("features") {
if (v8_enable_wasm_gdb_remote_debugging) {
defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ]
}
+ if (v8_enable_nci_code) {
+ defines += [ "V8_ENABLE_NCI_CODE" ]
+ }
}
config("toolchain") {
@@ -630,7 +654,9 @@ config("toolchain") {
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
-
+ if (mips_use_msa) {
+ defines += [ "_MIPS_MSA" ]
+ }
if (host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
} else if (host_byteorder == "big") {
@@ -646,9 +672,6 @@ config("toolchain") {
}
if (mips_arch_variant == "r6") {
defines += [ "_MIPS_ARCH_MIPS64R6" ]
- if (mips_use_msa) {
- defines += [ "_MIPS_MSA" ]
- }
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
@@ -1012,9 +1035,12 @@ torque_files = [
"src/builtins/convert.tq",
"src/builtins/console.tq",
"src/builtins/data-view.tq",
+ "src/builtins/finalization-registry.tq",
"src/builtins/frames.tq",
"src/builtins/frame-arguments.tq",
"src/builtins/growable-fixed-array.tq",
+ "src/builtins/ic-callable.tq",
+ "src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/iterator.tq",
"src/builtins/math.tq",
@@ -1024,6 +1050,7 @@ torque_files = [
"src/builtins/promise-abstract-operations.tq",
"src/builtins/promise-all.tq",
"src/builtins/promise-all-element-closure.tq",
+ "src/builtins/promise-any.tq",
"src/builtins/promise-constructor.tq",
"src/builtins/promise-finally.tq",
"src/builtins/promise-misc.tq",
@@ -1082,6 +1109,7 @@ torque_files = [
"src/builtins/typed-array-sort.tq",
"src/builtins/typed-array-subarray.tq",
"src/builtins/typed-array.tq",
+ "src/builtins/wasm.tq",
"src/ic/handler-configuration.tq",
"src/objects/allocation-site.tq",
"src/objects/api-callbacks.tq",
@@ -1101,6 +1129,7 @@ torque_files = [
"src/objects/heap-number.tq",
"src/objects/heap-object.tq",
"src/objects/intl-objects.tq",
+ "src/objects/js-aggregate-error.tq",
"src/objects/js-array-buffer.tq",
"src/objects/js-array.tq",
"src/objects/js-collection-iterator.tq",
@@ -1184,7 +1213,7 @@ template("run_torque") {
"class-verifiers-tq.h",
"enum-verifiers-tq.cc",
"objects-printer-tq.cc",
- "objects-body-descriptors-tq-inl.h",
+ "objects-body-descriptors-tq-inl.inc",
"class-definitions-tq.cc",
"class-definitions-tq-inl.h",
"class-definitions-tq.h",
@@ -1196,6 +1225,8 @@ template("run_torque") {
"instance-types-tq.h",
"internal-class-definitions-tq.h",
"internal-class-definitions-tq-inl.h",
+ "exported-class-definitions-tq.h",
+ "exported-class-definitions-tq-inl.h",
]
outputs = []
@@ -1270,6 +1301,7 @@ v8_source_set("torque_generated_initializers") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_tracing",
]
public_deps = [ ":v8_maybe_icu" ]
@@ -1298,6 +1330,7 @@ v8_source_set("torque_generated_definitions") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_tracing",
]
public_deps = [ ":v8_maybe_icu" ]
@@ -1573,7 +1606,10 @@ v8_source_set("v8_initializers") {
"test/cctest:*",
]
- deps = [ ":torque_generated_initializers" ]
+ deps = [
+ ":torque_generated_initializers",
+ ":v8_tracing",
+ ]
sources = [
### gcmole(all) ###
@@ -1624,6 +1660,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-typed-array-gen.h",
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
+ "src/builtins/builtins-wasm-gen.h",
"src/builtins/growable-fixed-array-gen.cc",
"src/builtins/growable-fixed-array-gen.h",
"src/builtins/setup-builtins-internal.cc",
@@ -1701,7 +1738,10 @@ v8_source_set("v8_initializers") {
v8_source_set("v8_init") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- deps = [ ":v8_initializers" ]
+ deps = [
+ ":v8_initializers",
+ ":v8_tracing",
+ ]
sources = [
### gcmole(all) ###
@@ -2005,6 +2045,7 @@ v8_source_set("v8_compiler_opt") {
":generate_bytecode_builtins_list",
":run_torque",
":v8_maybe_icu",
+ ":v8_tracing",
]
if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
@@ -2029,6 +2070,7 @@ v8_source_set("v8_compiler") {
":generate_bytecode_builtins_list",
":run_torque",
":v8_maybe_icu",
+ ":v8_tracing",
]
configs = [ ":internal_config" ]
@@ -2042,6 +2084,18 @@ group("v8_compiler_for_mksnapshot") {
}
}
+# Any target using trace events must directly or indirectly depend on
+# v8_tracing.
+group("v8_tracing") {
+ if (v8_use_perfetto) {
+ if (build_with_chromium) {
+ public_deps = [ "//third_party/perfetto:libperfetto" ]
+ } else {
+ public_deps = [ ":v8_libperfetto" ]
+ }
+ }
+}
+
v8_source_set("v8_base_without_compiler") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -2053,6 +2107,7 @@ v8_source_set("v8_base_without_compiler") {
### gcmole(all) ###
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
+ "include/cppgc/common.h",
"include/v8-fast-api-calls.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
@@ -2121,7 +2176,6 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-json.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
- "src/builtins/builtins-promise.cc",
"src/builtins/builtins-promise.h",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
@@ -2160,6 +2214,8 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/constant-pool.h",
"src/codegen/constants-arch.h",
"src/codegen/cpu-features.h",
+ "src/codegen/external-reference-encoder.cc",
+ "src/codegen/external-reference-encoder.h",
"src/codegen/external-reference-table.cc",
"src/codegen/external-reference-table.h",
"src/codegen/external-reference.cc",
@@ -2207,6 +2263,8 @@ v8_source_set("v8_base_without_compiler") {
"src/common/assert-scope.cc",
"src/common/assert-scope.h",
"src/common/checks.h",
+ "src/common/external-pointer-inl.h",
+ "src/common/external-pointer.h",
"src/common/message-template.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
@@ -2284,6 +2342,7 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/messages.h",
"src/execution/microtask-queue.cc",
"src/execution/microtask-queue.h",
+ "src/execution/off-thread-isolate-inl.h",
"src/execution/off-thread-isolate.cc",
"src/execution/off-thread-isolate.h",
"src/execution/pointer-authentication.h",
@@ -2330,6 +2389,8 @@ v8_source_set("v8_base_without_compiler") {
"src/handles/local-handles.h",
"src/handles/maybe-handles-inl.h",
"src/handles/maybe-handles.h",
+ "src/handles/persistent-handles.cc",
+ "src/handles/persistent-handles.h",
"src/heap/array-buffer-collector.cc",
"src/heap/array-buffer-collector.h",
"src/heap/array-buffer-sweeper.cc",
@@ -2344,6 +2405,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/code-stats.h",
"src/heap/combined-heap.cc",
"src/heap/combined-heap.h",
+ "src/heap/concurrent-allocator-inl.h",
+ "src/heap/concurrent-allocator.cc",
+ "src/heap/concurrent-allocator.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
@@ -2376,6 +2440,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.cc",
"src/heap/item-parallel-job.h",
+ "src/heap/large-spaces.cc",
+ "src/heap/large-spaces.h",
+ "src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
"src/heap/local-heap.cc",
@@ -2389,6 +2456,9 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
"src/heap/marking.h",
+ "src/heap/memory-chunk-inl.h",
+ "src/heap/memory-chunk.cc",
+ "src/heap/memory-chunk.h",
"src/heap/memory-measurement-inl.h",
"src/heap/memory-measurement.cc",
"src/heap/memory-measurement.h",
@@ -2401,9 +2471,13 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/objects-visiting.h",
"src/heap/off-thread-factory.cc",
"src/heap/off-thread-factory.h",
+ "src/heap/off-thread-heap.cc",
+ "src/heap/off-thread-heap.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
+ "src/heap/read-only-spaces.cc",
+ "src/heap/read-only-spaces.h",
"src/heap/remembered-set.h",
"src/heap/safepoint.cc",
"src/heap/safepoint.h",
@@ -2598,6 +2672,8 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/internal-index.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
+ "src/objects/js-aggregate-error-inl.h",
+ "src/objects/js-aggregate-error.h",
"src/objects/js-array-buffer-inl.h",
"src/objects/js-array-buffer.cc",
"src/objects/js-array-buffer.h",
@@ -2888,6 +2964,7 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-utils.h",
"src/runtime/runtime-wasm.cc",
+ "src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/sanitizer/asan.h",
@@ -2897,6 +2974,10 @@ v8_source_set("v8_base_without_compiler") {
"src/sanitizer/tsan.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
+ "src/snapshot/context-deserializer.cc",
+ "src/snapshot/context-deserializer.h",
+ "src/snapshot/context-serializer.cc",
+ "src/snapshot/context-serializer.h",
"src/snapshot/deserializer-allocator.cc",
"src/snapshot/deserializer-allocator.h",
"src/snapshot/deserializer.cc",
@@ -2905,10 +2986,6 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/embedded/embedded-data.h",
"src/snapshot/object-deserializer.cc",
"src/snapshot/object-deserializer.h",
- "src/snapshot/partial-deserializer.cc",
- "src/snapshot/partial-deserializer.h",
- "src/snapshot/partial-serializer.cc",
- "src/snapshot/partial-serializer.h",
"src/snapshot/read-only-deserializer.cc",
"src/snapshot/read-only-deserializer.h",
"src/snapshot/read-only-serializer.cc",
@@ -2918,15 +2995,19 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/roots-serializer.h",
"src/snapshot/serializer-allocator.cc",
"src/snapshot/serializer-allocator.h",
- "src/snapshot/serializer-common.cc",
- "src/snapshot/serializer-common.h",
+ "src/snapshot/serializer-deserializer.cc",
+ "src/snapshot/serializer-deserializer.h",
"src/snapshot/serializer.cc",
"src/snapshot/serializer.h",
- "src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-compression.cc",
"src/snapshot/snapshot-compression.h",
+ "src/snapshot/snapshot-data.cc",
+ "src/snapshot/snapshot-data.h",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
+ "src/snapshot/snapshot-utils.cc",
+ "src/snapshot/snapshot-utils.h",
+ "src/snapshot/snapshot.cc",
"src/snapshot/snapshot.h",
"src/snapshot/startup-deserializer.cc",
"src/snapshot/startup-deserializer.h",
@@ -3026,6 +3107,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
"src/wasm/streaming-decoder.h",
+ "src/wasm/struct-types.h",
"src/wasm/value-type.h",
"src/wasm/wasm-arguments.h",
"src/wasm/wasm-code-manager.cc",
@@ -3085,21 +3167,28 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_third_party_heap) {
sources += v8_third_party_heap_files
+ } else {
+ sources += [ "src/heap/third-party/heap-api-stub.cc" ]
}
if (v8_enable_wasm_gdb_remote_debugging) {
sources += [
+ "src/debug/wasm/gdb-server/gdb-remote-util.cc",
+ "src/debug/wasm/gdb-server/gdb-remote-util.h",
"src/debug/wasm/gdb-server/gdb-server-thread.cc",
"src/debug/wasm/gdb-server/gdb-server-thread.h",
"src/debug/wasm/gdb-server/gdb-server.cc",
"src/debug/wasm/gdb-server/gdb-server.h",
+ "src/debug/wasm/gdb-server/packet.cc",
+ "src/debug/wasm/gdb-server/packet.h",
"src/debug/wasm/gdb-server/session.cc",
"src/debug/wasm/gdb-server/session.h",
"src/debug/wasm/gdb-server/target.cc",
"src/debug/wasm/gdb-server/target.h",
"src/debug/wasm/gdb-server/transport.cc",
"src/debug/wasm/gdb-server/transport.h",
- "src/debug/wasm/gdb-server/util.h",
+ "src/debug/wasm/gdb-server/wasm-module-debug.cc",
+ "src/debug/wasm/gdb-server/wasm-module-debug.h",
]
}
@@ -3420,6 +3509,7 @@ v8_source_set("v8_base_without_compiler") {
":v8_libbase",
":v8_libsampler",
":v8_shared_internal_headers",
+ ":v8_tracing",
":v8_version",
"src/inspector:inspector",
]
@@ -3511,6 +3601,14 @@ v8_source_set("v8_base_without_compiler") {
]
deps += [ "src/third_party/vtune:v8_vtune_trace_mark" ]
}
+
+ if (v8_use_perfetto) {
+ sources -= [ "//base/trace_event/common/trace_event_common.h" ]
+ sources += [
+ "src/tracing/trace-categories.cc",
+ "src/tracing/trace-categories.h",
+ ]
+ }
}
group("v8_base") {
@@ -3672,7 +3770,6 @@ v8_component("v8_libbase") {
"src/base/ieee754.h",
"src/base/iterator.h",
"src/base/lazy-instance.h",
- "src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
"src/base/lsan.h",
@@ -3843,6 +3940,8 @@ v8_component("v8_libplatform") {
"include/libplatform/v8-tracing.h",
"src/libplatform/default-foreground-task-runner.cc",
"src/libplatform/default-foreground-task-runner.h",
+ "src/libplatform/default-job.cc",
+ "src/libplatform/default-job.h",
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/default-worker-threads-task-runner.cc",
@@ -3873,20 +3972,25 @@ v8_component("v8_libplatform") {
deps = [
":v8_headers",
":v8_libbase",
+ ":v8_tracing",
]
+
if (v8_use_perfetto) {
+ sources -= [
+ "//base/trace_event/common/trace_event_common.h",
+ "src/libplatform/tracing/trace-buffer.cc",
+ "src/libplatform/tracing/trace-buffer.h",
+ "src/libplatform/tracing/trace-object.cc",
+ "src/libplatform/tracing/trace-writer.cc",
+ "src/libplatform/tracing/trace-writer.h",
+ ]
sources += [
- "src/libplatform/tracing/json-trace-event-listener.cc",
- "src/libplatform/tracing/json-trace-event-listener.h",
"src/libplatform/tracing/trace-event-listener.cc",
"src/libplatform/tracing/trace-event-listener.h",
]
deps += [
+ # TODO(skyostil): Switch TraceEventListener to protozero.
"//third_party/perfetto/protos/perfetto/trace:lite",
- "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite",
- "//third_party/perfetto/protos/perfetto/trace/chrome:zero",
- "//third_party/perfetto/src/tracing:client_api",
- "//third_party/perfetto/src/tracing:platform_posix",
]
}
}
@@ -3914,9 +4018,8 @@ v8_source_set("fuzzer_support") {
configs = [ ":internal_config_base" ]
- deps = [ ":v8" ]
-
public_deps = [
+ ":v8",
":v8_libbase",
":v8_libplatform",
":v8_maybe_icu",
@@ -3928,14 +4031,33 @@ v8_source_set("cppgc_base") {
sources = [
"include/cppgc/allocation.h",
- "include/cppgc/api-constants.h",
- "include/cppgc/finalizer-trait.h",
+ "include/cppgc/common.h",
+ "include/cppgc/custom-space.h",
"include/cppgc/garbage-collected.h",
- "include/cppgc/gc-info.h",
"include/cppgc/heap.h",
+ "include/cppgc/internal/accessors.h",
+ "include/cppgc/internal/api-contants.h",
+ "include/cppgc/internal/compiler-specific.h",
+ "include/cppgc/internal/finalizer-traits.h",
+ "include/cppgc/internal/gc-info.h",
+ "include/cppgc/internal/persistent-node.h",
+ "include/cppgc/internal/pointer-policies.h",
+ "include/cppgc/internal/prefinalizer-handler.h",
+ "include/cppgc/liveness-broker.h",
+ "include/cppgc/liveness-broker.h",
+ "include/cppgc/macros.h",
+ "include/cppgc/member.h",
+ "include/cppgc/persistent.h",
"include/cppgc/platform.h",
+ "include/cppgc/prefinalizer.h",
+ "include/cppgc/source-location.h",
+ "include/cppgc/trace-trait.h",
+ "include/cppgc/type-traits.h",
+ "include/cppgc/visitor.h",
"include/v8config.h",
"src/heap/cppgc/allocation.cc",
+ "src/heap/cppgc/free-list.cc",
+ "src/heap/cppgc/free-list.h",
"src/heap/cppgc/gc-info-table.cc",
"src/heap/cppgc/gc-info-table.h",
"src/heap/cppgc/gc-info.cc",
@@ -3943,23 +4065,75 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/heap-object-header-inl.h",
"src/heap/cppgc/heap-object-header.cc",
"src/heap/cppgc/heap-object-header.h",
+ "src/heap/cppgc/heap-page.cc",
+ "src/heap/cppgc/heap-page.h",
+ "src/heap/cppgc/heap-space.cc",
+ "src/heap/cppgc/heap-space.h",
+ "src/heap/cppgc/heap-visitor.h",
"src/heap/cppgc/heap.cc",
"src/heap/cppgc/heap.h",
+ "src/heap/cppgc/liveness-broker.cc",
+ "src/heap/cppgc/logging.cc",
+ "src/heap/cppgc/marker.cc",
+ "src/heap/cppgc/marker.h",
+ "src/heap/cppgc/marking-visitor.cc",
+ "src/heap/cppgc/marking-visitor.h",
+ "src/heap/cppgc/object-allocator-inl.h",
+ "src/heap/cppgc/object-allocator.cc",
+ "src/heap/cppgc/object-allocator.h",
+ "src/heap/cppgc/object-start-bitmap-inl.h",
+ "src/heap/cppgc/object-start-bitmap.h",
+ "src/heap/cppgc/page-memory-inl.h",
+ "src/heap/cppgc/page-memory.cc",
+ "src/heap/cppgc/page-memory.h",
+ "src/heap/cppgc/persistent-node.cc",
"src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/pointer-policies.cc",
+ "src/heap/cppgc/prefinalizer-handler.cc",
+ "src/heap/cppgc/prefinalizer-handler.h",
+ "src/heap/cppgc/raw-heap.cc",
+ "src/heap/cppgc/raw-heap.h",
"src/heap/cppgc/sanitizers.h",
+ "src/heap/cppgc/source-location.cc",
"src/heap/cppgc/stack.cc",
"src/heap/cppgc/stack.h",
+ "src/heap/cppgc/sweeper.cc",
+ "src/heap/cppgc/sweeper.h",
+ "src/heap/cppgc/worklist.h",
]
- if (target_cpu == "x64") {
- if (is_win) {
- sources += [ "src/heap/cppgc/asm/x64/push_registers_win.S" ]
- } else {
- sources += [ "src/heap/cppgc/asm/x64/push_registers.S" ]
+ if (is_clang || !is_win) {
+ if (target_cpu == "x64") {
+ sources += [ "src/heap/cppgc/asm/x64/push_registers_asm.cc" ]
+ } else if (target_cpu == "x86") {
+ sources += [ "src/heap/cppgc/asm/ia32/push_registers_asm.cc" ]
+ } else if (target_cpu == "arm") {
+ sources += [ "src/heap/cppgc/asm/arm/push_registers_asm.cc" ]
+ } else if (target_cpu == "arm64") {
+ sources += [ "src/heap/cppgc/asm/arm64/push_registers_asm.cc" ]
+ } else if (target_cpu == "ppc64") {
+ sources += [ "src/heap/cppgc/asm/ppc/push_registers_asm.cc" ]
+ } else if (target_cpu == "s390x") {
+ sources += [ "src/heap/cppgc/asm/s390/push_registers_asm.cc" ]
+ } else if (target_cpu == "mipsel") {
+ sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ]
+ } else if (target_cpu == "mips64el") {
+ sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ]
+ }
+ } else if (is_win) {
+ if (target_cpu == "x64") {
+ sources += [ "src/heap/cppgc/asm/x64/push_registers_masm.S" ]
+ } else if (target_cpu == "x86") {
+ sources += [ "src/heap/cppgc/asm/ia32/push_registers_masm.S" ]
+ } else if (target_cpu == "arm64") {
+ sources += [ "src/heap/cppgc/asm/arm64/push_registers_masm.S" ]
}
}
- configs = [ ":internal_config" ]
+ configs = [
+ ":internal_config",
+ ":cppgc_base_config",
+ ]
public_deps = [ ":v8_libbase" ]
}
@@ -4010,12 +4184,6 @@ v8_static_library("wee8") {
]
}
-v8_static_library("cppgc") {
- deps = [ ":cppgc_base" ]
-
- configs = [ ":internal_config" ]
-}
-
###############################################################################
# Executables
#
@@ -4074,6 +4242,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
":v8_libbase",
":v8_libplatform",
":v8_maybe_icu",
+ ":v8_tracing",
"//build/win:default_exe_manifest",
]
}
@@ -4308,6 +4477,14 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+ v8_component("cppgc") {
+ public_deps = [ ":cppgc_base" ]
+
+ configs = [ ":internal_config" ]
+
+ public_configs = [ ":external_config" ]
+ }
+
v8_component("cppgc_for_testing") {
testonly = true
@@ -4340,6 +4517,12 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+ group("cppgc") {
+ public_deps = [ ":cppgc_base" ]
+
+ public_configs = [ ":external_config" ]
+ }
+
group("cppgc_for_testing") {
testonly = true
@@ -4374,6 +4557,7 @@ v8_executable("d8") {
":v8",
":v8_libbase",
":v8_libplatform",
+ ":v8_tracing",
"//build/win:default_exe_manifest",
]
@@ -4392,10 +4576,6 @@ v8_executable("d8") {
if (v8_enable_vtunejit) {
deps += [ "src/third_party/vtune:v8_vtune" ]
}
-
- if (v8_use_perfetto) {
- deps += [ "//third_party/perfetto/src/tracing:in_process_backend" ]
- }
}
v8_executable("v8_hello_world") {
@@ -4551,6 +4731,7 @@ v8_source_set("wasm_module_runner") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_tracing",
]
public_deps = [ ":v8_maybe_icu" ]
@@ -4627,6 +4808,7 @@ v8_source_set("lib_wasm_fuzzer_common") {
deps = [
":generate_bytecode_builtins_list",
":run_torque",
+ ":v8_tracing",
]
public_deps = [ ":v8_maybe_icu" ]
@@ -4709,7 +4891,7 @@ if (!build_with_chromium && v8_use_perfetto) {
"-Wno-tautological-constant-compare",
]
}
- if (is_win) {
+ if (is_win && is_clang) {
cflags += [ "-Wno-microsoft-unqualified-friend" ]
}
}
@@ -4868,4 +5050,21 @@ if (!build_with_chromium && v8_use_perfetto) {
configs += [ "//build/config/compiler:no_chromium_code" ]
}
} # host_toolchain
+
+ v8_component("v8_libperfetto") {
+ configs = [ ":v8_tracing_config" ]
+ public_configs = [ "//third_party/perfetto/gn:public_config" ]
+ deps = [
+ "//third_party/perfetto/src/trace_processor:export_json",
+ "//third_party/perfetto/src/trace_processor:storage_minimal",
+ "//third_party/perfetto/src/tracing:client_api",
+ "//third_party/perfetto/src/tracing/core",
+
+ # TODO(skyostil): Support non-POSIX platforms.
+ "//third_party/perfetto/protos/perfetto/config:cpp",
+ "//third_party/perfetto/protos/perfetto/trace/track_event:zero",
+ "//third_party/perfetto/src/tracing:in_process_backend",
+ "//third_party/perfetto/src/tracing:platform_posix",
+ ]
+ }
} # if (!build_with_chromium && v8_use_perfetto)
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index 1eee48173a..1319a57917 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -2,6 +2,7 @@ adamk@chromium.org
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
+bikineev@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
clemensb@chromium.org
@@ -25,6 +26,7 @@ mslekova@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
+omerkatz@chromium.org
petermarshall@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 1bc687beaf..7b38c3dcd0 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -34,7 +34,7 @@ vars = {
'gn_version': 'git_revision:5ed3c9cc67b090d5e311e4bd2aba072173e82db9',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:de73cf6c4bde86f0a9c8d54151b69b0154a398f1',
+ 'luci_go': 'git_revision:56ae79476e3caf14da59d75118408aa778637936',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -55,7 +55,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
- 'android_sdk_platform-tools_version': 'Jxtur3_L9RzY4q79K-AwIahwFW4oi5uYVD5URx9h62wC',
+ 'android_sdk_platform-tools_version': 'zMVtBEihXp2Z0NYFNjLLmNrwy6252b_YWG6sh2l0QAcC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
@@ -67,20 +67,20 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_tools-lint_version': '89hXqZYzCum3delB5RV7J_QyWkaRodqdtQS0s3LMh3wC',
+ 'android_sdk_cmdline-tools_version': 'CR25ixsRhwuRnhdgDpGFyl9S0C_0HO9SUgFrwX46zq8C',
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '26e9d485d01d6e0eb9dadd21df767a63494c8fea',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '1b904cc30093c25d5fd48389bd58e3f7409bcf80',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '2b2aec6506a810f8d7bd018609de2c2450b3c121',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '454f4ba4b3a69feb03c73f93d789062033433b4c',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'd7aff76cf6bb0fbef3afa6c07718f78a80a70f8f',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f2223961702f00a8833874b0560d615a2cc42738',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'bb3f1802c237dd19105dd0f7919f99e536a39d10',
'v8/buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '7977eb176752aeec29d888cfe8e677ac12ed1c41',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '204a35a2a64f7179f8b76d7a0385653690839e21',
'v8/buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'v8/buildtools/linux64': {
@@ -108,7 +108,7 @@ deps = {
'v8/buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '196ba1aaa8ac285d94f4ea8d9836390a45360533',
'v8/buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '43bb9f872232f531bac80093ceb4de61c64b9ab7',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd999d54f4bca789543a2eb6c995af2d9b5a1f3ed',
'v8/buildtools/win': {
'packages': [
{
@@ -126,7 +126,7 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '2244b5ea295f8fda3179bef160c84ef8fa0ec9fc',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '716366f5685ad8aaf1208c64941e440e8e117441',
'condition': 'checkout_android',
},
'v8/third_party/android_sdk/public': {
@@ -160,15 +160,15 @@ deps = {
'version': Var('android_sdk_sources_version'),
},
{
- 'package': 'chromium/third_party/android_sdk/public/tools-lint',
- 'version': Var('android_sdk_tools-lint_version'),
+ 'package': 'chromium/third_party/android_sdk/public/cmdline-tools',
+ 'version': Var('android_sdk_cmdline-tools_version'),
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '032c78376792ef343ea361bca2181ba6dec6b95f',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e9a8d378c950ee44beec5dd5207e151f48e5b5be',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -176,23 +176,23 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '2457e41d8dc379f74662d3157e76339ba92cee06',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '277fe9120cce5f7a42d43554646fa447f88a1598',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '10b1902d893ea8cc43c69541d70868f91af3646b',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'a09ea700d32bab83325aff9ff34d0582e50e3997',
'v8/third_party/jinja2':
- Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
+ Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '3f90fa05c85718505e28c9c3426c1ba52843b9b7',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'cc958279ffd6853e0a1b227a7e957ca334fe56af',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '160b445a44e0daacf6f3f8570ca2707ec451f374',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f6b2ccdd091ff82da54150796297c3a96d7edb41',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd2f7d4285c4a5267f5be37a9c823a397daadad1b',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'v8/third_party/qemu-linux-x64': {
@@ -219,7 +219,7 @@ deps = {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': '7YlCgase5GlIanqHn-nZClSlZ5kQETJyVUYRF7Jjy6UC'
+ 'version': '5LzaFiFYMxwWXcgus5JjF74yr90M5oz9IMo29pTdoLgC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -236,7 +236,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '105a8460911176861a422738eee4daad8dfe88a2',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'de3e20662b84f0ee361a5ae11c99a9513df7c8e8',
'v8/tools/luci-go': {
'packages': [
{
@@ -266,11 +266,13 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + 'b9b24d1b0b80aafec393af085067e9eae829412f',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + 'ff70e0d273ed10995866c803f23e11250eb3dc52',
'v8/third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
'v8/third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '156be8c52f80cde343088b4a69a80579101b6e67',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '90fc47e6eed7bd1a59ad1603761303ef24705593',
+ 'v8/third_party/jsoncpp/source':
+ Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '645250b6690785be60ab6780ce4b58698d884d11',
'v8/third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
diff --git a/deps/v8/INTL_OWNERS b/deps/v8/INTL_OWNERS
index dbe6f3b7b5..6e9f2cedb9 100644
--- a/deps/v8/INTL_OWNERS
+++ b/deps/v8/INTL_OWNERS
@@ -1,3 +1,4 @@
cira@chromium.org
mnita@google.com
jshin@chromium.org
+ftang@chromium.org
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index 47470f49e4..54d6bbec1c 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -33,10 +33,6 @@
{
'WATCHLIST_DEFINITIONS': {
- 'api': {
- 'filepath': 'include/' \
- '|src/api\.(cc|h)$',
- },
'snapshot': {
'filepath': 'src/snapshot/',
},
@@ -94,9 +90,6 @@
},
'WATCHLISTS': {
- 'api': [
- 'yangguo+watch@chromium.org',
- ],
'csa': [
'jgruber+watch@chromium.org',
],
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
index 5b99eb9402..dde92c46ea 100644
--- a/deps/v8/build_overrides/build.gni
+++ b/deps/v8/build_overrides/build.gni
@@ -16,6 +16,14 @@ perfetto_build_with_embedder = true
perfetto_protobuf_target_prefix = "//"
perfetto_protobuf_gni = "//gni/proto_library.gni"
+# We use Perfetto's Trace Processor to convert traces to the legacy JSON
+# format.
+enable_perfetto_trace_processor = true
+
+# When building with Chromium, this determines whether we also want to use
+# the perfetto library from Chromium instead of declaring our own.
+use_perfetto_client_library = false
+
# Uncomment these to specify a different NDK location and version in
# non-Chromium builds.
# default_android_ndk_root = "//third_party/android_ndk"
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 0b2806ca94..9d286ebbfc 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -54,8 +54,7 @@ declare_args() {
# Expose symbols for dynamic linking.
v8_expose_symbols = false
- # Use Perfetto (https://perfetto.dev) as the default TracingController. Not
- # currently implemented.
+ # Implement tracing using Perfetto (https://perfetto.dev).
v8_use_perfetto = false
# Override global symbol level setting for v8
@@ -82,6 +81,12 @@ if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}
+# If chromium is configured to use the perfetto client library, v8 should also
+# use perfetto for tracing.
+if (build_with_chromium && use_perfetto_client_library) {
+ v8_use_perfetto = true
+}
+
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
diff --git a/deps/v8/include/DEPS b/deps/v8/include/DEPS
index ca60f841f5..7305ff5112 100644
--- a/deps/v8/include/DEPS
+++ b/deps/v8/include/DEPS
@@ -1,4 +1,5 @@
include_rules = [
# v8-inspector-protocol.h depends on generated files under include/inspector.
"+inspector",
+ "+cppgc/common.h",
]
diff --git a/deps/v8/include/cppgc/DEPS b/deps/v8/include/cppgc/DEPS
new file mode 100644
index 0000000000..04c343de27
--- /dev/null
+++ b/deps/v8/include/cppgc/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "-include",
+ "+v8config.h",
+ "+v8-platform.h",
+ "+cppgc",
+ "-src",
+]
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index 3e717ad7d4..49ad49c34d 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -6,12 +6,14 @@
#define INCLUDE_CPPGC_ALLOCATION_H_
#include <stdint.h>
+
#include <atomic>
-#include "include/cppgc/garbage-collected.h"
-#include "include/cppgc/gc-info.h"
-#include "include/cppgc/heap.h"
-#include "include/cppgc/internals.h"
+#include "cppgc/custom-space.h"
+#include "cppgc/garbage-collected.h"
+#include "cppgc/heap.h"
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/internal/gc-info.h"
namespace cppgc {
@@ -35,36 +37,80 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal {
}
static void* Allocate(cppgc::Heap* heap, size_t size, GCInfoIndex index);
+  static void* Allocate(cppgc::Heap* heap, size_t size, GCInfoIndex index,
+                        CustomSpaceIndex space_index);
friend class HeapObjectHeader;
};
} // namespace internal
-// Users with custom allocation needs (e.g. overriding size) should override
-// MakeGarbageCollectedTrait (see below) and inherit their trait from
-// MakeGarbageCollectedTraitBase to get access to low-level primitives.
+/**
+ * Base trait that provides utilities for advanced users that have custom
+ * allocation needs (e.g., overriding size). It's expected that users override
+ * MakeGarbageCollectedTrait (see below) and inherit from
+ * MakeGarbageCollectedTraitBase and make use of the low-level primitives
+ * offered to allocate and construct an object.
+ */
template <typename T>
class MakeGarbageCollectedTraitBase
: private internal::MakeGarbageCollectedTraitInternal {
+ private:
+ template <typename U, typename CustomSpace>
+ struct SpacePolicy {
+ static void* Allocate(Heap* heap, size_t size) {
+ // Custom space.
+ static_assert(std::is_base_of<CustomSpaceBase, CustomSpace>::value,
+ "Custom space must inherit from CustomSpaceBase.");
+ return internal::MakeGarbageCollectedTraitInternal::Allocate(
+ heap, size, internal::GCInfoTrait<T>::Index(),
+ CustomSpace::kSpaceIndex);
+ }
+ };
+
+ template <typename U>
+ struct SpacePolicy<U, void> {
+ static void* Allocate(Heap* heap, size_t size) {
+ // Default space.
+ return internal::MakeGarbageCollectedTraitInternal::Allocate(
+ heap, size, internal::GCInfoTrait<T>::Index());
+ }
+ };
+
protected:
- // Allocates an object of |size| bytes on |heap|.
- //
- // TODO(mlippautz): Allow specifying arena for specific embedder uses.
+ /**
+ * Allocates memory for an object of type T.
+ *
+ * \param heap The heap to allocate this object on.
+ * \param size The size that should be reserved for the object.
+ * \returns the memory to construct an object of type T on.
+ */
static void* Allocate(Heap* heap, size_t size) {
- return internal::MakeGarbageCollectedTraitInternal::Allocate(
- heap, size, internal::GCInfoTrait<T>::Index());
+ return SpacePolicy<T, typename SpaceTrait<T>::Space>::Allocate(heap, size);
}
- // Marks an object as being fully constructed, resulting in precise handling
- // by the garbage collector.
+ /**
+ * Marks an object as fully constructed, resulting in precise handling by the
+ * garbage collector.
+ *
+ * \param payload The base pointer the object is allocated at.
+ */
static void MarkObjectAsFullyConstructed(const void* payload) {
- // internal::MarkObjectAsFullyConstructed(payload);
internal::MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
payload);
}
};
+/**
+ * Default trait class that specifies how to construct an object of type T.
+ * Advanced users may override how an object is constructed using the utilities
+ * that are provided through MakeGarbageCollectedTraitBase.
+ *
+ * Any trait overriding construction must
+ * - allocate through MakeGarbageCollectedTraitBase<T>::Allocate;
+ * - mark the object as fully constructed using
+ * MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed;
+ */
template <typename T>
class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
public:
@@ -72,6 +118,10 @@ class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
static T* Call(Heap* heap, Args&&... args) {
static_assert(internal::IsGarbageCollectedType<T>::value,
"T needs to be a garbage collected object");
+ static_assert(
+ !internal::IsGarbageCollectedMixinType<T>::value ||
+ sizeof(T) <= internal::api_constants::kLargeObjectSizeThreshold,
+ "GarbageCollectedMixin may not be a large object");
void* memory = MakeGarbageCollectedTraitBase<T>::Allocate(heap, sizeof(T));
T* object = ::new (memory) T(std::forward<Args>(args)...);
MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed(object);
@@ -79,11 +129,31 @@ class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
}
};
-// Default MakeGarbageCollected: Constructs an instance of T, which is a garbage
-// collected type.
+/**
+ * Allows users to specify a post-construction callback for specific types. The
+ * callback is invoked on the instance of type T right after it has been
+ * constructed. This can be useful when the callback requires a
+ * fully-constructed object to be able to dispatch to virtual methods.
+ */
+template <typename T, typename = void>
+struct PostConstructionCallbackTrait {
+ static void Call(T*) {}
+};
+
+/**
+ * Constructs a managed object of type T where T transitively inherits from
+ * GarbageCollected.
+ *
+ * \param args List of arguments with which an instance of T will be
+ * constructed.
+ * \returns an instance of type T.
+ */
template <typename T, typename... Args>
T* MakeGarbageCollected(Heap* heap, Args&&... args) {
- return MakeGarbageCollectedTrait<T>::Call(heap, std::forward<Args>(args)...);
+ T* object =
+ MakeGarbageCollectedTrait<T>::Call(heap, std::forward<Args>(args)...);
+ PostConstructionCallbackTrait<T>::Call(object);
+ return object;
}
} // namespace cppgc
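For illustration only (not part of the diff): a minimal sketch of how an embedder allocates through this API and hooks the post-construction callback. The type Node and its callback body are hypothetical; the signatures follow the header above.

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/heap.h"
#include "cppgc/visitor.h"

// Hypothetical managed type; operator new is deleted on GarbageCollected
// types, so construction must go through MakeGarbageCollected.
class Node final : public cppgc::GarbageCollected<Node> {
 public:
  explicit Node(int value) : value_(value) {}
  void Trace(cppgc::Visitor*) const {}

 private:
  int value_;
};

namespace cppgc {
// Optional hook: invoked on the fully constructed Node, which makes it safe
// to dispatch to virtual methods from here.
template <>
struct PostConstructionCallbackTrait<Node> {
  static void Call(Node* node) { /* e.g. register `node` somewhere */ }
};
}  // namespace cppgc

Node* CreateNode(cppgc::Heap* heap) {
  return cppgc::MakeGarbageCollected<Node>(heap, 42);
}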
diff --git a/deps/v8/include/cppgc/common.h b/deps/v8/include/cppgc/common.h
new file mode 100644
index 0000000000..228b9abb74
--- /dev/null
+++ b/deps/v8/include/cppgc/common.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_COMMON_H_
+#define INCLUDE_CPPGC_COMMON_H_
+
+// TODO(chromium:1056170): Remove dependency on v8.
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+// Indicator for the stack state of the embedder.
+enum class EmbedderStackState {
+ kMayContainHeapPointers,
+ kNoHeapPointers,
+ kUnknown V8_ENUM_DEPRECATE_SOON("Use kMayContainHeapPointers") =
+ kMayContainHeapPointers,
+ kNonEmpty V8_ENUM_DEPRECATE_SOON("Use kMayContainHeapPointers") =
+ kMayContainHeapPointers,
+ kEmpty V8_ENUM_DEPRECATE_SOON("Use kNoHeapPointers") = kNoHeapPointers,
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_COMMON_H_
diff --git a/deps/v8/include/cppgc/custom-space.h b/deps/v8/include/cppgc/custom-space.h
new file mode 100644
index 0000000000..2597a5bdef
--- /dev/null
+++ b/deps/v8/include/cppgc/custom-space.h
@@ -0,0 +1,62 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_CUSTOM_SPACE_H_
+#define INCLUDE_CPPGC_CUSTOM_SPACE_H_
+
+#include <stddef.h>
+
+namespace cppgc {
+
+struct CustomSpaceIndex {
+ CustomSpaceIndex(size_t value) : value(value) {} // NOLINT
+ size_t value;
+};
+
+/**
+ * Top-level base class for custom spaces. Users must inherit from CustomSpace
+ * below.
+ */
+class CustomSpaceBase {
+ public:
+ virtual ~CustomSpaceBase() = default;
+ virtual CustomSpaceIndex GetCustomSpaceIndex() const = 0;
+};
+
+/**
+ * Base class that custom spaces should directly inherit from. The class
+ * inheriting from CustomSpace must define kSpaceIndex as a unique space
+ * index. These indices need to form a sequence starting at 0.
+ *
+ * Example:
+ * \code
+ * class CustomSpace1 : public CustomSpace<CustomSpace1> {
+ * public:
+ * static constexpr CustomSpaceIndex kSpaceIndex = 0;
+ * };
+ * class CustomSpace2 : public CustomSpace<CustomSpace2> {
+ * public:
+ * static constexpr CustomSpaceIndex kSpaceIndex = 1;
+ * };
+ * \endcode
+ */
+template <typename ConcreteCustomSpace>
+class CustomSpace : public CustomSpaceBase {
+ public:
+ CustomSpaceIndex GetCustomSpaceIndex() const final {
+ return ConcreteCustomSpace::kSpaceIndex;
+ }
+};
+
+/**
+ * User-overridable trait that allows pinning types to custom spaces.
+ */
+template <typename T, typename = void>
+struct SpaceTrait {
+ using Space = void;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_CUSTOM_SPACE_H_
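For illustration only (not part of the diff): a sketch of pinning a type to a custom space via the SpaceTrait hook, mirroring the example in the header's comment. CompactableSpace and LargeCache are hypothetical names.

#include "cppgc/custom-space.h"

// Hypothetical custom space; kSpaceIndex values across all registered custom
// spaces must form a sequence starting at 0.
class CompactableSpace : public cppgc::CustomSpace<CompactableSpace> {
 public:
  static constexpr cppgc::CustomSpaceIndex kSpaceIndex = 0;
};

class LargeCache;  // Hypothetical managed type (defined elsewhere).

namespace cppgc {
// Route all LargeCache allocations made through MakeGarbageCollected into
// CompactableSpace instead of the default space.
template <>
struct SpaceTrait<LargeCache> {
  using Space = CompactableSpace;
};
}  // namespace cppgc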
diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h
index 6c62daafdc..c263a9fecf 100644
--- a/deps/v8/include/cppgc/garbage-collected.h
+++ b/deps/v8/include/cppgc/garbage-collected.h
@@ -7,31 +7,20 @@
#include <type_traits>
-#include "include/cppgc/internals.h"
-#include "include/cppgc/platform.h"
+#include "cppgc/internal/api-constants.h"
+#include "cppgc/macros.h"
+#include "cppgc/platform.h"
+#include "cppgc/trace-trait.h"
+#include "cppgc/type-traits.h"
namespace cppgc {
-namespace internal {
-
-template <typename T, typename = void>
-struct IsGarbageCollectedType : std::false_type {
- static_assert(sizeof(T), "T must be fully defined");
-};
-template <typename T>
-struct IsGarbageCollectedType<
- T, void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
- : std::true_type {
- static_assert(sizeof(T), "T must be fully defined");
-};
+class Visitor;
-} // namespace internal
+namespace internal {
-template <typename>
-class GarbageCollected {
+class GarbageCollectedBase {
public:
- using IsGarbageCollectedTypeMarker = void;
-
// Must use MakeGarbageCollected.
void* operator new(size_t) = delete;
void* operator new[](size_t) = delete;
@@ -45,9 +34,159 @@ class GarbageCollected {
void operator delete[](void*) = delete;
protected:
+ GarbageCollectedBase() = default;
+};
+
+} // namespace internal
+
+/**
+ * Base class for managed objects. Only descendant types of GarbageCollected
+ * can be constructed using MakeGarbageCollected. Must be inherited from as
+ * the left-most base class.
+ *
+ * Types inheriting from GarbageCollected must provide a method of
+ * signature `void Trace(cppgc::Visitor*) const` that dispatches all managed
+ * pointers to the visitor and delegates to garbage-collected base classes.
+ * The method must be virtual unless the type directly inherits from
+ * GarbageCollected and is marked as final.
+ *
+ * \code
+ * // Example using final class.
+ * class FinalType final : public GarbageCollected<FinalType> {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const {
+ * // Dispatch using visitor->Trace(...);
+ * }
+ * };
+ *
+ * // Example using non-final base class.
+ * class NonFinalBase : public GarbageCollected<NonFinalBase> {
+ * public:
+ * virtual void Trace(cppgc::Visitor*) const {}
+ * };
+ *
+ * class FinalChild final : public NonFinalBase {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const final {
+ * // Dispatch using visitor->Trace(...);
+ * NonFinalBase::Trace(visitor);
+ * }
+ * };
+ * \endcode
+ */
+template <typename>
+class GarbageCollected : public internal::GarbageCollectedBase {
+ public:
+ using IsGarbageCollectedTypeMarker = void;
+
+ protected:
GarbageCollected() = default;
};
+/**
+ * Base class for managed mixin objects. Such objects cannot be constructed
+ * directly but must be mixed into the inheritance hierarchy of a
+ * GarbageCollected object.
+ *
+ * Types inheriting from GarbageCollectedMixin must override a virtual method
+ * of signature `void Trace(cppgc::Visitor*) const` that dispatches all managed
+ * pointers to the visitor and delegates to base classes.
+ *
+ * \code
+ * class Mixin : public GarbageCollectedMixin {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const override {
+ * // Dispatch using visitor->Trace(...);
+ * }
+ * };
+ * \endcode
+ */
+class GarbageCollectedMixin : public internal::GarbageCollectedBase {
+ public:
+ using IsGarbageCollectedMixinTypeMarker = void;
+
+ // Sentinel used to mark not-fully-constructed mixins.
+ static constexpr void* kNotFullyConstructedObject = nullptr;
+
+  // Provides a default implementation indicating that the vtable is not yet
+  // set up properly. This is used to get GCInfo objects for mixins so that
+  // these objects can be processed later on.
+ virtual TraceDescriptor GetTraceDescriptor() const {
+ return {kNotFullyConstructedObject, nullptr};
+ }
+
+ /**
+   * This Trace method must be overridden by objects inheriting from
+ * GarbageCollectedMixin.
+ */
+ virtual void Trace(cppgc::Visitor*) const {}
+};
+
+/**
+ * Macro defines all methods and markers needed for handling mixins. Must be
+ * used on the type that is inheriting from GarbageCollected *and*
+ * GarbageCollectedMixin.
+ *
+ * \code
+ * class Mixin : public GarbageCollectedMixin {
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const override {
+ * // Dispatch using visitor->Trace(...);
+ * }
+ * };
+ *
+ * class Foo : public GarbageCollected<Foo>, public Mixin {
+ * USING_GARBAGE_COLLECTED_MIXIN();
+ * public:
+ * void Trace(cppgc::Visitor* visitor) const override {
+ * // Dispatch using visitor->Trace(...);
+ * Mixin::Trace(visitor);
+ * }
+ * };
+ * \endcode
+ */
+#define USING_GARBAGE_COLLECTED_MIXIN() \
+ public: \
+ /* Marker is used by clang to check for proper usages of the macro. */ \
+ typedef int HasUsingGarbageCollectedMixinMacro; \
+ \
+ TraceDescriptor GetTraceDescriptor() const override { \
+ static_assert( \
+ internal::IsSubclassOfTemplate< \
+ std::remove_const_t<std::remove_pointer_t<decltype(this)>>, \
+ cppgc::GarbageCollected>::value, \
+ "Only garbage collected objects can have garbage collected mixins"); \
+ return {this, TraceTrait<std::remove_const_t< \
+ std::remove_pointer_t<decltype(this)>>>::Trace}; \
+ } \
+ \
+ private: \
+ friend class internal::__thisIsHereToForceASemicolonAfterThisMacro
+
+/**
+ * Merge two or more Mixins into one.
+ *
+ * \code
+ * class A : public GarbageCollectedMixin {};
+ * class B : public GarbageCollectedMixin {};
+ * class C : public A, public B {
+ * MERGE_GARBAGE_COLLECTED_MIXINS();
+ * public:
+ * };
+ * \endcode
+ */
+#define MERGE_GARBAGE_COLLECTED_MIXINS() \
+ public: \
+ /* When using multiple mixins the methods become */ \
+  /* ambiguous. Providing additional implementations */ \
+  /* disambiguates them again. */ \
+ TraceDescriptor GetTraceDescriptor() const override { \
+ return {kNotFullyConstructedObject, nullptr}; \
+ } \
+ \
+ private: \
+ friend class internal::__thisIsHereToForceASemicolonAfterThisMacro
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_GARBAGE_COLLECTED_H_
diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h
index a0568d534f..90046c3505 100644
--- a/deps/v8/include/cppgc/heap.h
+++ b/deps/v8/include/cppgc/heap.h
@@ -6,8 +6,11 @@
#define INCLUDE_CPPGC_HEAP_H_
#include <memory>
+#include <vector>
-#include "include/v8config.h"
+#include "cppgc/common.h"
+#include "cppgc/custom-space.h"
+#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
@@ -16,10 +19,39 @@ class Heap;
class V8_EXPORT Heap {
public:
- static std::unique_ptr<Heap> Create();
+ /**
+ * Specifies the stack state the embedder is in.
+ */
+ using StackState = EmbedderStackState;
+
+ struct HeapOptions {
+ static HeapOptions Default() { return {}; }
+
+ /**
+     * Custom spaces added to the heap are required to have indices forming a
+     * numbered sequence starting at 0, i.e., their kSpaceIndex must correspond
+     * to the index at which they reside in the vector.
+ */
+ std::vector<std::unique_ptr<CustomSpaceBase>> custom_spaces;
+ };
+
+ static std::unique_ptr<Heap> Create(HeapOptions = HeapOptions::Default());
virtual ~Heap() = default;
+ /**
+ * Forces garbage collection.
+ *
+ * \param source String specifying the source (or caller) triggering a
+ * forced garbage collection.
+ * \param reason String specifying the reason for the forced garbage
+ * collection.
+ * \param stack_state The embedder stack state, see StackState.
+ */
+ void ForceGarbageCollectionSlow(
+ const char* source, const char* reason,
+ StackState stack_state = StackState::kMayContainHeapPointers);
+
private:
Heap() = default;
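For illustration only (not part of the diff): a sketch of creating a heap with a custom space and forcing collections, using the signatures above. It assumes the hypothetical CompactableSpace from the earlier sketch and that any required platform initialization has already happened.

#include <memory>
#include <utility>

#include "cppgc/heap.h"

std::unique_ptr<cppgc::Heap> CreateHeap() {
  cppgc::Heap::HeapOptions options = cppgc::Heap::HeapOptions::Default();
  // The position in the vector must match the space's kSpaceIndex.
  options.custom_spaces.push_back(std::make_unique<CompactableSpace>());
  return cppgc::Heap::Create(std::move(options));
}

void CollectExamples(cppgc::Heap* heap) {
  // Conservative GC: the stack may still contain pointers into the heap.
  heap->ForceGarbageCollectionSlow("example", "testing");
  // Precise GC: the caller guarantees there are no heap pointers on the stack.
  heap->ForceGarbageCollectionSlow(
      "example", "testing", cppgc::Heap::StackState::kNoHeapPointers);
}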
diff --git a/deps/v8/include/cppgc/internal/accessors.h b/deps/v8/include/cppgc/internal/accessors.h
new file mode 100644
index 0000000000..ee0a0042fe
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/accessors.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_
+#define INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_
+
+#include "cppgc/internal/api-constants.h"
+
+namespace cppgc {
+
+class Heap;
+
+namespace internal {
+
+inline cppgc::Heap* GetHeapFromPayload(const void* payload) {
+ return *reinterpret_cast<cppgc::Heap**>(
+ ((reinterpret_cast<uintptr_t>(payload) & api_constants::kPageBaseMask) +
+ api_constants::kGuardPageSize) +
+ api_constants::kHeapOffset);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_
diff --git a/deps/v8/include/cppgc/internals.h b/deps/v8/include/cppgc/internal/api-constants.h
index 1e57779758..ef910a4857 100644
--- a/deps/v8/include/cppgc/internals.h
+++ b/deps/v8/include/cppgc/internal/api-constants.h
@@ -2,25 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef INCLUDE_CPPGC_INTERNALS_H_
-#define INCLUDE_CPPGC_INTERNALS_H_
+#ifndef INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
+#define INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
#include <stddef.h>
#include <stdint.h>
-#include "include/v8config.h"
+#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
-// Pre-C++17 custom implementation of std::void_t.
-template <typename... Ts>
-struct make_void {
- typedef void type;
-};
-template <typename... Ts>
-using void_t = typename make_void<Ts...>::type;
-
// Embedders should not rely on this code!
// Internal constants to avoid exposing internal types on the API surface.
@@ -33,9 +25,20 @@ static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload =
// Mask for in-construction bit.
static constexpr size_t kFullyConstructedBitMask = size_t{1};
+// Page constants used to align pointers to page begin.
+static constexpr size_t kPageSize = size_t{1} << 17;
+static constexpr size_t kPageAlignment = kPageSize;
+static constexpr size_t kPageBaseMask = ~(kPageAlignment - 1);
+static constexpr size_t kGuardPageSize = 4096;
+
+// Offset of the Heap backref.
+static constexpr size_t kHeapOffset = 0;
+
+static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
+
} // namespace api_constants
} // namespace internal
} // namespace cppgc
-#endif // INCLUDE_CPPGC_INTERNALS_H_
+#endif // INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
diff --git a/deps/v8/include/cppgc/internal/compiler-specific.h b/deps/v8/include/cppgc/internal/compiler-specific.h
new file mode 100644
index 0000000000..e1f5c1d57f
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/compiler-specific.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
+#define INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
+
+namespace cppgc {
+
+#if defined(__has_cpp_attribute)
+#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) __has_cpp_attribute(FEATURE)
+#else
+#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) 0
+#endif
+
+// [[no_unique_address]] comes with C++20 but is supported in clang with
+// -std >= c++11.
+#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address) // NOLINTNEXTLINE
+#define CPPGC_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define CPPGC_NO_UNIQUE_ADDRESS
+#endif
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
diff --git a/deps/v8/include/cppgc/finalizer-trait.h b/deps/v8/include/cppgc/internal/finalizer-trait.h
index 12216ed84e..a95126591c 100644
--- a/deps/v8/include/cppgc/finalizer-trait.h
+++ b/deps/v8/include/cppgc/internal/finalizer-trait.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef INCLUDE_CPPGC_FINALIZER_TRAIT_H_
-#define INCLUDE_CPPGC_FINALIZER_TRAIT_H_
+#ifndef INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
+#define INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
#include <type_traits>
-#include "include/cppgc/internals.h"
+#include "cppgc/type-traits.h"
namespace cppgc {
namespace internal {
@@ -87,4 +87,4 @@ constexpr FinalizationCallback FinalizerTrait<T>::kCallback;
} // namespace internal
} // namespace cppgc
-#endif // INCLUDE_CPPGC_FINALIZER_TRAIT_H_
+#endif // INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
diff --git a/deps/v8/include/cppgc/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h
index 987ba34fa4..9aac1361c6 100644
--- a/deps/v8/include/cppgc/gc-info.h
+++ b/deps/v8/include/cppgc/internal/gc-info.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef INCLUDE_CPPGC_GC_INFO_H_
-#define INCLUDE_CPPGC_GC_INFO_H_
+#ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
+#define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#include <stdint.h>
-#include "include/cppgc/finalizer-trait.h"
-#include "include/v8config.h"
+#include "cppgc/internal/finalizer-trait.h"
+#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
@@ -40,4 +40,4 @@ struct GCInfoTrait {
} // namespace internal
} // namespace cppgc
-#endif // INCLUDE_CPPGC_GC_INFO_H_
+#endif // INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
diff --git a/deps/v8/include/cppgc/internal/logging.h b/deps/v8/include/cppgc/internal/logging.h
new file mode 100644
index 0000000000..79beaef7d4
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/logging.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_LOGGING_H_
+#define INCLUDE_CPPGC_INTERNAL_LOGGING_H_
+
+#include "cppgc/source-location.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+void V8_EXPORT DCheckImpl(const char*,
+ const SourceLocation& = SourceLocation::Current());
+[[noreturn]] void V8_EXPORT
+FatalImpl(const char*, const SourceLocation& = SourceLocation::Current());
+
+// Used to ignore -Wunused-variable.
+template <typename>
+struct EatParams {};
+
+#if DEBUG
+#define CPPGC_DCHECK_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ ::cppgc::internal::DCheckImpl(message); \
+ } \
+ } while (false)
+#else
+#define CPPGC_DCHECK_MSG(condition, message) \
+ (static_cast<void>(::cppgc::internal::EatParams<decltype( \
+ static_cast<void>(condition), message)>{}))
+#endif
+
+#define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition)
+
+#define CPPGC_CHECK_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ ::cppgc::internal::FatalImpl(message); \
+ } \
+ } while (false)
+
+#define CPPGC_CHECK(condition) CPPGC_CHECK_MSG(condition, #condition)
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_LOGGING_H_
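For illustration only (not part of the diff): these macros are library-internal, but a small sketch of their intended behavior with a hypothetical helper.

#include <cstdint>

#include "cppgc/internal/logging.h"

// CPPGC_DCHECK compiles to a real check only in DEBUG builds; in release
// builds the condition is only type-checked inside decltype and never
// evaluated, so it must stay free of side effects. CPPGC_CHECK always checks.
void CheckPayloadAlignment(const void* payload) {
  CPPGC_DCHECK(payload != nullptr);
  CPPGC_CHECK(reinterpret_cast<std::uintptr_t>(payload) % alignof(void*) == 0);
}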
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
new file mode 100644
index 0000000000..11cf69623e
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -0,0 +1,109 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
+#define INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "cppgc/internal/logging.h"
+#include "cppgc/trace-trait.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class Visitor;
+
+namespace internal {
+
+// PersistentNode represents a variant with two states:
+// 1) traceable node with a back pointer to the Persistent object;
+// 2) freelist entry.
+class PersistentNode final {
+ public:
+ PersistentNode() = default;
+
+ PersistentNode(const PersistentNode&) = delete;
+ PersistentNode& operator=(const PersistentNode&) = delete;
+
+ void InitializeAsUsedNode(void* owner, TraceCallback trace) {
+ owner_ = owner;
+ trace_ = trace;
+ }
+
+ void InitializeAsFreeNode(PersistentNode* next) {
+ next_ = next;
+ trace_ = nullptr;
+ }
+
+ void UpdateOwner(void* owner) {
+ CPPGC_DCHECK(IsUsed());
+ owner_ = owner;
+ }
+
+ PersistentNode* FreeListNext() const {
+ CPPGC_DCHECK(!IsUsed());
+ return next_;
+ }
+
+ void Trace(Visitor* visitor) const {
+ CPPGC_DCHECK(IsUsed());
+ trace_(visitor, owner_);
+ }
+
+ bool IsUsed() const { return trace_; }
+
+ private:
+ // PersistentNode acts as a designated union:
+ // If trace_ != nullptr, owner_ points to the corresponding Persistent handle.
+ // Otherwise, next_ points to the next freed PersistentNode.
+ union {
+ void* owner_ = nullptr;
+ PersistentNode* next_;
+ };
+ TraceCallback trace_ = nullptr;
+};
+
+class V8_EXPORT PersistentRegion {
+ using PersistentNodeSlots = std::array<PersistentNode, 256u>;
+
+ public:
+ PersistentRegion() = default;
+
+ PersistentRegion(const PersistentRegion&) = delete;
+ PersistentRegion& operator=(const PersistentRegion&) = delete;
+
+ PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
+ if (!free_list_head_) {
+ EnsureNodeSlots();
+ }
+ PersistentNode* node = free_list_head_;
+ free_list_head_ = free_list_head_->FreeListNext();
+ node->InitializeAsUsedNode(owner, trace);
+ return node;
+ }
+
+ void FreeNode(PersistentNode* node) {
+ node->InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = node;
+ }
+
+ void Trace(Visitor*);
+
+ size_t NodesInUse() const;
+
+ private:
+ void EnsureNodeSlots();
+
+ std::vector<std::unique_ptr<PersistentNodeSlots>> nodes_;
+ PersistentNode* free_list_head_ = nullptr;
+};
+
+} // namespace internal
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
new file mode 100644
index 0000000000..fe8d94b57a
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -0,0 +1,133 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
+#define INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
+
+#include <cstdint>
+#include <type_traits>
+
+#include "cppgc/source-location.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+class PersistentRegion;
+
+// Tags to distinguish between strong and weak member types.
+class StrongMemberTag;
+class WeakMemberTag;
+class UntracedMemberTag;
+
+struct DijkstraWriteBarrierPolicy {
+ static void InitializingBarrier(const void*, const void*) {
+ // Since in initializing writes the source object is always white, having no
+ // barrier doesn't break the tri-color invariant.
+ }
+ static void AssigningBarrier(const void*, const void*) {
+ // TODO(chromium:1056170): Add actual implementation.
+ }
+};
+
+struct NoWriteBarrierPolicy {
+ static void InitializingBarrier(const void*, const void*) {}
+ static void AssigningBarrier(const void*, const void*) {}
+};
+
+class V8_EXPORT EnabledCheckingPolicy {
+ protected:
+ EnabledCheckingPolicy();
+ void CheckPointer(const void* ptr);
+
+ private:
+ void* impl_;
+};
+
+class DisabledCheckingPolicy {
+ protected:
+ void CheckPointer(const void* raw) {}
+};
+
+#if V8_ENABLE_CHECKS
+using DefaultCheckingPolicy = EnabledCheckingPolicy;
+#else
+using DefaultCheckingPolicy = DisabledCheckingPolicy;
+#endif
+
+class KeepLocationPolicy {
+ public:
+ constexpr const SourceLocation& Location() const { return location_; }
+
+ protected:
+ constexpr explicit KeepLocationPolicy(const SourceLocation& location)
+ : location_(location) {}
+
+ // KeepLocationPolicy must not copy underlying source locations.
+ KeepLocationPolicy(const KeepLocationPolicy&) = delete;
+ KeepLocationPolicy& operator=(const KeepLocationPolicy&) = delete;
+
+  // The location of the original moved-from object should be preserved.
+ KeepLocationPolicy(KeepLocationPolicy&&) = default;
+ KeepLocationPolicy& operator=(KeepLocationPolicy&&) = default;
+
+ private:
+ SourceLocation location_;
+};
+
+class IgnoreLocationPolicy {
+ public:
+ constexpr SourceLocation Location() const { return {}; }
+
+ protected:
+ constexpr explicit IgnoreLocationPolicy(const SourceLocation&) {}
+};
+
+#if CPPGC_SUPPORTS_OBJECT_NAMES
+using DefaultLocationPolicy = KeepLocationPolicy;
+#else
+using DefaultLocationPolicy = IgnoreLocationPolicy;
+#endif
+
+struct StrongPersistentPolicy {
+ using IsStrongPersistent = std::true_type;
+
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+};
+
+struct WeakPersistentPolicy {
+ using IsStrongPersistent = std::false_type;
+
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+};
+
+// Persistent/Member forward declarations.
+template <typename T, typename WeaknessPolicy,
+ typename LocationPolicy = DefaultLocationPolicy,
+ typename CheckingPolicy = DefaultCheckingPolicy>
+class BasicPersistent;
+template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
+ typename CheckingPolicy = DefaultCheckingPolicy>
+class BasicMember;
+
+// Special tag type used to denote some sentinel member. The semantics of the
+// sentinel are defined by the embedder.
+struct SentinelPointer {
+ template <typename T>
+ operator T*() const { // NOLINT
+ static constexpr intptr_t kSentinelValue = -1;
+ return reinterpret_cast<T*>(kSentinelValue);
+ }
+ // Hidden friends.
+ friend bool operator==(SentinelPointer, SentinelPointer) { return true; }
+ friend bool operator!=(SentinelPointer, SentinelPointer) { return false; }
+};
+
+} // namespace internal
+
+constexpr internal::SentinelPointer kSentinelPointer;
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
diff --git a/deps/v8/include/cppgc/internal/prefinalizer-handler.h b/deps/v8/include/cppgc/internal/prefinalizer-handler.h
new file mode 100644
index 0000000000..939a9b8ff0
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/prefinalizer-handler.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
+#define INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
+
+#include "cppgc/heap.h"
+#include "cppgc/liveness-broker.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT PreFinalizerRegistrationDispatcher final {
+ public:
+ using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*);
+ struct PreFinalizer {
+ void* object_;
+ PreFinalizerCallback callback_;
+
+ bool operator==(const PreFinalizer& other);
+ };
+
+  static void RegisterPrefinalizer(cppgc::Heap* heap,
+                                   PreFinalizer prefinalizer);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h
new file mode 100644
index 0000000000..69dbc11f1f
--- /dev/null
+++ b/deps/v8/include/cppgc/liveness-broker.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_LIVENESS_BROKER_H_
+#define INCLUDE_CPPGC_LIVENESS_BROKER_H_
+
+#include "cppgc/heap.h"
+#include "cppgc/member.h"
+#include "cppgc/trace-trait.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+namespace internal {
+class LivenessBrokerFactory;
+} // namespace internal
+
+class V8_EXPORT LivenessBroker final {
+ public:
+ template <typename T>
+ bool IsHeapObjectAlive(const T* object) const {
+ return object &&
+ IsHeapObjectAliveImpl(
+ TraceTrait<T>::GetTraceDescriptor(object).base_object_payload);
+ }
+
+ template <typename T>
+ bool IsHeapObjectAlive(const WeakMember<T>& weak_member) const {
+ return (weak_member != kSentinelPointer) &&
+ IsHeapObjectAlive<T>(weak_member.Get());
+ }
+
+ template <typename T>
+ bool IsHeapObjectAlive(const UntracedMember<T>& untraced_member) const {
+ return (untraced_member != kSentinelPointer) &&
+ IsHeapObjectAlive<T>(untraced_member.Get());
+ }
+
+ private:
+ LivenessBroker() = default;
+
+ bool IsHeapObjectAliveImpl(const void*) const;
+
+ friend class internal::LivenessBrokerFactory;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_LIVENESS_BROKER_H_
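For illustration only (not part of the diff): a sketch of using the broker to drop a dead weak reference. Session and PruneDeadSession are hypothetical; the broker itself is handed in by the library (its constructor is private), e.g. inside a pre-finalizer or weak callback.

#include "cppgc/garbage-collected.h"
#include "cppgc/liveness-broker.h"
#include "cppgc/member.h"

// Hypothetical managed type.
class Session final : public cppgc::GarbageCollected<Session> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// Clears a cached weak reference once its pointee has died.
void PruneDeadSession(const cppgc::LivenessBroker& broker,
                      cppgc::WeakMember<Session>& cached) {
  if (!broker.IsHeapObjectAlive(cached)) {
    cached = nullptr;
  }
}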
diff --git a/deps/v8/include/cppgc/macros.h b/deps/v8/include/cppgc/macros.h
new file mode 100644
index 0000000000..7c7a10e433
--- /dev/null
+++ b/deps/v8/include/cppgc/macros.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_MACROS_H_
+#define INCLUDE_CPPGC_MACROS_H_
+
+namespace cppgc {
+
+namespace internal {
+class __thisIsHereToForceASemicolonAfterThisMacro {};
+} // namespace internal
+
+// Use if the object is only stack allocated.
+#define CPPGC_STACK_ALLOCATED() \
+ public: \
+ using IsStackAllocatedTypeMarker = int; \
+ \
+ private: \
+ void* operator new(size_t) = delete; \
+ void* operator new(size_t, void*) = delete; \
+ friend class internal::__thisIsHereToForceASemicolonAfterThisMacro
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_MACROS_H_
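For illustration only (not part of the diff): a sketch of marking a type as stack-only; HeapStatsScope is a hypothetical name.

#include <stddef.h>

#include "cppgc/macros.h"

// The macro deletes operator new, so instances can only live on the stack
// (or be embedded by value), never on the managed or native heap.
class HeapStatsScope final {
  CPPGC_STACK_ALLOCATED();

 public:
  HeapStatsScope() = default;
};

void TakeStats() {
  HeapStatsScope scope;     // OK: stack allocated.
  // new HeapStatsScope();  // Does not compile: operator new is deleted.
}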
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
new file mode 100644
index 0000000000..a183edb96f
--- /dev/null
+++ b/deps/v8/include/cppgc/member.h
@@ -0,0 +1,206 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_MEMBER_H_
+#define INCLUDE_CPPGC_MEMBER_H_
+
+#include <atomic>
+#include <cstddef>
+#include <type_traits>
+
+#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/type-traits.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+class Visitor;
+
+namespace internal {
+
+// The basic class from which all Member classes are 'generated'.
+template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
+ typename CheckingPolicy>
+class BasicMember : private CheckingPolicy {
+ public:
+ using PointeeType = T;
+
+ constexpr BasicMember() = default;
+ constexpr BasicMember(std::nullptr_t) {} // NOLINT
+ BasicMember(SentinelPointer s) : raw_(s) {} // NOLINT
+ BasicMember(T* raw) : raw_(raw) { // NOLINT
+ InitializingWriteBarrier();
+ this->CheckPointer(raw_);
+ }
+ BasicMember(T& raw) : BasicMember(&raw) {} // NOLINT
+ BasicMember(const BasicMember& other) : BasicMember(other.Get()) {}
+ // Allow heterogeneous construction.
+ template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
+ typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicMember( // NOLINT
+ const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
+ OtherCheckingPolicy>& other)
+ : BasicMember(other.Get()) {}
+ // Construction from Persistent.
+ template <typename U, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy,
+ typename PersistentCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicMember( // NOLINT
+ const BasicPersistent<U, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ p)
+ : BasicMember(p.Get()) {}
+
+ BasicMember& operator=(const BasicMember& other) {
+ return operator=(other.Get());
+ }
+ // Allow heterogeneous assignment.
+ template <typename U, typename OtherWeaknessTag, typename OtherBarrierPolicy,
+ typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicMember& operator=(
+ const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
+ OtherCheckingPolicy>& other) {
+ return operator=(other.Get());
+ }
+ // Assignment from Persistent.
+ template <typename U, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy,
+ typename PersistentCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicMember& operator=(
+ const BasicPersistent<U, PersistentWeaknessPolicy,
+ PersistentLocationPolicy, PersistentCheckingPolicy>&
+ other) {
+ return operator=(other.Get());
+ }
+ BasicMember& operator=(T* other) {
+ SetRawAtomic(other);
+ AssigningWriteBarrier();
+ this->CheckPointer(Get());
+ return *this;
+ }
+ BasicMember& operator=(std::nullptr_t) {
+ Clear();
+ return *this;
+ }
+ BasicMember& operator=(SentinelPointer s) {
+ SetRawAtomic(s);
+ return *this;
+ }
+
+ template <typename OtherWeaknessTag, typename OtherBarrierPolicy,
+ typename OtherCheckingPolicy>
+ void Swap(BasicMember<T, OtherWeaknessTag, OtherBarrierPolicy,
+ OtherCheckingPolicy>& other) {
+ T* tmp = Get();
+ *this = other;
+ other = tmp;
+ }
+
+ explicit operator bool() const { return Get(); }
+ operator T*() const { return Get(); } // NOLINT
+ T* operator->() const { return Get(); }
+ T& operator*() const { return *Get(); }
+
+ T* Get() const {
+    // Executed by the mutator, hence a non-atomic load.
+ return raw_;
+ }
+
+ void Clear() { SetRawAtomic(nullptr); }
+
+ T* Release() {
+ T* result = Get();
+ Clear();
+ return result;
+ }
+
+ private:
+ void SetRawAtomic(T* raw) {
+ reinterpret_cast<std::atomic<T*>*>(&raw_)->store(raw,
+ std::memory_order_relaxed);
+ }
+ T* GetRawAtomic() const {
+ return reinterpret_cast<const std::atomic<T*>*>(&raw_)->load(
+ std::memory_order_relaxed);
+ }
+
+ void InitializingWriteBarrier() const {
+ WriteBarrierPolicy::InitializingBarrier(
+ reinterpret_cast<const void*>(&raw_), static_cast<const void*>(raw_));
+ }
+ void AssigningWriteBarrier() const {
+ WriteBarrierPolicy::AssigningBarrier(reinterpret_cast<const void*>(&raw_),
+ static_cast<const void*>(raw_));
+ }
+
+ T* raw_ = nullptr;
+
+ friend class cppgc::Visitor;
+};
+
+template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
+ typename CheckingPolicy1, typename T2, typename WeaknessTag2,
+ typename WriteBarrierPolicy2, typename CheckingPolicy2>
+bool operator==(
+ BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1> member1,
+ BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>
+ member2) {
+ return member1.Get() == member2.Get();
+}
+
+template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
+ typename CheckingPolicy1, typename T2, typename WeaknessTag2,
+ typename WriteBarrierPolicy2, typename CheckingPolicy2>
+bool operator!=(
+ BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1> member1,
+ BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>
+ member2) {
+ return !(member1 == member2);
+}
+
+template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy>
+struct IsWeak<
+ internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy, CheckingPolicy>>
+ : std::true_type {};
+
+} // namespace internal
+
+/**
+ * Members are used in classes to contain strong pointers to other garbage
+ * collected objects. All Member fields of a class must be traced in the
+ * class's Trace method.
+ */
+template <typename T>
+using Member = internal::BasicMember<T, internal::StrongMemberTag,
+ internal::DijkstraWriteBarrierPolicy>;
+
+/**
+ * WeakMember is similar to Member in that it is used to point to other garbage
+ * collected objects. However, instead of creating a strong pointer to the
+ * object, the WeakMember creates a weak pointer, which does not keep the
+ * pointee alive. Hence, if all pointers to a heap-allocated object are weak,
+ * the object will be garbage collected. At the time of GC the weak pointers
+ * will automatically be set to null.
+ */
+template <typename T>
+using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
+ internal::DijkstraWriteBarrierPolicy>;
+
+/**
+ * UntracedMember is a pointer to an on-heap object that is not traced for some
+ * reason. Do not use this unless you know what you are doing. Keeping raw
+ * pointers to on-heap objects is prohibited unless used from the stack. The
+ * pointee must be kept alive through other means.
+ */
+template <typename T>
+using UntracedMember = internal::BasicMember<T, internal::UntracedMemberTag,
+ internal::NoWriteBarrierPolicy>;
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_MEMBER_H_
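For illustration only (not part of the diff): a sketch of Member and WeakMember fields in a managed type, assuming the Trace overloads provided by cppgc/visitor.h; ListNode is a hypothetical name.

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

// Hypothetical doubly linked node: the strong Member keeps the successor
// alive, while the WeakMember back pointer is cleared by the GC once the
// predecessor dies.
class ListNode final : public cppgc::GarbageCollected<ListNode> {
 public:
  void set_next(ListNode* next) { next_ = next; }
  void set_prev(ListNode* prev) { prev_ = prev; }

  // All Member/WeakMember fields must be dispatched to the visitor.
  void Trace(cppgc::Visitor* visitor) const {
    visitor->Trace(next_);
    visitor->Trace(prev_);
  }

 private:
  cppgc::Member<ListNode> next_;
  cppgc::WeakMember<ListNode> prev_;
};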
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
new file mode 100644
index 0000000000..fc6b0b9d92
--- /dev/null
+++ b/deps/v8/include/cppgc/persistent.h
@@ -0,0 +1,304 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_PERSISTENT_H_
+#define INCLUDE_CPPGC_PERSISTENT_H_
+
+#include <type_traits>
+
+#include "cppgc/internal/persistent-node.h"
+#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/source-location.h"
+#include "cppgc/type-traits.h"
+#include "cppgc/visitor.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+// The basic class from which all Persistent classes are generated.
+template <typename T, typename WeaknessPolicy, typename LocationPolicy,
+ typename CheckingPolicy>
+class BasicPersistent : public LocationPolicy,
+ private WeaknessPolicy,
+ private CheckingPolicy {
+ public:
+ using typename WeaknessPolicy::IsStrongPersistent;
+ using PointeeType = T;
+
+ // Null-state/sentinel constructors.
+ BasicPersistent( // NOLINT
+ const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicPersistent(std::nullptr_t, // NOLINT
+ const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicPersistent( // NOLINT
+ SentinelPointer s, const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc), raw_(s) {}
+
+  // Raw value constructors.
+ BasicPersistent(T* raw, // NOLINT
+ const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc), raw_(raw) {
+ if (!IsValid()) return;
+ node_ = WeaknessPolicy::GetPersistentRegion(raw_).AllocateNode(
+ this, &BasicPersistent::Trace);
+ this->CheckPointer(Get());
+ }
+
+ BasicPersistent(T& raw, // NOLINT
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicPersistent(&raw, loc) {}
+
+ // Copy ctor.
+ BasicPersistent(const BasicPersistent& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicPersistent(other.Get(), loc) {}
+
+ // Heterogeneous ctor.
+ template <typename U, typename OtherWeaknessPolicy,
+ typename OtherLocationPolicy, typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicPersistent( // NOLINT
+ const BasicPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicPersistent(other.Get(), loc) {}
+
+  // Move ctor. The heterogeneous move ctor is not supported since, e.g., a
+  // strong persistent can't reuse the persistent node of a weak persistent.
+ BasicPersistent(
+ BasicPersistent&& other,
+ const SourceLocation& loc = SourceLocation::Current()) noexcept
+ : LocationPolicy(std::move(other)),
+ raw_(std::move(other.raw_)),
+ node_(std::move(other.node_)) {
+ if (!IsValid()) return;
+ node_->UpdateOwner(this);
+ other.raw_ = nullptr;
+ other.node_ = nullptr;
+ this->CheckPointer(Get());
+ }
+
+ // Constructor from member.
+ template <typename U, typename MemberBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicPersistent(internal::BasicMember<U, MemberBarrierPolicy, // NOLINT
+ MemberWeaknessTag, MemberCheckingPolicy>
+ member,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicPersistent(member.Get(), loc) {}
+
+ ~BasicPersistent() { Clear(); }
+
+ // Copy assignment.
+ BasicPersistent& operator=(const BasicPersistent& other) {
+ return operator=(other.Get());
+ }
+
+ template <typename U, typename OtherWeaknessPolicy,
+ typename OtherLocationPolicy, typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicPersistent& operator=(
+ const BasicPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
+ OtherCheckingPolicy>& other) {
+ return operator=(other.Get());
+ }
+
+ // Move assignment.
+ BasicPersistent& operator=(BasicPersistent&& other) {
+ if (this == &other) return *this;
+ Clear();
+ LocationPolicy::operator=(std::move(other));
+ raw_ = std::move(other.raw_);
+ node_ = std::move(other.node_);
+ if (!IsValid()) return *this;
+ node_->UpdateOwner(this);
+ other.raw_ = nullptr;
+ other.node_ = nullptr;
+ this->CheckPointer(Get());
+ return *this;
+ }
+
+ // Assignment from member.
+ template <typename U, typename MemberBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicPersistent& operator=(
+ internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy>
+ member) {
+ return operator=(member.Get());
+ }
+
+ BasicPersistent& operator=(T* other) {
+ Assign(other);
+ return *this;
+ }
+
+ BasicPersistent& operator=(std::nullptr_t) {
+ Clear();
+ return *this;
+ }
+
+ BasicPersistent& operator=(SentinelPointer s) {
+ Assign(s);
+ return *this;
+ }
+
+ explicit operator bool() const { return Get(); }
+ operator T*() const { return Get(); }
+ T* operator->() const { return Get(); }
+ T& operator*() const { return *Get(); }
+
+ T* Get() const { return raw_; }
+
+ void Clear() { Assign(nullptr); }
+
+ T* Release() {
+ T* result = Get();
+ Clear();
+ return result;
+ }
+
+ private:
+ static void Trace(Visitor* v, const void* ptr) {
+ const auto* persistent = static_cast<const BasicPersistent*>(ptr);
+ v->TraceRoot(*persistent, persistent->Location());
+ }
+
+ bool IsValid() const {
+ // Ideally, handling kSentinelPointer would be done by the embedder. On the
+    // other hand, making Persistent aware of it is beneficial since no node
+ // gets wasted.
+ return raw_ != nullptr && raw_ != kSentinelPointer;
+ }
+
+ void Assign(T* ptr) {
+ if (IsValid()) {
+ if (ptr && ptr != kSentinelPointer) {
+ // Simply assign the pointer reusing the existing node.
+ raw_ = ptr;
+ this->CheckPointer(ptr);
+ return;
+ }
+ WeaknessPolicy::GetPersistentRegion(raw_).FreeNode(node_);
+ node_ = nullptr;
+ }
+ raw_ = ptr;
+ if (!IsValid()) return;
+ node_ = WeaknessPolicy::GetPersistentRegion(raw_).AllocateNode(
+ this, &BasicPersistent::Trace);
+ this->CheckPointer(Get());
+ }
+
+ T* raw_ = nullptr;
+ PersistentNode* node_ = nullptr;
+};
+
+template <typename T1, typename WeaknessPolicy1, typename LocationPolicy1,
+ typename CheckingPolicy1, typename T2, typename WeaknessPolicy2,
+ typename LocationPolicy2, typename CheckingPolicy2>
+bool operator==(const BasicPersistent<T1, WeaknessPolicy1, LocationPolicy1,
+ CheckingPolicy1>& p1,
+ const BasicPersistent<T2, WeaknessPolicy2, LocationPolicy2,
+ CheckingPolicy2>& p2) {
+ return p1.Get() == p2.Get();
+}
+
+template <typename T1, typename WeaknessPolicy1, typename LocationPolicy1,
+ typename CheckingPolicy1, typename T2, typename WeaknessPolicy2,
+ typename LocationPolicy2, typename CheckingPolicy2>
+bool operator!=(const BasicPersistent<T1, WeaknessPolicy1, LocationPolicy1,
+ CheckingPolicy1>& p1,
+ const BasicPersistent<T2, WeaknessPolicy2, LocationPolicy2,
+ CheckingPolicy2>& p2) {
+ return !(p1 == p2);
+}
+
+template <typename T1, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
+ typename T2, typename MemberWriteBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy>
+bool operator==(const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy,
+ PersistentCheckingPolicy>& p,
+ BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>
+ m) {
+ return p.Get() == m.Get();
+}
+
+template <typename T1, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
+ typename T2, typename MemberWriteBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy>
+bool operator!=(const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy,
+ PersistentCheckingPolicy>& p,
+ BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>
+ m) {
+ return !(p == m);
+}
+
+template <typename T1, typename MemberWriteBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename T2, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
+bool operator==(BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>
+ m,
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy,
+ PersistentCheckingPolicy>& p) {
+ return m.Get() == p.Get();
+}
+
+template <typename T1, typename MemberWriteBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename T2, typename PersistentWeaknessPolicy,
+ typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
+bool operator!=(BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
+ MemberCheckingPolicy>
+ m,
+ const BasicPersistent<T1, PersistentWeaknessPolicy,
+ PersistentLocationPolicy,
+ PersistentCheckingPolicy>& p) {
+ return !(m == p);
+}
+
+template <typename T, typename LocationPolicy, typename CheckingPolicy>
+struct IsWeak<BasicPersistent<T, internal::WeakPersistentPolicy, LocationPolicy,
+ CheckingPolicy>> : std::true_type {};
+} // namespace internal
+
+/**
+ * Persistent is a way to create a strong pointer from an off-heap object to
+ * an on-heap object. As long as the Persistent handle is alive, the GC will
+ * keep the object pointed to alive. The Persistent handle is always a GC root
+ * from the point of view of the GC. Persistent must be constructed and
+ * destructed in the same thread.
+ */
+template <typename T>
+using Persistent =
+ internal::BasicPersistent<T, internal::StrongPersistentPolicy>;
+
+/**
+ * WeakPersistent is a way to create a weak pointer from an off-heap object to
+ * an on-heap object. The pointer is automatically cleared when the pointee gets
+ * collected. WeakPersistent must be constructed and destructed in the same
+ * thread.
+ */
+template <typename T>
+using WeakPersistent =
+ internal::BasicPersistent<T, internal::WeakPersistentPolicy>;
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_PERSISTENT_H_
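For illustration only (not part of the diff): a sketch of an off-heap owner rooting a managed object; ConfigHolder and Config are hypothetical names.

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/heap.h"
#include "cppgc/persistent.h"

class Config final : public cppgc::GarbageCollected<Config> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// Off-heap owner: the Persistent acts as a GC root and keeps the Config
// alive; a WeakPersistent would instead be cleared when the Config dies.
class ConfigHolder {
 public:
  explicit ConfigHolder(cppgc::Heap* heap)
      : config_(cppgc::MakeGarbageCollected<Config>(heap)) {}

  Config* config() const { return config_.Get(); }

 private:
  cppgc::Persistent<Config> config_;
};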
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
index f216c2730a..8dc5e14a7d 100644
--- a/deps/v8/include/cppgc/platform.h
+++ b/deps/v8/include/cppgc/platform.h
@@ -5,8 +5,8 @@
#ifndef INCLUDE_CPPGC_PLATFORM_H_
#define INCLUDE_CPPGC_PLATFORM_H_
-#include "include/v8-platform.h"
-#include "include/v8config.h"
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h
new file mode 100644
index 0000000000..2f6d68a1da
--- /dev/null
+++ b/deps/v8/include/cppgc/prefinalizer.h
@@ -0,0 +1,54 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_PREFINALIZER_H_
+#define INCLUDE_CPPGC_PREFINALIZER_H_
+
+#include "cppgc/internal/accessors.h"
+#include "cppgc/internal/compiler-specific.h"
+#include "cppgc/internal/prefinalizer-handler.h"
+#include "cppgc/liveness-broker.h"
+#include "cppgc/macros.h"
+
+namespace cppgc {
+
+namespace internal {
+
+template <typename T>
+class PrefinalizerRegistration final {
+ public:
+ explicit PrefinalizerRegistration(T* self) {
+ static_assert(sizeof(&T::InvokePreFinalizer) > 0,
+ "USING_PRE_FINALIZER(T) must be defined.");
+
+ cppgc::internal::PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
+ internal::GetHeapFromPayload(self), {self, T::InvokePreFinalizer});
+ }
+
+ void* operator new(size_t, void* location) = delete;
+ void* operator new(size_t) = delete;
+};
+
+} // namespace internal
+
+#define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \
+ public: \
+ static bool InvokePreFinalizer(const LivenessBroker& liveness_broker, \
+ void* object) { \
+ static_assert(internal::IsGarbageCollectedTypeV<Class>, \
+ "Only garbage collected objects can have prefinalizers"); \
+ Class* self = static_cast<Class*>(object); \
+ if (liveness_broker.IsHeapObjectAlive(self)) return false; \
+ self->Class::PreFinalizer(); \
+ return true; \
+ } \
+ \
+ private: \
+ CPPGC_NO_UNIQUE_ADDRESS internal::PrefinalizerRegistration<Class> \
+ prefinalizer_dummy_{this}; \
+ friend class internal::__thisIsHereToForceASemicolonAfterThisMacro
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_PREFINALIZER_H_
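
A hedged sketch of how the macro above is applied; WithCleanup and its Dispose() body are invented, and the object is assumed to be allocated on the cppgc heap so the registration can locate it:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/prefinalizer.h"
    #include "cppgc/visitor.h"

    class WithCleanup final : public cppgc::GarbageCollected<WithCleanup> {
      CPPGC_USING_PRE_FINALIZER(WithCleanup, Dispose);

     public:
      void Trace(cppgc::Visitor*) const {}

      void Dispose() {
        // Runs once the object has been found dead, before its memory is
        // reclaimed; release external (non-GC) resources here.
      }
    };
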
diff --git a/deps/v8/include/cppgc/source-location.h b/deps/v8/include/cppgc/source-location.h
new file mode 100644
index 0000000000..8cc52d6a53
--- /dev/null
+++ b/deps/v8/include/cppgc/source-location.h
@@ -0,0 +1,59 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_SOURCE_LOCATION_H_
+#define INCLUDE_CPPGC_SOURCE_LOCATION_H_
+
+#include <string>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if defined(__has_builtin)
+#define CPPGC_SUPPORTS_SOURCE_LOCATION \
+ (__has_builtin(__builtin_FUNCTION) && __has_builtin(__builtin_FILE) && \
+ __has_builtin(__builtin_LINE)) // NOLINT
+#elif defined(V8_CC_GNU) && __GNUC__ >= 7
+#define CPPGC_SUPPORTS_SOURCE_LOCATION 1
+#elif defined(V8_CC_INTEL) && __ICC >= 1800
+#define CPPGC_SUPPORTS_SOURCE_LOCATION 1
+#else
+#define CPPGC_SUPPORTS_SOURCE_LOCATION 0
+#endif
+
+namespace cppgc {
+
+// Encapsulates source location information. Mimics C++20's
+// std::source_location.
+class V8_EXPORT SourceLocation final {
+ public:
+#if CPPGC_SUPPORTS_SOURCE_LOCATION
+ static constexpr SourceLocation Current(
+ const char* function = __builtin_FUNCTION(),
+ const char* file = __builtin_FILE(), size_t line = __builtin_LINE()) {
+ return SourceLocation(function, file, line);
+ }
+#else
+ static constexpr SourceLocation Current() { return SourceLocation(); }
+#endif // CPPGC_SUPPORTS_SOURCE_LOCATION
+
+ constexpr SourceLocation() = default;
+
+ constexpr const char* Function() const { return function_; }
+ constexpr const char* FileName() const { return file_; }
+ constexpr size_t Line() const { return line_; }
+
+ std::string ToString() const;
+
+ private:
+ constexpr SourceLocation(const char* function, const char* file, size_t line)
+ : function_(function), file_(file), line_(line) {}
+
+ const char* function_ = nullptr;
+ const char* file_ = nullptr;
+ size_t line_ = 0u;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_SOURCE_LOCATION_H_
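
Current() is intended to be used as a defaulted argument so the caller's function/file/line are captured when the builtins are available (and an empty location otherwise); a sketch with an invented helper:

    #include <cstddef>

    #include "cppgc/source-location.h"

    // Hypothetical helper: callers are attributed automatically.
    void NoteAllocation(std::size_t bytes,
                        const cppgc::SourceLocation& loc =
                            cppgc::SourceLocation::Current()) {
      // With builtin support, loc.FileName()/loc.Line() describe the call
      // site rather than this body; otherwise they are nullptr/0.
      (void)bytes;
      (void)loc;
    }
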
diff --git a/deps/v8/include/cppgc/trace-trait.h b/deps/v8/include/cppgc/trace-trait.h
new file mode 100644
index 0000000000..e246bc53b7
--- /dev/null
+++ b/deps/v8/include/cppgc/trace-trait.h
@@ -0,0 +1,67 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_TRACE_TRAIT_H_
+#define INCLUDE_CPPGC_TRACE_TRAIT_H_
+
+#include <type_traits>
+#include "cppgc/type-traits.h"
+
+namespace cppgc {
+
+class Visitor;
+
+namespace internal {
+
+template <typename T,
+ bool =
+ IsGarbageCollectedMixinTypeV<typename std::remove_const<T>::type>>
+struct TraceTraitImpl;
+
+} // namespace internal
+
+using TraceCallback = void (*)(Visitor*, const void*);
+
+// TraceDescriptor is used to describe how to trace an object.
+struct TraceDescriptor {
+ // The adjusted base pointer of the object that should be traced.
+ const void* base_object_payload;
+ // A callback for tracing the object.
+ TraceCallback callback;
+};
+
+template <typename T>
+struct TraceTrait {
+ static_assert(internal::IsTraceableV<T>, "T must have a Trace() method");
+
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return internal::TraceTraitImpl<T>::GetTraceDescriptor(
+ static_cast<const T*>(self));
+ }
+
+ static void Trace(Visitor* visitor, const void* self) {
+ static_cast<const T*>(self)->Trace(visitor);
+ }
+};
+
+namespace internal {
+
+template <typename T>
+struct TraceTraitImpl<T, false> {
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return {self, TraceTrait<T>::Trace};
+ }
+};
+
+template <typename T>
+struct TraceTraitImpl<T, true> {
+ static TraceDescriptor GetTraceDescriptor(const void* self) {
+ return static_cast<const T*>(self)->GetTraceDescriptor();
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_TRACE_TRAIT_H_
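
To sketch how the two specializations differ (VisitObject is an invented helper): for a plain GarbageCollected type the descriptor is {object, &TraceTrait<T>::Trace}, while a GarbageCollectedMixin supplies its own descriptor with the base pointer already adjusted to the full object.

    #include "cppgc/trace-trait.h"

    // Invented helper showing how a marker might consume the trait.
    template <typename T>
    void VisitObject(cppgc::Visitor* visitor, const T* object) {
      cppgc::TraceDescriptor desc =
          cppgc::TraceTrait<T>::GetTraceDescriptor(object);
      // For mixins, base_object_payload may differ from |object| because of
      // the inner-pointer adjustment performed by the mixin itself.
      desc.callback(visitor, desc.base_object_payload);
    }
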
diff --git a/deps/v8/include/cppgc/type-traits.h b/deps/v8/include/cppgc/type-traits.h
new file mode 100644
index 0000000000..4d8ab809c8
--- /dev/null
+++ b/deps/v8/include/cppgc/type-traits.h
@@ -0,0 +1,109 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_TYPE_TRAITS_H_
+#define INCLUDE_CPPGC_TYPE_TRAITS_H_
+
+#include <type_traits>
+
+namespace cppgc {
+
+class Visitor;
+
+namespace internal {
+
+// Pre-C++17 custom implementation of std::void_t.
+template <typename... Ts>
+struct make_void {
+ typedef void type;
+};
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+// Not supposed to be specialized by the user.
+template <typename T>
+struct IsWeak : std::false_type {};
+
+template <typename T, template <typename... V> class U>
+struct IsSubclassOfTemplate {
+ private:
+ template <typename... W>
+ static std::true_type SubclassCheck(U<W...>*);
+ static std::false_type SubclassCheck(...);
+
+ public:
+ static constexpr bool value =
+ decltype(SubclassCheck(std::declval<T*>()))::value;
+};
+
+// IsTraceMethodConst is used to verify that all Trace methods are marked as
+// const. It is equivalent to IsTraceable but applied to a const object.
+template <typename T, typename = void>
+struct IsTraceMethodConst : std::false_type {};
+
+template <typename T>
+struct IsTraceMethodConst<T, void_t<decltype(std::declval<const T>().Trace(
+ std::declval<Visitor*>()))>> : std::true_type {
+};
+
+template <typename T, typename = void>
+struct IsTraceable : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsTraceable<
+ T, void_t<decltype(std::declval<T>().Trace(std::declval<Visitor*>()))>>
+ : std::true_type {
+ // All Trace methods should be marked as const. If an object of type
+ // 'T' is traceable then any object of type 'const T' should also
+ // be traceable.
+ static_assert(IsTraceMethodConst<T>(),
+ "Trace methods should be marked as const.");
+};
+
+template <typename T>
+constexpr bool IsTraceableV = IsTraceable<T>::value;
+
+template <typename T, typename = void>
+struct IsGarbageCollectedMixinType : std::false_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedMixinType<
+ T,
+ void_t<typename std::remove_const_t<T>::IsGarbageCollectedMixinTypeMarker>>
+ : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T, typename = void>
+struct IsGarbageCollectedType : IsGarbageCollectedMixinType<T> {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+struct IsGarbageCollectedType<
+ T, void_t<typename std::remove_const_t<T>::IsGarbageCollectedTypeMarker>>
+ : std::true_type {
+ static_assert(sizeof(T), "T must be fully defined");
+};
+
+template <typename T>
+constexpr bool IsGarbageCollectedTypeV =
+ internal::IsGarbageCollectedType<T>::value;
+
+template <typename T>
+constexpr bool IsGarbageCollectedMixinTypeV =
+ internal::IsGarbageCollectedMixinType<T>::value;
+
+} // namespace internal
+
+template <typename T>
+constexpr bool IsWeakV = internal::IsWeak<T>::value;
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_TYPE_TRAITS_H_
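
These traits are compile-time predicates only; a hedged sketch of the checks they enable (the Traced type is invented):

    #include "cppgc/type-traits.h"

    namespace {
    struct Traced {
      void Trace(cppgc::Visitor*) const {}
    };
    }  // namespace

    static_assert(cppgc::internal::IsTraceableV<Traced>,
                  "a const Trace(Visitor*) method makes a type traceable");
    static_assert(!cppgc::IsWeakV<Traced>,
                  "only WeakMember/WeakPersistent specializations are weak");
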
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
new file mode 100644
index 0000000000..a73a4abb2b
--- /dev/null
+++ b/deps/v8/include/cppgc/visitor.h
@@ -0,0 +1,138 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_VISITOR_H_
+#define INCLUDE_CPPGC_VISITOR_H_
+
+#include "cppgc/garbage-collected.h"
+#include "cppgc/internal/logging.h"
+#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/liveness-broker.h"
+#include "cppgc/member.h"
+#include "cppgc/source-location.h"
+#include "cppgc/trace-trait.h"
+
+namespace cppgc {
+namespace internal {
+class VisitorBase;
+} // namespace internal
+
+using WeakCallback = void (*)(const LivenessBroker&, const void*);
+
+/**
+ * Visitor passed to trace methods. All managed pointers must be reported to
+ * the visitor by invoking its trace method on them.
+ */
+class Visitor {
+ public:
+ template <typename T>
+ void Trace(const Member<T>& member) {
+ const T* value = member.GetRawAtomic();
+ CPPGC_DCHECK(value != kSentinelPointer);
+ Trace(value);
+ }
+
+ template <typename T>
+ void Trace(const WeakMember<T>& weak_member) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(internal::IsGarbageCollectedType<T>::value,
+ "T must be GarbageCollected or GarbageCollectedMixin type");
+
+ const T* value = weak_member.GetRawAtomic();
+
+ // Bailout assumes that WeakMember emits write barrier.
+ if (!value) {
+ return;
+ }
+
+ // TODO(chromium:1056170): DCHECK (or similar) for deleted values as they
+ // should come in at a different path.
+ VisitWeak(value, TraceTrait<T>::GetTraceDescriptor(value),
+ &HandleWeak<WeakMember<T>>, &weak_member);
+ }
+
+ template <typename Persistent,
+ std::enable_if_t<Persistent::IsStrongPersistent::value>* = nullptr>
+ void TraceRoot(const Persistent& p, const SourceLocation& loc) {
+ using PointeeType = typename Persistent::PointeeType;
+ static_assert(sizeof(PointeeType),
+ "Persistent's pointee type must be fully defined");
+ static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
+ "Persistent's pointee type must be GarbageCollected or "
+ "GarbageCollectedMixin");
+ if (!p.Get()) {
+ return;
+ }
+ VisitRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()));
+ }
+
+ template <
+ typename WeakPersistent,
+ std::enable_if_t<!WeakPersistent::IsStrongPersistent::value>* = nullptr>
+ void TraceRoot(const WeakPersistent& p, const SourceLocation& loc) {
+ using PointeeType = typename WeakPersistent::PointeeType;
+ static_assert(sizeof(PointeeType),
+ "Persistent's pointee type must be fully defined");
+ static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
+ "Persistent's pointee type must be GarbageCollected or "
+ "GarbageCollectedMixin");
+ VisitWeakRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
+ &HandleWeak<WeakPersistent>, &p);
+ }
+
+ template <typename T, void (T::*method)(const LivenessBroker&)>
+ void RegisterWeakCallbackMethod(const T* obj) {
+ RegisterWeakCallback(&WeakCallbackMethodDelegate<T, method>, obj);
+ }
+
+ virtual void RegisterWeakCallback(WeakCallback, const void*) {}
+
+ protected:
+ virtual void Visit(const void* self, TraceDescriptor) {}
+ virtual void VisitWeak(const void* self, TraceDescriptor, WeakCallback,
+ const void* weak_member) {}
+ virtual void VisitRoot(const void*, TraceDescriptor) {}
+ virtual void VisitWeakRoot(const void* self, TraceDescriptor, WeakCallback,
+ const void* weak_root) {}
+
+ private:
+ template <typename T, void (T::*method)(const LivenessBroker&)>
+ static void WeakCallbackMethodDelegate(const LivenessBroker& info,
+ const void* self) {
+ // Callback is registered through a potentially const Trace method but needs
+ // to be able to modify fields. See HandleWeak.
+ (const_cast<T*>(static_cast<const T*>(self))->*method)(info);
+ }
+
+ template <typename PointerType>
+ static void HandleWeak(const LivenessBroker& info, const void* object) {
+ const PointerType* weak = static_cast<const PointerType*>(object);
+ const auto* raw = weak->Get();
+ if (raw && !info.IsHeapObjectAlive(raw)) {
+ // Object is passed down through the marker as const. Alternatives are
+ // - non-const Trace method;
+ // - mutable pointer in MemberBase;
+ const_cast<PointerType*>(weak)->Clear();
+ }
+ }
+
+ Visitor() = default;
+
+ template <typename T>
+ void Trace(const T* t) {
+ static_assert(sizeof(T), "T must be fully defined");
+ static_assert(internal::IsGarbageCollectedType<T>::value,
+ "T must be GarbageCollected or GarbageCollectedMixin type");
+ if (!t) {
+ return;
+ }
+ Visit(t, TraceTrait<T>::GetTraceDescriptor(t));
+ }
+
+ friend class internal::VisitorBase;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_VISITOR_H_
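
A hedged example of the Trace method the class comment above expects; LinkedNode and its fields are illustrative, with Member/WeakMember coming from cppgc/member.h added earlier in this patch:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    class LinkedNode final : public cppgc::GarbageCollected<LinkedNode> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        visitor->Trace(next_);  // strong edge: keeps the pointee alive
        visitor->Trace(prev_);  // weak edge: cleared when the pointee dies
      }

     private:
      cppgc::Member<LinkedNode> next_;
      cppgc::WeakMember<LinkedNode> prev_;
    };
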
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 3f5410d1e1..706c37f958 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -204,6 +204,21 @@ domain Debugger
# Exception details.
optional Runtime.ExceptionDetails exceptionDetails
+ # Execute a Wasm Evaluator module on a given call frame.
+ experimental command executeWasmEvaluator
+ parameters
+ # WebAssembly call frame identifier to evaluate on.
+ CallFrameId callFrameId
+ # Code of the evaluator module.
+ binary evaluator
+ # Terminate execution after timing out (number of milliseconds).
+ experimental optional Runtime.TimeDelta timeout
+ returns
+ # Object wrapper for the evaluation result.
+ Runtime.RemoteObject result
+ # Exception details.
+ optional Runtime.ExceptionDetails exceptionDetails
+
# Returns possible locations for breakpoint. scriptId in start and end range locations should be
# the same.
command getPossibleBreakpoints
@@ -510,6 +525,18 @@ domain Debugger
JavaScript
WebAssembly
+ # Debug symbols available for a wasm script.
+ type DebugSymbols extends object
+ properties
+ # Type of the debug symbols.
+ enum type
+ None
+ SourceMap
+ EmbeddedDWARF
+ ExternalDWARF
+ # URL of the external symbol source.
+ optional string externalURL
+
# Fired when virtual machine fails to parse the script.
event scriptFailedToParse
parameters
@@ -584,6 +611,8 @@ domain Debugger
experimental optional integer codeOffset
# The language of the script.
experimental optional Debugger.ScriptLanguage scriptLanguage
+ # If the scriptLanguage is WebAssembly, the source of debug symbols for the module.
+ experimental optional Debugger.DebugSymbols debugSymbols
experimental domain HeapProfiler
depends on Runtime
@@ -980,6 +1009,7 @@ domain Runtime
f32
f64
v128
+ anyref
# Object class (constructor) name. Specified for `object` type values only.
optional string className
# Remote object value in case of primitive values or JSON values (if it was requested).
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 6051b644fb..c7ea4c2bd3 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -9,8 +9,8 @@
#include "libplatform/libplatform-export.h"
#include "libplatform/v8-tracing.h"
-#include "v8-platform.h" // NOLINT(build/include)
-#include "v8config.h" // NOLINT(build/include)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
namespace platform {
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index 79e6f62d23..45822d00f3 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -12,9 +12,12 @@
#include <vector>
#include "libplatform/libplatform-export.h"
-#include "v8-platform.h" // NOLINT(build/include)
+#include "v8-platform.h" // NOLINT(build/include_directory)
namespace perfetto {
+namespace trace_processor {
+class TraceProcessorStorage;
+}
class TracingSession;
}
@@ -28,7 +31,6 @@ namespace platform {
namespace tracing {
class TraceEventListener;
-class JSONTraceEventListener;
const int kTraceMaxNumArgs = 2;
@@ -197,6 +199,9 @@ class V8_PLATFORM_EXPORT TraceConfig {
TraceConfig() : enable_systrace_(false), enable_argument_filter_(false) {}
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
+ const StringList& GetEnabledCategories() const {
+ return included_categories_;
+ }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
@@ -229,6 +234,17 @@ class V8_PLATFORM_EXPORT TraceConfig {
class V8_PLATFORM_EXPORT TracingController
: public V8_PLATFORM_NON_EXPORTED_BASE(v8::TracingController) {
public:
+ TracingController();
+ ~TracingController() override;
+
+#if defined(V8_USE_PERFETTO)
+ // Must be called before StartTracing() if V8_USE_PERFETTO is true. Provides
+ // the output stream for the JSON trace data.
+ void InitializeForPerfetto(std::ostream* output_stream);
+ // Provide an optional listener for testing that will receive trace events.
+ // Must be called before StartTracing().
+ void SetTraceEventListenerForTesting(TraceEventListener* listener);
+#else // defined(V8_USE_PERFETTO)
// The pointer returned from GetCategoryGroupEnabled() points to a value with
// zero or more of the following bits. Used in this class only. The
// TRACE_EVENT macros should only use the value as a bool. These values must
@@ -242,19 +258,8 @@ class V8_PLATFORM_EXPORT TracingController
ENABLED_FOR_ETW_EXPORT = 1 << 3
};
- TracingController();
- ~TracingController() override;
-
// Takes ownership of |trace_buffer|.
void Initialize(TraceBuffer* trace_buffer);
-#ifdef V8_USE_PERFETTO
- // Must be called before StartTracing() if V8_USE_PERFETTO is true. Provides
- // the output stream for the JSON trace data.
- void InitializeForPerfetto(std::ostream* output_stream);
- // Provide an optional listener for testing that will receive trace events.
- // Must be called before StartTracing().
- void SetTraceEventListenerForTesting(TraceEventListener* listener);
-#endif
// v8::TracingController implementation.
const uint8_t* GetCategoryGroupEnabled(const char* category_group) override;
@@ -274,6 +279,10 @@ class V8_PLATFORM_EXPORT TracingController
unsigned int flags, int64_t timestamp) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
+
+ static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
+#endif // !defined(V8_USE_PERFETTO)
+
void AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) override;
void RemoveTraceStateObserver(
@@ -282,27 +291,32 @@ class V8_PLATFORM_EXPORT TracingController
void StartTracing(TraceConfig* trace_config);
void StopTracing();
- static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
-
protected:
+#if !defined(V8_USE_PERFETTO)
virtual int64_t CurrentTimestampMicroseconds();
virtual int64_t CurrentCpuTimestampMicroseconds();
+#endif // !defined(V8_USE_PERFETTO)
private:
+#if !defined(V8_USE_PERFETTO)
void UpdateCategoryGroupEnabledFlag(size_t category_index);
void UpdateCategoryGroupEnabledFlags();
+#endif // !defined(V8_USE_PERFETTO)
- std::unique_ptr<TraceBuffer> trace_buffer_;
- std::unique_ptr<TraceConfig> trace_config_;
std::unique_ptr<base::Mutex> mutex_;
- std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
+ std::unique_ptr<TraceConfig> trace_config_;
std::atomic_bool recording_{false};
-#ifdef V8_USE_PERFETTO
+ std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
+
+#if defined(V8_USE_PERFETTO)
std::ostream* output_stream_ = nullptr;
- std::unique_ptr<JSONTraceEventListener> json_listener_;
+ std::unique_ptr<perfetto::trace_processor::TraceProcessorStorage>
+ trace_processor_;
TraceEventListener* listener_for_testing_ = nullptr;
std::unique_ptr<perfetto::TracingSession> tracing_session_;
-#endif
+#else // !defined(V8_USE_PERFETTO)
+ std::unique_ptr<TraceBuffer> trace_buffer_;
+#endif // !defined(V8_USE_PERFETTO)
// Disallow copy and assign
TracingController(const TracingController&) = delete;
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 79a5d4d82a..f74406493b 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -165,7 +165,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "v8config.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
diff --git a/deps/v8/include/v8-inspector-protocol.h b/deps/v8/include/v8-inspector-protocol.h
index 612a2ebc39..a5ffb7d695 100644
--- a/deps/v8/include/v8-inspector-protocol.h
+++ b/deps/v8/include/v8-inspector-protocol.h
@@ -5,9 +5,9 @@
#ifndef V8_V8_INSPECTOR_PROTOCOL_H_
#define V8_V8_INSPECTOR_PROTOCOL_H_
-#include "inspector/Debugger.h" // NOLINT(build/include)
-#include "inspector/Runtime.h" // NOLINT(build/include)
-#include "inspector/Schema.h" // NOLINT(build/include)
-#include "v8-inspector.h" // NOLINT(build/include)
+#include "inspector/Debugger.h" // NOLINT(build/include_directory)
+#include "inspector/Runtime.h" // NOLINT(build/include_directory)
+#include "inspector/Schema.h" // NOLINT(build/include_directory)
+#include "v8-inspector.h" // NOLINT(build/include_directory)
#endif // V8_V8_INSPECTOR_PROTOCOL_H_
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 01274625c1..6573940e2f 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -11,7 +11,7 @@
#include <memory>
#include <unordered_map>
-#include "v8.h" // NOLINT(build/include)
+#include "v8.h" // NOLINT(build/include_directory)
namespace v8_inspector {
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 52ee403f52..127a77dbfc 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -10,8 +10,8 @@
#include <string.h>
#include <type_traits>
-#include "v8-version.h" // NOLINT(build/include)
-#include "v8config.h" // NOLINT(build/include)
+#include "v8-version.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -110,6 +110,16 @@ constexpr bool PointerCompressionIsEnabled() {
return kApiTaggedSize != kApiSystemPointerSize;
}
+constexpr bool HeapSandboxIsEnabled() {
+#ifdef V8_HEAP_SANDBOX
+ return true;
+#else
+ return false;
+#endif
+}
+
+using ExternalPointer_t = Address;
+
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
@@ -130,6 +140,15 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
kSmiTag;
}
+// {obj} must be the raw tagged pointer representation of a HeapObject
+// that's guaranteed to never be in ReadOnlySpace.
+V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
+
+// Returns if we need to throw when an error occurs. This infers the language
+// mode based on the current context and the closure. This returns true if the
+// language mode is strict.
+V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
+
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@@ -145,7 +164,6 @@ class Internals {
1 * kApiTaggedSize + 2 * kApiInt32Size;
static const int kOddballKindOffset = 4 * kApiTaggedSize + kApiDoubleSize;
- static const int kForeignAddressOffset = kApiTaggedSize;
static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
@@ -330,11 +348,36 @@ class Internals {
#endif
}
+ V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox(
+ internal::Address obj) {
+#ifdef V8_HEAP_SANDBOX
+ return internal::IsolateFromNeverReadOnlySpaceObject(obj);
+#else
+ // Not used in non-sandbox mode.
+ return nullptr;
+#endif
+ }
+
+ V8_INLINE static internal::Address ReadExternalPointerField(
+ internal::Isolate* isolate, internal::Address heap_object_ptr,
+ int offset) {
+ internal::Address value = ReadRawField<Address>(heap_object_ptr, offset);
+#ifdef V8_HEAP_SANDBOX
+ // We currently have to treat zero as nullptr in embedder slots.
+ if (value) value = DecodeExternalPointer(isolate, value);
+#endif
+ return value;
+ }
+
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/ptr-compr.* for details about pointer compression.
static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
+ // See v8:10391 for details about V8 heap sandbox.
+ static constexpr uint32_t kExternalPointerSalt =
+ 0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMask);
+
V8_INLINE static internal::Address GetRootFromOnHeapAddress(
internal::Address addr) {
return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
@@ -345,6 +388,15 @@ class Internals {
internal::Address root = GetRootFromOnHeapAddress(heap_object_ptr);
return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
}
+
+ V8_INLINE static Address DecodeExternalPointer(
+ const Isolate* isolate, ExternalPointer_t encoded_pointer) {
+#ifndef V8_HEAP_SANDBOX
+ return encoded_pointer;
+#else
+ return encoded_pointer ^ kExternalPointerSalt;
+#endif
+ }
#endif // V8_COMPRESS_POINTERS
};
@@ -371,15 +423,6 @@ V8_INLINE void PerformCastCheck(T* data) {
CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
}
-// {obj} must be the raw tagged pointer representation of a HeapObject
-// that's guaranteed to never be in ReadOnlySpace.
-V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
-
-// Returns if we need to throw when an error occurs. This infers the language
-// mode based on the current context and the closure. This returns true if the
-// language mode is strict.
-V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
-
// A base class for backing stores, which is needed due to vagaries of
// how static casts work with std::shared_ptr.
class BackingStoreBase {};
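
The sandbox plumbing above stores embedder pointers in encoded form; DecodeExternalPointer is a plain XOR with kExternalPointerSalt, so, assuming the write side applies the mirror-image XOR, the transform is its own inverse. A sketch with an illustrative salt value:

    #include <cstdint>

    // Illustrative constant only; the real one is Internals::kExternalPointerSalt.
    constexpr uint64_t kSaltForSketch = 0x7ffffffc;

    constexpr uint64_t EncodeSketch(uint64_t raw) { return raw ^ kSaltForSketch; }
    constexpr uint64_t DecodeSketch(uint64_t enc) { return enc ^ kSaltForSketch; }

    static_assert(DecodeSketch(EncodeSketch(0x1234)) == 0x1234,
                  "XOR with a fixed salt round-trips");
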
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 5d23cd665e..7cfd18b570 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -11,12 +11,34 @@
#include <memory>
#include <string>
-#include "v8config.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
class Isolate;
+// Valid priorities supported by the task scheduling infrastructure.
+enum class TaskPriority : uint8_t {
+ /**
+ * Best effort tasks are not critical for performance of the application. The
+ * platform implementation should preempt such tasks if higher priority tasks
+ * arrive.
+ */
+ kBestEffort,
+ /**
+ * User visible tasks are long running background tasks that will
+ * improve performance and memory usage of the application upon completion.
+ * Example: background compilation and garbage collection.
+ */
+ kUserVisible,
+ /**
+ * User blocking tasks are highest priority tasks that block the execution
+ * thread (e.g. major garbage collection). They must be finished as soon as
+ * possible.
+ */
+ kUserBlocking,
+};
+
/**
* A Task represents a unit of work.
*/
@@ -114,6 +136,82 @@ class TaskRunner {
};
/**
+ * Delegate that's passed to Job's worker task, providing an entry point to
+ * communicate with the scheduler.
+ */
+class JobDelegate {
+ public:
+ /**
+ * Returns true if this thread should return from the worker task on the
+ * Returns true if the worker task should return from Run() on the current
+ * thread ASAP. Workers should periodically invoke ShouldYield (or
+ */
+ virtual bool ShouldYield() = 0;
+
+ /**
+ * Notifies the scheduler that max concurrency was increased, and the number
+ * of workers should be adjusted accordingly. See Platform::PostJob() for more
+ * details.
+ */
+ virtual void NotifyConcurrencyIncrease() = 0;
+};
+
+/**
+ * Handle returned when posting a Job. Provides methods to control execution of
+ * the posted Job.
+ */
+class JobHandle {
+ public:
+ virtual ~JobHandle() = default;
+
+ /**
+ * Notifies the scheduler that max concurrency was increased, and the number
+ * of workers should be adjusted accordingly. See Platform::PostJob() for more
+ * details.
+ */
+ virtual void NotifyConcurrencyIncrease() = 0;
+
+ /**
+ * Contributes to the job on this thread. Doesn't return until all tasks have
+ * completed and max concurrency becomes 0. When Join() is called and max
+ * concurrency reaches 0, it should not increase again. This also promotes
+ * this Job's priority to be at least as high as the calling thread's
+ * priority.
+ */
+ virtual void Join() = 0;
+
+ /**
+ * Forces all existing workers to yield ASAP. Waits until they have all
+ * returned from the Job's callback before returning.
+ */
+ virtual void Cancel() = 0;
+
+ /**
+ * Returns true if associated with a Job and other methods may be called.
+ * Returns false after Join() or Cancel() was called.
+ */
+ virtual bool IsRunning() = 0;
+};
+
+/**
+ * A JobTask represents work to run in parallel from Platform::PostJob().
+ */
+class JobTask {
+ public:
+ virtual ~JobTask() = default;
+
+ virtual void Run(JobDelegate* delegate) = 0;
+
+ /**
+ * Controls the maximum number of threads calling Run() concurrently. Run() is
+ * only invoked if the number of threads previously running Run() was less
+ * than the value returned. Since GetMaxConcurrency() is a leaf function, it
+ * must not call back any JobHandle methods.
+ */
+ virtual size_t GetMaxConcurrency() const = 0;
+};
+
+/**
* The interface represents complex arguments to trace events.
*/
class ConvertableToTraceFormat {
@@ -138,6 +236,10 @@ class TracingController {
public:
virtual ~TracingController() = default;
+ // In Perfetto mode, trace events are written using Perfetto's Track Event
+ // API directly without going through the embedder. However, it is still
+ // possible to observe tracing being enabled and disabled.
+#if !defined(V8_USE_PERFETTO)
/**
* Called by TRACE_EVENT* macros, don't call this directly.
* The name parameter is a category group for example:
@@ -183,6 +285,7 @@ class TracingController {
**/
virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) {}
+#endif // !defined(V8_USE_PERFETTO)
class TraceStateObserver {
public:
@@ -369,6 +472,64 @@ class Platform {
virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
/**
+ * Posts |job_task| to run in parallel. Returns a JobHandle associated with
+ * the Job, which can be joined or canceled.
+ * This avoids degenerate cases:
+ * - Calling CallOnWorkerThread() for each work item, causing significant
+ * overhead.
+ * - Fixed number of CallOnWorkerThread() calls that split the work and might
+ * run for a long time. This is problematic when many components post
+ * "num cores" tasks and all expect to use all the cores. In these cases,
+ * the scheduler lacks context to be fair to multiple same-priority requests
+ * and/or ability to request lower priority work to yield when high priority
+ * work comes in.
+ * A canonical implementation of |job_task| looks like:
+ * class MyJobTask : public JobTask {
+ * public:
+ * MyJobTask(...) : worker_queue_(...) {}
+ * // JobTask:
+ * void Run(JobDelegate* delegate) override {
+ * while (!delegate->ShouldYield()) {
+ * // Smallest unit of work.
+ * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
+ * if (!work_item) return;
+ * ProcessWork(work_item);
+ * }
+ * }
+ *
+ * size_t GetMaxConcurrency() const override {
+ * return worker_queue_.GetSize(); // Thread safe.
+ * }
+ * };
+ * auto handle = PostJob(TaskPriority::kUserVisible,
+ * std::make_unique<MyJobTask>(...));
+ * handle->Join();
+ *
+ * PostJob() and methods of the returned JobHandle/JobDelegate must never be
+ * called while holding a lock that could be acquired by JobTask::Run or
+ * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
+ * because [1] JobTask::GetMaxConcurrency may be invoked while holding
+ * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
+ * if that lock is *never* held while calling back into JobHandle from any
+ * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
+ * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
+ * (B=>JobHandle::foo=>B deadlock).
+ *
+ * A sufficient PostJob() implementation that uses the default Job provided in
+ * libplatform looks like:
+ * std::unique_ptr<JobHandle> PostJob(
+ * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+ * return std::make_unique<DefaultJobHandle>(
+ * std::make_shared<DefaultJobState>(
+ * this, std::move(job_task), kNumThreads));
+ * }
+ */
+ virtual std::unique_ptr<JobHandle> PostJob(
+ TaskPriority priority, std::unique_ptr<JobTask> job_task) {
+ return nullptr;
+ }
+
+ /**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least
* millisecond-precision values. For this reason,
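
Complementing the canonical Run()/Join() example embedded in the PostJob() comment above, a hedged sketch of the producer side (WorkQueue is a stand-in for any thread-safe queue the embedder owns): after enqueueing new items, NotifyConcurrencyIncrease() tells the scheduler that GetMaxConcurrency() may now return a larger value.

    #include <utility>

    #include "v8-platform.h"

    // Only the JobHandle call is taken from the API above; the queue is assumed.
    template <typename WorkQueue, typename WorkItem>
    void AddWork(v8::JobHandle& handle, WorkQueue& queue, WorkItem item) {
      queue.Push(std::move(item));         // raises GetMaxConcurrency()
      handle.NotifyConcurrencyIncrease();  // lets the scheduler add workers
    }
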
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 866d799076..c3b25e8d6a 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -10,7 +10,7 @@
#include <unordered_set>
#include <vector>
-#include "v8.h" // NOLINT(build/include)
+#include "v8.h" // NOLINT(build/include_directory)
/**
* Profiler support for the V8 JavaScript engine.
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 29d813e427..89ec4f6a78 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -5,7 +5,7 @@
#ifndef V8_UTIL_H_
#define V8_UTIL_H_
-#include "v8.h" // NOLINT(build/include)
+#include "v8.h" // NOLINT(build/include_directory)
#include <assert.h>
#include <map>
#include <vector>
diff --git a/deps/v8/include/v8-version-string.h b/deps/v8/include/v8-version-string.h
index fb84144d54..8faed2a740 100644
--- a/deps/v8/include/v8-version-string.h
+++ b/deps/v8/include/v8-version-string.h
@@ -5,7 +5,7 @@
#ifndef V8_VERSION_STRING_H_
#define V8_VERSION_STRING_H_
-#include "v8-version.h" // NOLINT(build/include)
+#include "v8-version.h" // NOLINT(build/include_directory)
// This is here rather than v8-version.h to keep that file simple and
// machine-processable.
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 64f1848665..cee7990e4b 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 8
-#define V8_MINOR_VERSION 3
-#define V8_BUILD_NUMBER 110
-#define V8_PATCH_LEVEL 9
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 371
+#define V8_PATCH_LEVEL 19
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8-wasm-trap-handler-posix.h b/deps/v8/include/v8-wasm-trap-handler-posix.h
index 998d0a41bb..9b8e8a5b49 100644
--- a/deps/v8/include/v8-wasm-trap-handler-posix.h
+++ b/deps/v8/include/v8-wasm-trap-handler-posix.h
@@ -7,7 +7,7 @@
#include <signal.h>
-#include "v8config.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
/**
diff --git a/deps/v8/include/v8-wasm-trap-handler-win.h b/deps/v8/include/v8-wasm-trap-handler-win.h
index 0185df6401..9d3cad5848 100644
--- a/deps/v8/include/v8-wasm-trap-handler-win.h
+++ b/deps/v8/include/v8-wasm-trap-handler-win.h
@@ -7,7 +7,7 @@
#include <windows.h>
-#include "v8config.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
/**
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 406c47383b..18d72f1630 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -18,15 +18,17 @@
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
+
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
-#include "v8-internal.h" // NOLINT(build/include)
-#include "v8-version.h" // NOLINT(build/include)
-#include "v8config.h" // NOLINT(build/include)
+#include "cppgc/common.h"
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-version.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@@ -123,19 +125,21 @@ namespace internal {
enum class ArgumentsType;
template <ArgumentsType>
class Arguments;
+template <typename T>
+class CustomArguments;
class DeferredHandles;
+class FunctionCallbackArguments;
+class GlobalHandles;
class Heap;
class HeapObject;
class ExternalString;
class Isolate;
class LocalEmbedderHeapTracer;
class MicrotaskQueue;
-struct ScriptStreamingData;
-template<typename T> class CustomArguments;
class PropertyCallbackArguments;
-class FunctionCallbackArguments;
-class GlobalHandles;
+class ReadOnlyHeap;
class ScopedExternalStringLock;
+struct ScriptStreamingData;
class ThreadLocalTop;
namespace wasm {
@@ -1752,11 +1756,9 @@ class V8_EXPORT ScriptCompiler {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON(
"This class takes ownership of source_stream, so use the constructor "
"taking a unique_ptr to make these semantics clearer")
-#endif
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
@@ -4764,11 +4766,17 @@ class V8_EXPORT CompiledWasmModule {
*/
MemorySpan<const uint8_t> GetWireBytesRef();
+ const std::string& source_url() const { return source_url_; }
+
private:
- explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>);
- friend class Utils;
+ friend class WasmModuleObject;
+ friend class WasmStreaming;
+
+ explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>,
+ const char* source_url, size_t url_length);
const std::shared_ptr<internal::wasm::NativeModule> native_module_;
+ const std::string source_url_;
};
// An instance of WebAssembly.Module.
@@ -5936,37 +5944,6 @@ class V8_EXPORT RegExp : public Object {
};
/**
- * An instance of the built-in FinalizationRegistry constructor.
- *
- * The C++ name is FinalizationGroup for backwards compatibility. This API is
- * experimental and deprecated.
- */
-class V8_EXPORT FinalizationGroup : public Object {
- public:
- /**
- * Runs the cleanup callback of the given FinalizationRegistry.
- *
- * V8 will inform the embedder that there are finalizer callbacks be
- * called through HostCleanupFinalizationGroupCallback.
- *
- * HostCleanupFinalizationGroupCallback should schedule a task to
- * call FinalizationGroup::Cleanup() at some point in the
- * future. It's the embedders responsiblity to make this call at a
- * time which does not interrupt synchronous ECMAScript code
- * execution.
- *
- * If the result is Nothing<bool> then an exception has
- * occurred. Otherwise the result is |true| if the cleanup callback
- * was called successfully. The result is never |false|.
- */
- V8_DEPRECATED(
- "FinalizationGroup cleanup is automatic if "
- "HostCleanupFinalizationGroupCallback is not set")
- static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
- Local<FinalizationGroup> finalization_group);
-};
-
-/**
* A JavaScript value that wraps a C++ void*. This type of value is mainly used
* to associate C++ data structures with JavaScript objects.
*/
@@ -7172,6 +7149,9 @@ class V8_EXPORT Exception {
static Local<Value> ReferenceError(Local<String> message);
static Local<Value> SyntaxError(Local<String> message);
static Local<Value> TypeError(Local<String> message);
+ static Local<Value> WasmCompileError(Local<String> message);
+ static Local<Value> WasmLinkError(Local<String> message);
+ static Local<Value> WasmRuntimeError(Local<String> message);
static Local<Value> Error(Local<String> message);
/**
@@ -7216,20 +7196,6 @@ typedef void (*BeforeCallEnteredCallback)(Isolate*);
typedef void (*CallCompletedCallback)(Isolate*);
/**
- * HostCleanupFinalizationGroupCallback is called when we require the
- * embedder to enqueue a task that would call
- * FinalizationGroup::Cleanup().
- *
- * The FinalizationGroup is the one for which the embedder needs to
- * call FinalizationGroup::Cleanup() on.
- *
- * The context provided is the one in which the FinalizationGroup was
- * created in.
- */
-typedef void (*HostCleanupFinalizationGroupCallback)(
- Local<Context> context, Local<FinalizationGroup> fg);
-
-/**
* HostImportModuleDynamicallyCallback is called when we require the
* embedder to load a module. This is used as part of the dynamic
* import syntax.
@@ -7255,7 +7221,8 @@ typedef MaybeLocal<Promise> (*HostImportModuleDynamicallyCallback)(
/**
* HostInitializeImportMetaObjectCallback is called the first time import.meta
- * is accessed for a module. Subsequent access will reuse the same value.
+ * is accessed for a module. Subsequent access will reuse the same value. The
+ * callback must not throw.
*
* The method combines two implementation-defined abstract operations into one:
* HostGetImportMetaProperties and HostFinalizeImportMeta.
@@ -7326,6 +7293,7 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
+V8_DEPRECATE_SOON("Use *WithData version.")
typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -7517,6 +7485,9 @@ typedef bool (*WasmThreadsEnabledCallback)(Local<Context> context);
typedef Local<String> (*WasmLoadSourceMapCallback)(Isolate* isolate,
const char* name);
+// --- Callback for checking if WebAssembly Simd is enabled ---
+typedef bool (*WasmSimdEnabledCallback)(Local<Context> context);
+
// --- Garbage Collection Callbacks ---
/**
@@ -7594,6 +7565,7 @@ class V8_EXPORT SharedMemoryStatistics {
size_t read_only_space_physical_size_;
friend class V8;
+ friend class internal::ReadOnlyHeap;
};
/**
@@ -7873,16 +7845,12 @@ enum class MemoryPressureLevel { kNone, kModerate, kCritical };
*/
class V8_EXPORT EmbedderHeapTracer {
public:
+ using EmbedderStackState = cppgc::EmbedderStackState;
+
enum TraceFlags : uint64_t {
kNoFlags = 0,
kReduceMemory = 1 << 0,
- };
-
- // Indicator for the stack state of the embedder.
- enum EmbedderStackState {
- kUnknown,
- kNonEmpty,
- kEmpty,
+ kForced = 1 << 2,
};
/**
@@ -8445,7 +8413,7 @@ class V8_EXPORT Isolate {
kOptimizedFunctionWithOneShotBytecode = 71,
kRegExpMatchIsTrueishOnNonJSRegExp = 72,
kRegExpMatchIsFalseishOnJSRegExp = 73,
- kDateGetTimezoneOffset = 74,
+ kDateGetTimezoneOffset = 74, // Unused.
kStringNormalize = 75,
kCallSiteAPIGetFunctionSloppyCall = 76,
kCallSiteAPIGetThisSloppyCall = 77,
@@ -8461,6 +8429,24 @@ class V8_EXPORT Isolate {
kDateTimeFormatDateTimeStyle = 87,
kBreakIteratorTypeWord = 88,
kBreakIteratorTypeLine = 89,
+ kInvalidatedArrayBufferDetachingProtector = 90,
+ kInvalidatedArrayConstructorProtector = 91,
+ kInvalidatedArrayIteratorLookupChainProtector = 92,
+ kInvalidatedArraySpeciesLookupChainProtector = 93,
+ kInvalidatedIsConcatSpreadableLookupChainProtector = 94,
+ kInvalidatedMapIteratorLookupChainProtector = 95,
+ kInvalidatedNoElementsProtector = 96,
+ kInvalidatedPromiseHookProtector = 97,
+ kInvalidatedPromiseResolveLookupChainProtector = 98,
+ kInvalidatedPromiseSpeciesLookupChainProtector = 99,
+ kInvalidatedPromiseThenLookupChainProtector = 100,
+ kInvalidatedRegExpSpeciesLookupChainProtector = 101,
+ kInvalidatedSetIteratorLookupChainProtector = 102,
+ kInvalidatedStringIteratorLookupChainProtector = 103,
+ kInvalidatedStringLengthOverflowLookupChainProtector = 104,
+ kInvalidatedTypedArraySpeciesLookupChainProtector = 105,
+ kWasmSimdOpcodes = 106,
+ kVarRedeclaredCatchBinding = 107,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -8550,17 +8536,6 @@ class V8_EXPORT Isolate {
AbortOnUncaughtExceptionCallback callback);
/**
- * This specifies the callback to be called when FinalizationRegistries
- * are ready to be cleaned up and require FinalizationGroup::Cleanup()
- * to be called in a future task.
- */
- V8_DEPRECATED(
- "FinalizationRegistry cleanup is automatic if "
- "HostCleanupFinalizationGroupCallback is not set")
- void SetHostCleanupFinalizationGroupCallback(
- HostCleanupFinalizationGroupCallback callback);
-
- /**
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
@@ -9374,6 +9349,8 @@ class V8_EXPORT Isolate {
void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
+ void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
+
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@@ -9512,7 +9489,6 @@ class V8_EXPORT Isolate {
internal::Address* GetDataFromSnapshotOnce(size_t index);
void ReportExternalAllocationLimitReached();
- void CheckMemoryPressure();
};
class V8_EXPORT StartupData {
@@ -9601,7 +9577,8 @@ class V8_EXPORT V8 {
V8_INLINE static bool Initialize() {
const int kBuildConfiguration =
(internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
- (internal::SmiValuesAre31Bits() ? k31BitSmis : 0);
+ (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
+ (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0);
return Initialize(kBuildConfiguration);
}
@@ -9740,6 +9717,7 @@ class V8_EXPORT V8 {
enum BuildConfigurationFeatures {
kPointerCompression = 1 << 0,
k31BitSmis = 1 << 1,
+ kHeapSandbox = 1 << 2,
};
/**
@@ -11381,7 +11359,9 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
- return I::ReadRawField<void*>(obj, offset);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value = I::ReadExternalPointerField(isolate, obj, offset);
+ return reinterpret_cast<void*>(value);
}
#endif
return SlowGetAlignedPointerFromInternalField(index);
@@ -11411,7 +11391,9 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- void* value = I::ReadRawField<void*>(obj, I::kStringResourceOffset);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset);
result = reinterpret_cast<String::ExternalStringResource*>(value);
} else {
result = GetExternalStringResourceSlow();
@@ -11433,8 +11415,10 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
ExternalStringResourceBase* resource;
if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) {
- void* value = I::ReadRawField<void*>(obj, I::kStringResourceOffset);
- resource = static_cast<ExternalStringResourceBase*>(value);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset);
+ resource = reinterpret_cast<ExternalStringResourceBase*>(value);
} else {
resource = GetExternalStringResourceBaseSlow(encoding_out);
}
@@ -11950,7 +11934,6 @@ MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
typedef internal::Internals I;
- constexpr int64_t kMemoryReducerActivationLimit = 32 * 1024 * 1024;
int64_t* external_memory = reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryOffset);
int64_t* external_memory_limit = reinterpret_cast<int64_t*>(
@@ -11973,14 +11956,6 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
if (change_in_bytes <= 0) return *external_memory;
- int64_t allocation_diff_since_last_mc = static_cast<int64_t>(
- static_cast<uint64_t>(*external_memory) -
- static_cast<uint64_t>(*external_memory_low_since_mc));
- // Only check memory pressure and potentially trigger GC if the amount of
- // external memory increased.
- if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) {
- CheckMemoryPressure();
- }
if (amount > *external_memory_limit) {
ReportExternalAllocationLimitReached();
}
@@ -12022,7 +11997,9 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
- return I::ReadRawField<void*>(embedder_data, value_offset);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
+ return reinterpret_cast<void*>(
+ I::ReadExternalPointerField(isolate, embedder_data, value_offset));
#else
return SlowGetAlignedPointerFromEmbedderData(index);
#endif
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 40d23c35c1..9825232d6a 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -405,6 +405,15 @@
#endif
+#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 6)
+# define V8_ENUM_DEPRECATED(message)
+# define V8_ENUM_DEPRECATE_SOON(message)
+#else
+# define V8_ENUM_DEPRECATED(message) V8_DEPRECATED(message)
+# define V8_ENUM_DEPRECATE_SOON(message) V8_DEPRECATE_SOON(message)
+#endif
+
+
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 8677333c2f..d4abcf89eb 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -78,6 +78,7 @@
# Sanitizers.
'V8 Linux64 ASAN': 'release_x64_asan',
'V8 Linux64 TSAN - builder': 'release_x64_tsan',
+ 'V8 Linux - arm64 - sim - CFI': 'release_simulate_arm64_cfi',
'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan',
# Misc.
'V8 Linux gcc': 'release_x86_gcc',
@@ -98,6 +99,7 @@
'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
'V8 Linux - predictable': 'release_x86_predictable',
'V8 Linux - full debug': 'full_debug_x86',
+ 'V8 Mac64 - full debug': 'full_debug_x64',
'V8 Random Deopt Fuzzer - debug': 'debug_x64',
},
'client.v8.clusterfuzz': {
@@ -251,6 +253,7 @@
'v8_mac64_rel_ng': 'release_x64_trybot',
'v8_mac64_dbg': 'debug_x64',
'v8_mac64_dbg_ng': 'debug_x64',
+ 'v8_mac64_compile_full_dbg_ng': 'full_debug_x64',
'v8_mac64_asan_rel': 'release_x64_asan_no_lsan',
'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot',
'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot',
@@ -258,6 +261,7 @@
'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
'v8_linux_arm64_rel_ng': 'release_simulate_arm64_trybot',
+ 'v8_linux_arm64_cfi_rel_ng' : 'release_simulate_arm64_cfi',
'v8_linux_arm64_dbg': 'debug_simulate_arm64',
'v8_linux_arm64_gc_stress_dbg': 'debug_simulate_arm64',
'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel',
@@ -363,6 +367,8 @@
'release_trybot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm64': [
'release_bot', 'simulate_arm64'],
+ 'release_simulate_arm64_cfi': [
+ 'release_bot', 'simulate_arm64', 'v8_control_flow_integrity'],
'release_simulate_arm64_pointer_compression': [
# TODO(v8:v7703): Make pointer compression bots testing non pointer
# compression mode while pointer compression is temporarily enabled
@@ -452,7 +458,7 @@
'release_x64_fuchsia_trybot': [
'release_trybot', 'x64', 'fuchsia'],
'release_x64_gcc_coverage': [
- 'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx',
+ 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'no_custom_libcxx',
'no_sysroot'],
'release_x64_ios_simulator': [
'release_bot', 'x64', 'ios_simulator'],
@@ -502,7 +508,7 @@
'debug_x64_fuchsia': [
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
- 'debug_bot', 'x64', 'gcc', 'v8_check_header_includes'],
+ 'debug_bot_no_goma', 'x64', 'gcc', 'v8_check_header_includes'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_minimal_symbols': [
@@ -513,6 +519,8 @@
'debug_trybot', 'x64'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
+ 'full_debug_x64': [
+ 'debug_bot', 'x64', 'v8_full_debug'],
# Debug configs for x86.
'debug_x86': [
@@ -532,9 +540,9 @@
'release_x86': [
'release_bot', 'x86'],
'release_x86_gcc': [
- 'release_bot', 'x86', 'gcc', 'v8_check_header_includes'],
+ 'release_bot_no_goma', 'x86', 'gcc', 'v8_check_header_includes'],
'release_x86_gcc_minimal_symbols': [
- 'release_bot', 'x86', 'gcc', 'minimal_symbols',
+ 'release_bot_no_goma', 'x86', 'gcc', 'minimal_symbols',
'v8_check_header_includes'],
'release_x86_gcmole': [
'release_bot', 'x86', 'gcmole'],
@@ -623,6 +631,12 @@
'v8_optimized_debug'],
},
+ 'debug_bot_no_goma': {
+ 'mixins': [
+ 'debug', 'shared', 'no_goma', 'v8_enable_slow_dchecks',
+ 'v8_optimized_debug'],
+ },
+
'debug_trybot': {
'mixins': ['debug_bot', 'minimal_symbols'],
},
@@ -776,6 +790,10 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
+ 'v8_control_flow_integrity' : {
+ 'gn_args': 'v8_control_flow_integrity=true',
+ },
+
'v8_enable_lite_mode': {
'gn_args': 'v8_enable_lite_mode=true',
},
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 6c3379b5cb..72f739487c 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -310,12 +310,15 @@
'os': 'Ubuntu-16.04',
},
'tests': [
+ # Infra staging.
+ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+ # Native context independent code.
+ {'name': 'v8testing', 'variant': 'nci'},
# Stress sampling.
{'name': 'mjsunit', 'variant': 'stress_sampling'},
{'name': 'webkit', 'variant': 'stress_sampling'},
- # Infra staging.
- {'name': 'test262', 'variant': 'infra_staging', 'shards': 2},
- {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+ # Stress snapshot.
+ {'name': 'mjsunit', 'variant': 'stress_snapshot'},
],
},
'v8_linux64_msan_rel': {
@@ -481,6 +484,15 @@
{'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
+ 'v8_linux_arm64_cfi_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-16.04',
+ },
+ 'tests': [
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'shards': 4},
+ ],
+ },
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-16.04',
@@ -679,6 +691,15 @@
},
],
},
+ 'V8 Linux - arm64 - sim - CFI': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-16.04',
+ },
+ 'tests': [
+ {'name': 'test262', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'shards': 4},
+ ],
+ },
'V8 Linux - arm64 - sim - MSAN': {
'swarming_dimensions': {
'os': 'Ubuntu-16.04',
@@ -941,9 +962,13 @@
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+ # Native context independent code.
+ {'name': 'v8testing', 'variant': 'nci'},
# Stress sampling.
- {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
- {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+ {'name': 'mjsunit', 'variant': 'stress_sampling'},
+ {'name': 'webkit', 'variant': 'stress_sampling'},
+ # Stress snapshot.
+ {'name': 'mjsunit', 'variant': 'stress_snapshot'},
],
},
'V8 Linux64 - debug - perfetto': {
@@ -965,10 +990,14 @@
},
'tests': [
# Infra staging.
- {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'infra_staging'},
+ # Native context independent code.
+ {'name': 'v8testing', 'variant': 'nci'},
# Stress sampling.
- {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
- {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+ {'name': 'mjsunit', 'variant': 'stress_sampling'},
+ {'name': 'webkit', 'variant': 'stress_sampling'},
+ # Stress snapshot.
+ {'name': 'mjsunit', 'variant': 'stress_snapshot'},
],
},
'V8 Linux64 - gcov coverage': {
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 772ad53b32..abea95558d 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -12,15 +12,22 @@ include_rules = [
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
+ # TODO(v8:10496): Don't expose so much (through transitive includes) outside
+ # of heap/.
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
"+src/heap/local-heap.h",
+ # TODO(v8:10496): Don't expose memory chunk outside of heap/.
+ "+src/heap/memory-chunk.h",
+ "+src/heap/memory-chunk-inl.h",
"+src/heap/off-thread-factory-inl.h",
"+src/heap/off-thread-factory.h",
+ "+src/heap/off-thread-heap.h",
"+src/heap/read-only-heap-inl.h",
"+src/heap/read-only-heap.h",
+ "+src/heap/safepoint.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
@@ -54,4 +61,7 @@ specific_include_rules = {
"+include/libplatform/v8-tracing.h",
"+perfetto/tracing.h"
],
+ "builtins-trace\.cc": [
+ "+protos/perfetto",
+ ],
}
diff --git a/deps/v8/src/api/api-arguments.h b/deps/v8/src/api/api-arguments.h
index 794681b71d..18690b5db2 100644
--- a/deps/v8/src/api/api-arguments.h
+++ b/deps/v8/src/api/api-arguments.h
@@ -160,11 +160,9 @@ class FunctionCallbackArguments
static const int kIsolateIndex = T::kIsolateIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
- FunctionCallbackArguments(internal::Isolate* isolate, internal::Object data,
- internal::HeapObject callee,
- internal::Object holder,
- internal::HeapObject new_target,
- internal::Address* argv, int argc);
+ FunctionCallbackArguments(Isolate* isolate, Object data, HeapObject callee,
+ Object holder, HeapObject new_target, Address* argv,
+ int argc);
/*
* The following Call function wraps the calling of all callbacks to handle
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 0d2ad2f8a0..f686424286 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -86,7 +86,6 @@ MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
-MAKE_TO_LOCAL(ToLocal, JSFinalizationRegistry, FinalizationGroup)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 1b59f2cf64..93780bceec 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -28,6 +28,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/cpu-features.h"
#include "src/common/assert-scope.h"
+#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/date/date.h"
@@ -96,10 +97,8 @@
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
-#include "src/snapshot/partial-serializer.h"
-#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot.h"
-#include "src/snapshot/startup-serializer.h"
+#include "src/snapshot/startup-serializer.h" // For SerializedHandleChecker.
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-hasher.h"
#include "src/strings/unicode-inl.h"
@@ -328,6 +327,7 @@ class CallDepthScope {
bool CheckKeptObjectsClearedAfterMicrotaskCheckpoint(
i::MicrotaskQueue* microtask_queue) {
bool did_perform_microtask_checkpoint =
+ isolate_->thread_local_top()->CallDepthIsZero() &&
do_callback && microtask_queue &&
microtask_queue->microtasks_policy() == MicrotasksPolicy::kAuto;
return !did_perform_microtask_checkpoint ||
@@ -598,7 +598,6 @@ SnapshotCreator::SnapshotCreator(Isolate* isolate,
const intptr_t* external_references,
StartupData* existing_snapshot) {
SnapshotCreatorData* data = new SnapshotCreatorData(isolate);
- data->isolate_ = isolate;
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->set_array_buffer_allocator(&data->allocator_);
internal_isolate->set_api_external_references(external_references);
@@ -733,8 +732,11 @@ StartupData SnapshotCreator::CreateBlob(
DCHECK(!data->created_);
DCHECK(!data->default_context_.IsEmpty());
- int num_additional_contexts = static_cast<int>(data->contexts_.Size());
+ const int num_additional_contexts = static_cast<int>(data->contexts_.Size());
+ const int num_contexts = num_additional_contexts + 1; // The default context.
+ // Create and store lists of embedder-provided data needed during
+ // serialization.
{
i::HandleScope scope(isolate);
// Convert list of context-independent data to FixedArray.
@@ -773,48 +775,15 @@ StartupData SnapshotCreator::CreateBlob(
isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld);
}
- if (function_code_handling == FunctionCodeHandling::kClear) {
- // Clear out re-compilable data from all shared function infos. Any
- // JSFunctions using these SFIs will have their code pointers reset by the
- // partial serializer.
- //
- // We have to iterate the heap and collect handles to each clearable SFI,
- // before we disable allocation, since we have to allocate UncompiledDatas
- // to be able to recompile them.
- //
- // Compiled irregexp code is also flushed by collecting and clearing any
- // seen JSRegExp objects.
- i::HandleScope scope(isolate);
- std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
-
- { // Heap allocation is disallowed within this scope.
- i::HeapObjectIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.Next();
- !current_obj.is_null(); current_obj = heap_iterator.Next()) {
- if (current_obj.IsSharedFunctionInfo()) {
- i::SharedFunctionInfo shared =
- i::SharedFunctionInfo::cast(current_obj);
- if (shared.CanDiscardCompiled()) {
- sfis_to_clear.emplace_back(shared, isolate);
- }
- } else if (current_obj.IsJSRegExp()) {
- i::JSRegExp regexp = i::JSRegExp::cast(current_obj);
- if (regexp.HasCompiledCode()) {
- regexp.DiscardCompiledCodeForSerialization();
- }
- }
- }
- }
-
- // Must happen after heap iteration since SFI::DiscardCompiled may allocate.
- for (i::Handle<i::SharedFunctionInfo> shared : sfis_to_clear) {
- i::SharedFunctionInfo::DiscardCompiled(isolate, shared);
- }
- }
+ i::Snapshot::ClearReconstructableDataForSerialization(
+ isolate, function_code_handling == FunctionCodeHandling::kClear);
i::DisallowHeapAllocation no_gc_from_here_on;
- int num_contexts = num_additional_contexts + 1;
+ // Create a vector with all contexts and clear associated Persistent fields.
+ // Note these contexts may be dead after calling Clear(), but will not be
+ // collected until serialization completes and the DisallowHeapAllocation
+ // scope above goes out of scope.
std::vector<i::Context> contexts;
contexts.reserve(num_contexts);
{
@@ -834,82 +803,19 @@ StartupData SnapshotCreator::CreateBlob(
i::SerializedHandleChecker handle_checker(isolate, &contexts);
CHECK(handle_checker.CheckGlobalAndEternalHandles());
- i::HeapObjectIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
- current_obj = heap_iterator.Next()) {
- if (current_obj.IsJSFunction()) {
- i::JSFunction fun = i::JSFunction::cast(current_obj);
-
- // Complete in-object slack tracking for all functions.
- fun.CompleteInobjectSlackTrackingIfActive();
-
- // Also, clear out feedback vectors, or any optimized code.
- // Note that checking for fun.IsOptimized() || fun.IsInterpreted() is not
- // sufficient because the function can have a feedback vector even if it
- // is not compiled (e.g. when the bytecode was flushed). On the other
- // hand, only checking for the feedback vector is not sufficient because
- // there can be multiple functions sharing the same feedback vector. So we
- // need all these checks.
- if (fun.IsOptimized() || fun.IsInterpreted() ||
- !fun.raw_feedback_cell().value().IsUndefined()) {
- fun.raw_feedback_cell().set_value(
- i::ReadOnlyRoots(isolate).undefined_value());
- fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
- }
- if (function_code_handling == FunctionCodeHandling::kClear) {
- DCHECK(fun.shared().HasWasmExportedFunctionData() ||
- fun.shared().HasBuiltinId() || fun.shared().IsApiFunction() ||
- fun.shared().HasUncompiledDataWithoutPreparseData());
- }
- }
+ // Create a vector with all embedder fields serializers.
+ std::vector<SerializeInternalFieldsCallback> embedder_fields_serializers;
+ embedder_fields_serializers.reserve(num_contexts);
+ embedder_fields_serializers.push_back(
+ data->default_embedder_fields_serializer_);
+ for (int i = 0; i < num_additional_contexts; i++) {
+ embedder_fields_serializers.push_back(
+ data->embedder_fields_serializers_[i]);
}
- i::ReadOnlySerializer read_only_serializer(isolate);
- read_only_serializer.SerializeReadOnlyRoots();
-
- i::StartupSerializer startup_serializer(isolate, &read_only_serializer);
- startup_serializer.SerializeStrongReferences();
-
- // Serialize each context with a new partial serializer.
- std::vector<i::SnapshotData*> context_snapshots;
- context_snapshots.reserve(num_contexts);
-
- // TODO(6593): generalize rehashing, and remove this flag.
- bool can_be_rehashed = true;
-
- for (int i = 0; i < num_contexts; i++) {
- bool is_default_context = i == 0;
- i::PartialSerializer partial_serializer(
- isolate, &startup_serializer,
- is_default_context ? data->default_embedder_fields_serializer_
- : data->embedder_fields_serializers_[i - 1]);
- partial_serializer.Serialize(&contexts[i], !is_default_context);
- can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
- context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
- }
-
- startup_serializer.SerializeWeakReferencesAndDeferred();
- can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
-
- startup_serializer.CheckNoDirtyFinalizationRegistries();
-
- read_only_serializer.FinalizeSerialization();
- can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
-
- i::SnapshotData read_only_snapshot(&read_only_serializer);
- i::SnapshotData startup_snapshot(&startup_serializer);
- StartupData result =
- i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &read_only_snapshot,
- context_snapshots, can_be_rehashed);
-
- // Delete heap-allocated context snapshot instances.
- for (const auto context_snapshot : context_snapshots) {
- delete context_snapshot;
- }
data->created_ = true;
-
- DCHECK(i::Snapshot::VerifyChecksum(&result));
- return result;
+ return i::Snapshot::Create(isolate, &contexts, embedder_fields_serializers,
+ no_gc_from_here_on);
}
bool StartupData::CanBeRehashed() const {
@@ -1341,21 +1247,25 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
- HandleScope handle_scope(GetIsolate());
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::EmbedderDataArray> data =
EmbedderDataFor(this, index, false, location);
if (data.is_null()) return nullptr;
void* result;
- Utils::ApiCheck(i::EmbedderDataSlot(*data, index).ToAlignedPointer(&result),
- location, "Pointer is not aligned");
+ Utils::ApiCheck(
+ i::EmbedderDataSlot(*data, index).ToAlignedPointer(isolate, &result),
+ location, "Pointer is not aligned");
return result;
}
void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::Handle<i::EmbedderDataArray> data =
EmbedderDataFor(this, index, true, location);
- bool ok = i::EmbedderDataSlot(*data, index).store_aligned_pointer(value);
+ bool ok =
+ i::EmbedderDataSlot(*data, index).store_aligned_pointer(isolate, value);
Utils::ApiCheck(ok, location, "Pointer is not aligned");
DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
}
@@ -3776,6 +3686,12 @@ void v8::debug::AccessorPair::CheckCast(Value* that) {
"Value is not a debug::AccessorPair");
}
+void v8::debug::WasmValue::CheckCast(Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsWasmValue(), "v8::WasmValue::Cast",
+ "Value is not a debug::WasmValue");
+}
+
v8::BackingStore::~BackingStore() {
auto i_this = reinterpret_cast<const i::BackingStore*>(this);
i_this->~BackingStore(); // manually call internal destructor
@@ -5494,7 +5410,9 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
}
if (i::StringShape(str).IsExternalTwoByte()) {
- void* value = I::ReadRawField<void*>(str.ptr(), I::kStringResourceOffset);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(str.ptr());
+ internal::Address value = I::ReadExternalPointerField(
+ isolate, str.ptr(), I::kStringResourceOffset);
return reinterpret_cast<String::ExternalStringResource*>(value);
}
return nullptr;
@@ -5516,8 +5434,10 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
if (i::StringShape(str).IsExternalOneByte() ||
i::StringShape(str).IsExternalTwoByte()) {
- void* value = I::ReadRawField<void*>(string, I::kStringResourceOffset);
- resource = static_cast<ExternalStringResourceBase*>(value);
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(string);
+ internal::Address value =
+ I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset);
+ resource = reinterpret_cast<ExternalStringResourceBase*>(value);
}
return resource;
}
@@ -5635,7 +5555,7 @@ void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
if (!InternalFieldOK(obj, index, location)) return nullptr;
void* result;
Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index)
- .ToAlignedPointer(&result),
+ .ToAlignedPointer(obj->GetIsolate(), &result),
location, "Unaligned pointer");
return result;
}
@@ -5645,7 +5565,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
Utils::ApiCheck(i::EmbedderDataSlot(i::JSObject::cast(*obj), index)
- .store_aligned_pointer(value),
+ .store_aligned_pointer(obj->GetIsolate(), value),
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
@@ -5664,9 +5584,9 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
return;
}
void* value = values[i];
- Utils::ApiCheck(
- i::EmbedderDataSlot(js_obj, index).store_aligned_pointer(value),
- location, "Unaligned pointer");
+ Utils::ApiCheck(i::EmbedderDataSlot(js_obj, index)
+ .store_aligned_pointer(obj->GetIsolate(), value),
+ location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
}
@@ -5707,6 +5627,15 @@ bool v8::V8::Initialize(const int build_config) {
kEmbedderSmiValueSize, internal::kSmiValueSize);
}
+ const bool kEmbedderHeapSandbox = (build_config & kHeapSandbox) != 0;
+ if (kEmbedderHeapSandbox != V8_HEAP_SANDBOX_BOOL) {
+ FATAL(
+ "Embedder-vs-V8 build configuration mismatch. On embedder side "
+ "heap sandbox is %s while on V8 side it's %s.",
+ kEmbedderHeapSandbox ? "ENABLED" : "DISABLED",
+ V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
+ }
+
i::V8::Initialize();
return true;
}
@@ -5824,17 +5753,7 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
-#ifdef V8_SHARED_RO_HEAP
- i::ReadOnlySpace* ro_space = i::ReadOnlyHeap::Instance()->read_only_space();
- statistics->read_only_space_size_ = ro_space->CommittedMemory();
- statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
- statistics->read_only_space_physical_size_ =
- ro_space->CommittedPhysicalMemory();
-#else
- statistics->read_only_space_size_ = 0;
- statistics->read_only_space_used_size_ = 0;
- statistics->read_only_space_physical_size_ = 0;
-#endif // V8_SHARED_RO_HEAP
+ i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
template <typename ObjectType>
@@ -7247,8 +7166,10 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
}
CompiledWasmModule::CompiledWasmModule(
- std::shared_ptr<internal::wasm::NativeModule> native_module)
- : native_module_(std::move(native_module)) {
+ std::shared_ptr<internal::wasm::NativeModule> native_module,
+ const char* source_url, size_t url_length)
+ : native_module_(std::move(native_module)),
+ source_url_(source_url, url_length) {
CHECK_NOT_NULL(native_module_);
}
@@ -7269,7 +7190,13 @@ MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
CompiledWasmModule WasmModuleObject::GetCompiledModule() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
- return Utils::Convert(obj->shared_native_module());
+ auto source_url = i::String::cast(obj->script().source_url());
+ int length;
+ std::unique_ptr<char[]> cstring = source_url.ToCString(
+ i::DISALLOW_NULLS, i::FAST_STRING_TRAVERSAL, &length);
+ i::Handle<i::String> url(source_url, obj->GetIsolate());
+ return CompiledWasmModule(std::move(obj->shared_native_module()),
+ cstring.get(), length);
}
MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
@@ -7277,7 +7204,8 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::WasmModuleObject> module_object =
i_isolate->wasm_engine()->ImportNativeModule(
- i_isolate, Utils::Open(compiled_module));
+ i_isolate, compiled_module.native_module_,
+ i::VectorOf(compiled_module.source_url()));
return Local<WasmModuleObject>::Cast(
Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
}
@@ -8059,12 +7987,6 @@ void Isolate::ReportExternalAllocationLimitReached() {
heap->ReportExternalMemoryPressure();
}
-void Isolate::CheckMemoryPressure() {
- i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
- if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
- heap->CheckMemoryPressure();
-}
-
HeapProfiler* Isolate::GetHeapProfiler() {
i::HeapProfiler* heap_profiler =
reinterpret_cast<i::Isolate*>(this)->heap_profiler();
@@ -8373,29 +8295,6 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
isolate->SetAbortOnUncaughtExceptionCallback(callback);
}
-void Isolate::SetHostCleanupFinalizationGroupCallback(
- HostCleanupFinalizationGroupCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->SetHostCleanupFinalizationGroupCallback(callback);
-}
-
-Maybe<bool> FinalizationGroup::Cleanup(
- Local<FinalizationGroup> finalization_group) {
- i::Handle<i::JSFinalizationRegistry> fr =
- Utils::OpenHandle(*finalization_group);
- i::Isolate* isolate = fr->native_context().GetIsolate();
- i::Handle<i::Context> i_context(fr->native_context(), isolate);
- Local<Context> context = Utils::ToLocal(i_context);
- ENTER_V8(isolate, context, FinalizationGroup, Cleanup, Nothing<bool>(),
- i::HandleScope);
- i::Handle<i::Object> callback(fr->cleanup(), isolate);
- fr->set_scheduled_for_cleanup(false);
- has_pending_exception =
- i::JSFinalizationRegistry::Cleanup(isolate, fr, callback).IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(true);
-}
-
void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8730,11 +8629,7 @@ void Isolate::EnqueueMicrotask(Local<Function> v8_function) {
void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::HandleScope scope(isolate);
- i::Handle<i::CallbackTask> microtask = isolate->factory()->NewCallbackTask(
- isolate->factory()->NewForeign(reinterpret_cast<i::Address>(callback)),
- isolate->factory()->NewForeign(reinterpret_cast<i::Address>(data)));
- isolate->default_microtask_queue()->EnqueueMicrotask(*microtask);
+ isolate->default_microtask_queue()->EnqueueMicrotask(this, callback, data);
}
void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
@@ -9011,6 +8906,9 @@ CALLBACK_SETTER(WasmThreadsEnabledCallback, WasmThreadsEnabledCallback,
CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
wasm_load_source_map_callback)
+CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
+ wasm_simd_enabled_callback)
+
void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -9138,6 +9036,7 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#ifdef V8_INTL_SUPPORT
i_isolate->ResetDefaultLocale();
+ i_isolate->ClearCachedIcuObjects();
#endif // V8_INTL_SUPPORT
}
@@ -9256,6 +9155,9 @@ DEFINE_ERROR(RangeError, range_error)
DEFINE_ERROR(ReferenceError, reference_error)
DEFINE_ERROR(SyntaxError, syntax_error)
DEFINE_ERROR(TypeError, type_error)
+DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmLinkError, wasm_link_error)
+DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
@@ -9767,6 +9669,37 @@ debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
return static_cast<WasmScript*>(script);
}
+debug::WasmScript::DebugSymbolsType debug::WasmScript::GetDebugSymbolType()
+ const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ switch (script->wasm_native_module()->module()->debug_symbols.type) {
+ case i::wasm::WasmDebugSymbols::Type::None:
+ return debug::WasmScript::DebugSymbolsType::None;
+ case i::wasm::WasmDebugSymbols::Type::EmbeddedDWARF:
+ return debug::WasmScript::DebugSymbolsType::EmbeddedDWARF;
+ case i::wasm::WasmDebugSymbols::Type::ExternalDWARF:
+ return debug::WasmScript::DebugSymbolsType::ExternalDWARF;
+ case i::wasm::WasmDebugSymbols::Type::SourceMap:
+ return debug::WasmScript::DebugSymbolsType::SourceMap;
+ }
+}
+
+MemorySpan<const char> debug::WasmScript::ExternalSymbolsURL() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+
+ const i::wasm::WasmDebugSymbols& symbols =
+ script->wasm_native_module()->module()->debug_symbols;
+ if (symbols.external_url.is_empty()) return {};
+
+ internal::wasm::ModuleWireBytes wire_bytes(
+ script->wasm_native_module()->wire_bytes());
+ i::wasm::WasmName external_url =
+ wire_bytes.GetNameOrNull(symbols.external_url);
+ return {external_url.data(), external_url.size()};
+}
+
int debug::WasmScript::NumFunctions() const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -10365,6 +10298,51 @@ bool debug::AccessorPair::IsAccessorPair(Local<Value> that) {
return obj->IsAccessorPair();
}
+int debug::WasmValue::value_type() {
+ i::Handle<i::WasmValue> obj = Utils::OpenHandle(this);
+ return obj->value_type();
+}
+
+v8::Local<v8::Array> debug::WasmValue::bytes() {
+ i::Handle<i::WasmValue> obj = Utils::OpenHandle(this);
+ // Should only be called on i32, i64, f32, f64, s128.
+ DCHECK_GE(1, obj->value_type());
+ DCHECK_LE(5, obj->value_type());
+
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Handle<i::Object> bytes_or_ref(obj->bytes_or_ref(), isolate);
+ i::Handle<i::ByteArray> bytes(i::Handle<i::ByteArray>::cast(bytes_or_ref));
+
+ int length = bytes->length();
+
+ i::Handle<i::FixedArray> fa = isolate->factory()->NewFixedArray(length);
+ i::Handle<i::JSArray> arr = obj->GetIsolate()->factory()->NewJSArray(
+ i::PACKED_SMI_ELEMENTS, length, length);
+ i::JSArray::SetContent(arr, fa);
+
+ for (int i = 0; i < length; i++) {
+ fa->set(i, i::Smi::FromInt(bytes->get(i)));
+ }
+
+ return Utils::ToLocal(arr);
+}
+
+v8::Local<v8::Value> debug::WasmValue::ref() {
+ i::Handle<i::WasmValue> obj = Utils::OpenHandle(this);
+ // Should only be called on anyref.
+ DCHECK_EQ(6, obj->value_type());
+
+ i::Isolate* isolate = obj->GetIsolate();
+ i::Handle<i::Object> bytes_or_ref(obj->bytes_or_ref(), isolate);
+
+ return Utils::ToLocal(bytes_or_ref);
+}
+
+bool debug::WasmValue::IsWasmValue(Local<Value> that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*that);
+ return obj->IsWasmValue();
+}
+
MaybeLocal<Message> debug::GetMessageFromPromise(Local<Promise> p) {
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*p);
i::Isolate* isolate = promise->GetIsolate();
@@ -11174,8 +11152,11 @@ void InvokeFinalizationRegistryCleanupFromTask(
Local<v8::Context> api_context = Utils::ToLocal(context);
CallDepthScope<true> call_depth_scope(isolate, api_context);
VMState<OTHER> state(isolate);
- if (JSFinalizationRegistry::Cleanup(isolate, finalization_registry, callback)
- .IsNothing()) {
+ Handle<Object> argv[] = {callback};
+ if (Execution::CallBuiltin(isolate,
+ isolate->finalization_registry_cleanup_some(),
+ finalization_registry, arraysize(argv), argv)
+ .is_null()) {
call_depth_scope.Escape();
}
}
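
For orientation, here is a minimal sketch (not part of the patch) of how an embedder reaches the serialization path rewritten above: the heavy lifting removed from api.cc is now entered from SnapshotCreator::CreateBlob(), which delegates to i::Snapshot::Create() and ClearReconstructableDataForSerialization(). The sketch assumes the platform and v8::V8::Initialize() have already been set up and that the caller frees the returned blob's data.

#include "include/v8.h"

// Build a startup snapshot containing one default context.
v8::StartupData MakeSnapshot() {
  v8::SnapshotCreator creator;              // owns its own isolate
  v8::Isolate* isolate = creator.GetIsolate();
  {
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    // An embedder would typically evaluate setup scripts in `context` here.
    creator.SetDefaultContext(context);
  }
  // kClear drops compiled code so it is regenerated lazily on deserialization;
  // this is where the refactored snapshot code above takes over.
  return creator.CreateBlob(
      v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
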
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 4c383d3e43..ad879657c9 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -33,6 +33,7 @@ namespace debug {
class AccessorPair;
class GeneratorObject;
class Script;
+class WasmValue;
class WeakMap;
} // namespace debug
@@ -93,7 +94,6 @@ class RegisteredExtension {
V(Data, Object) \
V(RegExp, JSRegExp) \
V(Object, JSReceiver) \
- V(FinalizationGroup, JSFinalizationRegistry) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
@@ -129,6 +129,7 @@ class RegisteredExtension {
V(debug::Script, Script) \
V(debug::WeakMap, JSWeakMap) \
V(debug::AccessorPair, AccessorPair) \
+ V(debug::WasmValue, WasmValue) \
V(Promise, JSPromise) \
V(Primitive, Object) \
V(PrimitiveArray, FixedArray) \
@@ -205,8 +206,6 @@ class Utils {
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<BigUint64Array> ToLocalBigUint64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
- static inline Local<FinalizationGroup> ToLocal(
- v8::internal::Handle<v8::internal::JSFinalizationRegistry> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
@@ -275,16 +274,6 @@ class Utils {
return OpenHandle(*handle);
}
- static inline CompiledWasmModule Convert(
- std::shared_ptr<i::wasm::NativeModule> native_module) {
- return CompiledWasmModule{std::move(native_module)};
- }
-
- static inline const std::shared_ptr<i::wasm::NativeModule>& Open(
- const CompiledWasmModule& compiled_module) {
- return compiled_module.native_module_;
- }
-
private:
static void ReportApiFailure(const char* location, const char* message);
};
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 5a6846c33f..17bf39c853 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -187,7 +187,8 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator)
: UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
- &compilation_info_),
+ &compilation_info_,
+ CanOffThreadFinalize::kNo),
allocator_(allocator),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
@@ -223,7 +224,7 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
- Zone* compile_zone = compilation_info()->zone();
+ Zone* compile_zone = &zone_;
Zone translate_zone(allocator_, ZONE_NAME);
Utf16CharacterStream* stream = parse_info()->character_stream();
@@ -332,6 +333,13 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
// but should instead point to the instantiation site (more intuitive).
int position = shared->StartPosition();
+ // Check that the module is not instantiated as a generator or async function.
+ if (IsResumableFunction(shared->scope_info().function_kind())) {
+ ReportInstantiationFailure(script, position,
+ "Cannot be instantiated as resumable function");
+ return MaybeHandle<Object>();
+ }
+
// Check that all used stdlib members are valid.
bool stdlib_use_of_typed_array_present = false;
wasm::AsmJsParser::StdlibSet stdlib_uses =
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index eaff042d31..7314086708 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -329,7 +329,7 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
token_ = kParseError;
return;
}
- if (has_dot) {
+ if (has_dot || trunc(double_value_) != double_value_) {
token_ = kDouble;
} else {
// Exceeding safe integer range is an error.
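
The scanner change above broadens when a numeric literal is classified as a double: not only literals written with a dot, but also dot-less literals whose parsed value is non-integral (such as 1e-3). A small illustrative restatement of that condition, outside the scanner:

#include <cmath>

// Mirrors the updated check in AsmJsScanner::ConsumeNumber (illustrative only).
bool IsDoubleToken(bool has_dot, double value) {
  return has_dot || std::trunc(value) != value;
}
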
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 7e1be44da1..23f28b834a 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -27,6 +27,7 @@
#include "src/ast/ast-value-factory.h"
+#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory-inl.h"
@@ -332,20 +333,22 @@ const AstRawString* AstValueFactory::CloneFromOtherFactory(
}
AstConsString* AstValueFactory::NewConsString() {
- return new (zone_) AstConsString;
+ return new (zone()) AstConsString;
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str) {
- return NewConsString()->AddString(zone_, str);
+ return NewConsString()->AddString(zone(), str);
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
const AstRawString* str2) {
- return NewConsString()->AddString(zone_, str1)->AddString(zone_, str2);
+ return NewConsString()->AddString(zone(), str1)->AddString(zone(), str2);
}
template <typename LocalIsolate>
void AstValueFactory::Internalize(LocalIsolate* isolate) {
+ if (!zone_) return;
+
// Strings need to be internalized before values, because values refer to
// strings.
for (AstRawString* current = strings_; current != nullptr;) {
@@ -355,6 +358,7 @@ void AstValueFactory::Internalize(LocalIsolate* isolate) {
}
ResetStrings();
+ zone_ = nullptr;
}
template EXPORT_TEMPLATE_DEFINE(
V8_EXPORT_PRIVATE) void AstValueFactory::Internalize<Isolate>(Isolate*
@@ -373,9 +377,9 @@ AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
if (entry->value == nullptr) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
- byte* new_literal_bytes = zone_->NewArray<byte>(length);
+ byte* new_literal_bytes = zone()->NewArray<byte>(length);
memcpy(new_literal_bytes, literal_bytes.begin(), length);
- AstRawString* new_string = new (zone_) AstRawString(
+ AstRawString* new_string = new (zone()) AstRawString(
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
CHECK_NOT_NULL(new_string);
AddString(new_string);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index dce9de4069..134612f1fd 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -31,6 +31,7 @@
#include <forward_list>
#include "src/base/hashmap.h"
+#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/factory.h"
#include "src/numbers/conversions.h"
@@ -66,6 +67,8 @@ class AstRawString final : public ZoneObject {
int byte_length() const { return literal_bytes_.length(); }
const unsigned char* raw_data() const { return literal_bytes_.begin(); }
+ bool IsPrivateName() const { return length() > 0 && FirstCharacter() == '#'; }
+
// For storing AstRawStrings in a hash map.
uint32_t hash_field() const { return hash_field_; }
uint32_t Hash() const { return hash_field_ >> Name::kHashShift; }
@@ -288,6 +291,7 @@ class AstValueFactory {
empty_cons_string_(nullptr),
zone_(zone),
hash_seed_(hash_seed) {
+ DCHECK_NOT_NULL(zone_);
DCHECK_EQ(hash_seed, string_constants->hash_seed());
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
@@ -295,7 +299,10 @@ class AstValueFactory {
empty_cons_string_ = NewConsString();
}
- Zone* zone() const { return zone_; }
+ Zone* zone() const {
+ DCHECK_NOT_NULL(zone_);
+ return zone_;
+ }
const AstRawString* GetOneByteString(Vector<const uint8_t> literal) {
return GetOneByteStringInternal(literal);
@@ -317,6 +324,9 @@ class AstValueFactory {
V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str1,
const AstRawString* str2);
+ // Internalize all the strings in the factory, and prevent any more from being
+ // allocated. Multiple calls to Internalize are allowed, for simplicity, where
+ // subsequent calls are a no-op.
template <typename LocalIsolate>
void Internalize(LocalIsolate* isolate);
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 5bf2d7e192..6fcf30499a 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -14,8 +14,6 @@
#include "src/codegen/bailout-reason.h"
#include "src/codegen/label.h"
#include "src/common/globals.h"
-#include "src/execution/isolate.h"
-#include "src/execution/off-thread-isolate.h"
#include "src/heap/factory.h"
#include "src/objects/elements-kind.h"
#include "src/objects/function-syntax-kind.h"
@@ -117,6 +115,9 @@ namespace internal {
EXPRESSION_NODE_LIST(V)
// Forward declarations
+class Isolate;
+class OffThreadIsolate;
+
class AstNode;
class AstNodeFactory;
class Declaration;
@@ -1445,9 +1446,7 @@ class VariableProxy final : public Expression {
HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
}
- bool IsPrivateName() const {
- return raw_name()->length() > 0 && raw_name()->FirstCharacter() == '#';
- }
+ bool IsPrivateName() const { return raw_name()->IsPrivateName(); }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -2242,7 +2241,7 @@ class FunctionLiteral final : public Expression {
private:
friend class AstNodeFactory;
- FunctionLiteral(Zone* zone, const AstRawString* name,
+ FunctionLiteral(Zone* zone, const AstConsString* name,
AstValueFactory* ast_value_factory, DeclarationScope* scope,
const ScopedPtrList<Statement>& body,
int expected_property_count, int parameter_count,
@@ -2258,7 +2257,7 @@ class FunctionLiteral final : public Expression {
function_token_position_(kNoSourcePosition),
suspend_count_(0),
function_literal_id_(function_literal_id),
- raw_name_(name ? ast_value_factory->NewConsString(name) : nullptr),
+ raw_name_(name),
scope_(scope),
body_(0, nullptr),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
@@ -3109,7 +3108,8 @@ class AstNodeFactory final {
bool has_braces, int function_literal_id,
ProducedPreparseData* produced_preparse_data = nullptr) {
return new (zone_) FunctionLiteral(
- zone_, name, ast_value_factory_, scope, body, expected_property_count,
+ zone_, name ? ast_value_factory_->NewConsString(name) : nullptr,
+ ast_value_factory_, scope, body, expected_property_count,
parameter_count, function_length, function_syntax_kind,
has_duplicate_parameters, eager_compile_hint, position, has_braces,
function_literal_id, produced_preparse_data);
@@ -3122,8 +3122,8 @@ class AstNodeFactory final {
DeclarationScope* scope, const ScopedPtrList<Statement>& body,
int expected_property_count, int parameter_count) {
return new (zone_) FunctionLiteral(
- zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
- body, expected_property_count, parameter_count, parameter_count,
+ zone_, ast_value_factory_->empty_cons_string(), ast_value_factory_,
+ scope, body, expected_property_count, parameter_count, parameter_count,
FunctionSyntaxKind::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false,
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 8c13556db9..3f0a1adbc3 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -598,7 +598,7 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
allow_deref.emplace();
info->consumed_preparse_data()->RestoreScopeAllocationData(
- scope, info->ast_value_factory());
+ scope, info->ast_value_factory(), info->zone());
}
if (!scope->AllocateVariables(info)) return false;
@@ -1138,7 +1138,8 @@ Variable* Scope::NewTemporary(const AstRawString* name,
return var;
}
-Declaration* DeclarationScope::CheckConflictingVarDeclarations() {
+Declaration* DeclarationScope::CheckConflictingVarDeclarations(
+ bool* allowed_catch_binding_var_redeclaration) {
if (has_checked_syntax_) return nullptr;
for (Declaration* decl : decls_) {
// Lexical vs lexical conflicts within the same scope have already been
@@ -1152,11 +1153,12 @@ Declaration* DeclarationScope::CheckConflictingVarDeclarations() {
// Iterate through all scopes until the declaration scope.
do {
// There is a conflict if there exists a non-VAR binding.
+ Variable* other_var = current->LookupLocal(decl->var()->raw_name());
if (current->is_catch_scope()) {
+ *allowed_catch_binding_var_redeclaration |= other_var != nullptr;
current = current->outer_scope();
continue;
}
- Variable* other_var = current->LookupLocal(decl->var()->raw_name());
if (other_var != nullptr) {
DCHECK(IsLexicalVariableMode(other_var->mode()));
return decl;
@@ -2586,8 +2588,8 @@ Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
bool* was_added) {
Variable* result = EnsureRareData()->private_name_map.Declare(
zone(), this, name, mode, NORMAL_VARIABLE,
- InitializationFlag::kNeedsInitialization,
- MaybeAssignedFlag::kMaybeAssigned, is_static_flag, was_added);
+ InitializationFlag::kNeedsInitialization, MaybeAssignedFlag::kNotAssigned,
+ is_static_flag, was_added);
if (*was_added) {
locals_.Add(result);
has_static_private_methods_ |=
@@ -2683,7 +2685,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
DCHECK(IsConstVariableMode(mode));
DCHECK_EQ(init_flag, InitializationFlag::kNeedsInitialization);
- DCHECK_EQ(maybe_assigned_flag, MaybeAssignedFlag::kMaybeAssigned);
+ DCHECK_EQ(maybe_assigned_flag, MaybeAssignedFlag::kNotAssigned);
// Add the found private name to the map to speed up subsequent
// lookups for the same name.
@@ -2725,7 +2727,7 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) {
if (var == nullptr) {
// It's only possible to fail to resolve private names here if
// this is at the top level or the private name is accessed through eval.
- DCHECK(info->is_eval() || outer_scope_->is_script_scope());
+ DCHECK(info->flags().is_eval() || outer_scope_->is_script_scope());
Scanner::Location loc = proxy->location();
info->pending_error_handler()->ReportMessageAt(
loc.beg_pos, loc.end_pos,
@@ -2812,7 +2814,7 @@ Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory,
Variable* brand = Declare(zone(), ast_value_factory->dot_brand_string(),
VariableMode::kConst, NORMAL_VARIABLE,
InitializationFlag::kNeedsInitialization,
- MaybeAssignedFlag::kMaybeAssigned, &was_added);
+ MaybeAssignedFlag::kNotAssigned, &was_added);
DCHECK(was_added);
brand->set_is_static_flag(is_static_flag);
brand->ForceContextAllocation();
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 08bbc696d9..11f44bb498 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -909,7 +909,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Check if the scope has conflicting var
// declarations, i.e. a var declaration that has been hoisted from a nested
// scope over a let binding of the same name.
- Declaration* CheckConflictingVarDeclarations();
+ Declaration* CheckConflictingVarDeclarations(
+ bool* allowed_catch_binding_var_redeclaration);
void set_has_checked_syntax(bool has_checked_syntax) {
has_checked_syntax_ = has_checked_syntax;
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index a3a5199620..7c6ee4324e 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -90,7 +90,10 @@ class Variable final : public ZoneObject {
}
void SetMaybeAssigned() {
if (mode() == VariableMode::kConst) return;
-
+ // Private names are only initialized once by us.
+ if (name_->IsPrivateName()) {
+ return;
+ }
// If this variable is dynamically shadowing another variable, then that
// variable could also be assigned (in the non-shadowing case).
if (has_local_if_not_shadowed()) {
diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h
index 9ef6160d2a..44151606c0 100644
--- a/deps/v8/src/base/address-region.h
+++ b/deps/v8/src/base/address-region.h
@@ -15,6 +15,14 @@ namespace base {
// Helper class representing an address region of certain size.
class AddressRegion {
public:
+ // Function object that compares the start address of two regions. Usable as
+ // compare function on std data structures and algorithms.
+ struct StartAddressLess {
+ bool operator()(base::AddressRegion a, base::AddressRegion b) const {
+ return a.begin() < b.begin();
+ }
+ };
+
using Address = uintptr_t;
AddressRegion() = default;
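
A hypothetical use of the new comparator (not part of the patch): it lets AddressRegion be stored directly in ordered standard containers keyed by start address.

#include <set>
#include "src/base/address-region.h"

using RegionSet =
    std::set<v8::base::AddressRegion, v8::base::AddressRegion::StartAddressLess>;

void Example() {
  RegionSet regions;
  regions.insert(v8::base::AddressRegion(0x3000, 0x200));
  regions.insert(v8::base::AddressRegion(0x1000, 0x100));
  // Iteration now visits regions in ascending start-address order:
  // 0x1000 first, then 0x3000.
}
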
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index f1c48fa135..bbdae525e3 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -605,7 +605,11 @@ CPU::CPU()
#endif
#elif V8_HOST_ARCH_ARM64
-// Implementer, variant and part are currently unused under ARM64.
+#ifdef V8_OS_WIN
+ // Windows makes high-resolution thread timing information available in
+ // user-space.
+ has_non_stop_time_stamp_counter_ = true;
+#endif // V8_OS_WIN
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 5b3b31ec1e..c3144f7ceb 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -67,7 +67,8 @@ class V8_BASE_EXPORT Mutex final {
return native_handle_;
}
- V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); }
+ V8_INLINE void AssertHeld() const { DCHECK_EQ(1, level_); }
+ V8_INLINE void AssertUnheld() const { DCHECK_EQ(0, level_); }
private:
NativeHandle native_handle_;
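
A short sketch (assumed usage, not from the patch) of how the now-const assertions can document locking expectations on internal helpers; both calls compile away outside debug builds.

#include "src/base/platform/mutex.h"

class Counter {
 public:
  void Increment() {
    v8::base::MutexGuard guard(&mutex_);
    IncrementLocked();
  }

 private:
  void IncrementLocked() {
    mutex_.AssertHeld();  // caller must already hold mutex_
    ++value_;
  }

  v8::base::Mutex mutex_;
  int value_ = 0;
};
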
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index e3d7c426b4..e1ccda2ab0 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -129,5 +129,34 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
+// static
+void* Stack::GetStackStart() {
+ // pthread_getthrds_np creates 3 values:
+ // __pi_stackaddr, __pi_stacksize, __pi_stackend
+
+ // higher address ----- __pi_stackend, stack base
+ //
+ // |
+ // | __pi_stacksize, stack grows downwards
+ // |
+ // V
+ //
+ // lower address ----- __pi_stackaddr, current sp
+
+ pthread_t tid = pthread_self();
+ struct __pthrdsinfo buf;
+ // clear buf
+ memset(&buf, 0, sizeof(buf));
+ char regbuf[1];
+ int regbufsize = sizeof(regbuf);
+ const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &buf,
+ sizeof(buf), regbuf, &regbufsize);
+ CHECK(!rc);
+ if (buf.__pi_stackend == NULL || buf.__pi_stackaddr == NULL) {
+ return nullptr;
+ }
+ return reinterpret_cast<void*>(buf.__pi_stackend);
+}
+
} // namespace base
} // namespace v8
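
As a rough sketch only (the include path and helper name are assumptions, not from the patch), the two Stack helpers touched here can be combined to estimate stack usage on the calling thread; since the stack grows downwards, GetStackStart() is the highest address of the stack.

#include <cstdint>
#include "src/base/platform/platform.h"

size_t ApproximateStackUsage() {
  auto start = reinterpret_cast<uintptr_t>(v8::base::Stack::GetStackStart());
  auto current =
      reinterpret_cast<uintptr_t>(v8::base::Stack::GetCurrentStackPosition());
  return start > current ? start - current : 0;
}
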
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 54f72e04e6..c3f0b08ddd 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -970,7 +970,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
// pthread_getattr_np used below is non portable (hence the _np suffix). We
// keep this version in POSIX as most Linux-compatible derivatives will
// support it. MacOS and FreeBSD are different here.
-#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(V8_OS_SOLARIS)
+#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX)
// static
void* Stack::GetStackStart() {
@@ -996,7 +996,7 @@ void* Stack::GetStackStart() {
return nullptr;
}
-#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(V8_OS_SOLARIS)
+#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX)
// static
void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index b4ac98ce73..b5b16dac56 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -65,23 +65,5 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
-// static
-void* Stack::GetStackStart() {
- pthread_attr_t attr;
- int error;
- pthread_attr_init(&attr);
- error = pthread_attr_get_np(pthread_self(), &attr);
- if (!error) {
- void* base;
- size_t size;
- error = pthread_attr_getstack(&attr, &base, &size);
- CHECK(!error);
- pthread_attr_destroy(&attr);
- return reinterpret_cast<uint8_t*>(base) + size;
- }
- pthread_attr_destroy(&attr);
- return nullptr;
-}
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index f07fd8e595..a12a5b0d0a 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -125,12 +125,6 @@ V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
}
#elif V8_OS_WIN
-V8_INLINE bool IsQPCReliable() {
- v8::base::CPU cpu;
- // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
- return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
-}
-
// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
LARGE_INTEGER perf_counter_now = {};
@@ -645,11 +639,6 @@ TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
-bool IsBuggyAthlon(const CPU& cpu) {
- // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
- return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
-}
-
void InitializeTimeTicksNowFunctionPointer() {
LARGE_INTEGER ticks_per_sec = {};
if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
@@ -667,8 +656,7 @@ void InitializeTimeTicksNowFunctionPointer() {
// ~72% of users fall within this category.
TimeTicksNowFunction now_function;
CPU cpu;
- if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
- IsBuggyAthlon(cpu)) {
+ if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
now_function = &RolloverProtectedNow;
} else {
now_function = &QPCNow;
@@ -800,8 +788,7 @@ ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
// static
bool ThreadTicks::IsSupportedWin() {
- static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
- !IsQPCReliable();
+ static bool is_supported = base::CPU().has_non_stop_time_stamp_counter();
return is_supported;
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 49f578d1fd..d340fd20b0 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -123,32 +123,23 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r0);
__ SmiUntag(r0);
+#ifdef V8_REVERSE_JSARGS
+ // Set up pointer to last argument (skip receiver).
+ __ add(
+ r4, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(r4, r0, r5);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+#else
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-
// Set up pointer to last argument.
__ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
// Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(r5, r0);
- // ----------- S t a t e -------------
- // -- r0: number of arguments (untagged)
- // -- r1: constructor function
- // -- r3: new target
- // -- r4: pointer to last argument
- // -- r5: counter
- // -- sp[0*kPointerSize]: the hole (receiver)
- // -- sp[1*kPointerSize]: number of arguments (tagged)
- // -- sp[2*kPointerSize]: context
- // -----------------------------------
- __ b(&entry);
- __ bind(&loop);
- __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(scratch);
- __ bind(&entry);
- __ sub(r5, r5, Operand(1), SetCC);
- __ b(ge, &loop);
+ __ PushArray(r4, r0, r5);
+#endif
// Call the function.
// r0: number of arguments (untagged)
@@ -239,29 +230,36 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(r3);
+
+#ifdef V8_REVERSE_JSARGS
+ // Push the allocated receiver to the stack.
+ __ Push(r0);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments, we saved in r6
+ // since r0 needs to store the number of arguments before
+ // InvokingFunction.
+ __ mov(r6, r0);
+
+ // Set up pointer to first argument (skip receiver).
+ __ add(
+ r4, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+#else
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
__ Push(r0, r0);
- // ----------- S t a t e -------------
- // -- r3: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
- // -----------------------------------
+ // Set up pointer to last argument.
+ __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+#endif
// Restore constructor function and argument count.
__ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r0);
- // Set up pointer to last argument.
- __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
Label enough_stack_space, stack_overflow;
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
__ b(&enough_stack_space);
@@ -275,29 +273,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(r5, r0);
- // ----------- S t a t e -------------
- // -- r0: number of arguments (untagged)
- // -- r3: new target
- // -- r4: pointer to last argument
- // -- r5: counter
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- r1 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
- // -----------------------------------
- __ b(&entry);
+ // Copy arguments to the expression stack.
+ __ PushArray(r4, r0, r5);
- __ bind(&loop);
- __ ldr(r6, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(r6);
- __ bind(&entry);
- __ sub(r5, r5, Operand(1), SetCC);
- __ b(ge, &loop);
+#ifdef V8_REVERSE_JSARGS
+ // Push implicit receiver.
+ __ Push(r6);
+#endif
// Call the function.
__ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
@@ -424,9 +406,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
+#ifndef V8_REVERSE_JSARGS
// Push receiver.
__ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
+#endif
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
@@ -443,19 +427,38 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
+#ifdef V8_REVERSE_JSARGS
+ Label done_loop, loop;
+ __ mov(r6, r3);
+
+ __ bind(&loop);
+ __ sub(r6, r6, Operand(1), SetCC);
+ __ b(lt, &done_loop);
+ __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Push(scratch);
+ __ b(&loop);
+
+ __ bind(&done_loop);
+
+ // Push receiver.
+ __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
+#else
Label done_loop, loop;
__ mov(r6, Operand(0));
__ bind(&loop);
__ cmp(r6, r3);
__ b(ge, &done_loop);
- __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done_loop);
+#endif
}
// Underlying function needs to have bytecode available.
@@ -744,13 +747,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Move(cp, context_address);
__ ldr(cp, MemOperand(cp));
- // Push the function and the receiver onto the stack.
- __ Push(r2, r3);
+ // Push the function.
+ __ Push(r2);
- // Check if we have enough stack space to push all arguments.
- // Clobbers r3.
+ // Check if we have enough stack space to push all arguments + receiver.
+ // Clobbers r5.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, r0, r3, &stack_overflow);
+ __ add(r6, r0, Operand(1)); // Add one for receiver.
+ Generate_StackOverflowCheck(masm, r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -762,19 +766,42 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// r1: new.target
// r2: function
+ // r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
+#ifdef V8_REVERSE_JSARGS
+ Label loop, entry;
+ __ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
+ // r6 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r5, MemOperand(r6, -kSystemPointerSize,
+ PreIndex)); // read next parameter
+ __ ldr(r5, MemOperand(r5)); // dereference handle
+ __ push(r5); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, r6);
+ __ b(ne, &loop);
+
+ // Push the receiver.
+ __ Push(r3);
+#else
+ // Push the receiver.
+ __ Push(r3);
+
Label loop, entry;
- __ add(r3, r4, Operand(r0, LSL, kPointerSizeLog2));
- // r1 points past last arg.
+ __ add(r3, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
+ // r3 points past last arg.
__ b(&entry);
__ bind(&loop);
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r5, MemOperand(r4, kSystemPointerSize,
+ PostIndex)); // read next parameter
__ ldr(r5, MemOperand(r5)); // dereference handle
__ push(r5); // push parameter
__ bind(&entry);
__ cmp(r4, r3);
__ b(ne, &loop);
+#endif
// Setup new.target and function.
__ mov(r3, r1);
@@ -1237,21 +1264,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register num_args, Register index,
- Register limit, Register scratch) {
- // Find the address of the last argument.
- __ mov(limit, num_args);
- __ mov(limit, Operand(limit, LSL, kPointerSizeLog2));
- __ sub(limit, index, limit);
-
- Label loop_header, loop_check;
- __ b(al, &loop_check);
- __ bind(&loop_header);
- __ ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
- __ push(scratch);
- __ bind(&loop_check);
- __ cmp(index, limit);
- __ b(hi, &loop_header);
+ Register num_args,
+ Register start_address,
+ Register scratch) {
+ // Find the argument with lowest address.
+ __ sub(scratch, num_args, Operand(1));
+ __ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
+ __ sub(start_address, start_address, scratch);
+ // Push the arguments.
+#ifdef V8_REVERSE_JSARGS
+ __ PushArray(start_address, num_args, scratch,
+ TurboAssembler::PushArrayOrder::kReverse);
+#else
+ __ PushArray(start_address, num_args, scratch);
+#endif
}
// static
@@ -1268,23 +1294,53 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ sub(r0, r0, Operand(1));
+ }
+#endif
+
__ add(r3, r0, Operand(1)); // Add one for receiver.
Generate_StackOverflowCheck(masm, r3, r4, &stack_overflow);
+#ifdef V8_REVERSE_JSARGS
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver. Argument count is correct.
+ __ mov(r3, r0);
+ }
+
+ // Push the arguments. r2 and r4 will be modified.
+ Generate_InterpreterPushArgs(masm, r3, r2, r4);
+
+ // Push "undefined" as the receiver arg if we need to.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register r2.
+ // r2 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ sub(r2, r2, Operand(kSystemPointerSize));
+ __ ldr(r2, MemOperand(r2));
+ }
+#else
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
__ mov(r3, r0); // Argument count is correct.
}
- // Push the arguments. r2, r4, r5 will be modified.
- Generate_InterpreterPushArgs(masm, r3, r2, r4, r5);
+ // Push the arguments. r2 and r4 will be modified.
+ Generate_InterpreterPushArgs(masm, r3, r2, r4);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r2); // Pass the spread in a register
__ sub(r0, r0, Operand(1)); // Subtract one for spread
}
+#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1315,14 +1371,39 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
+ __ add(r5, r0, Operand(1)); // Add one for receiver.
+
+ Generate_StackOverflowCheck(masm, r5, r6, &stack_overflow);
+
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ sub(r0, r0, Operand(1));
+ }
+
+ // Push the arguments. r4 and r5 will be modified.
+ Generate_InterpreterPushArgs(masm, r0, r4, r5);
+
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
__ push(r5);
- Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register r2.
+ // r4 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ sub(r4, r4, Operand(kSystemPointerSize));
+ __ ldr(r2, MemOperand(r4));
+ } else {
+ __ AssertUndefinedOrAllocationSite(r2, r5);
+ }
+#else
+ // Push a slot for the receiver to be constructed.
+ __ mov(r5, Operand::Zero());
+ __ push(r5);
- // Push the arguments. r5, r4, r6 will be modified.
- Generate_InterpreterPushArgs(masm, r0, r4, r5, r6);
+ // Push the arguments. r4 and r5 will be modified.
+ Generate_InterpreterPushArgs(masm, r0, r4, r5);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(r2); // Pass the spread in a register
@@ -1330,6 +1411,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r2, r5);
}
+#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r1);
@@ -1604,12 +1686,21 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+#ifdef V8_REVERSE_JSARGS
+ __ ldr(r1, MemOperand(sp, 0)); // receiver
+ __ cmp(r0, Operand(1));
+ __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
+ __ cmp(r0, Operand(2), ge);
+ __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
+#else
+ __ ldr(r1, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
+ __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // thisArg
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
- __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
+ ge); // argArray
+#endif
+ __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
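
With reversed arguments, Function.prototype.apply can read the receiver, thisArg and argArray at the fixed offsets sp[0], sp[kSystemPointerSize] and sp[2 * kSystemPointerSize], guarded only by comparisons against argc; the old path had to index relative to argc. Equivalent selection logic, sketched with a vector model where index 0 plays the role of sp[0] (names are illustrative, not V8 code):

// Illustrative sketch (not V8 code): select apply()'s operands by fixed slot.
#include <cstdio>
#include <string>
#include <vector>

struct ApplyArgs { std::string receiver, this_arg, arg_array; };

// stack[0] models sp[0]; argc excludes the receiver (reversed-args layout).
ApplyArgs LoadApplyArgs(const std::vector<std::string>& stack, int argc) {
  ApplyArgs out{stack.at(0), "undefined", "undefined"};
  if (argc >= 1) out.this_arg = stack.at(1);   // sp[kSystemPointerSize]
  if (argc >= 2) out.arg_array = stack.at(2);  // sp[2 * kSystemPointerSize]
  return out;
}

int main() {
  // fn.apply(thisArg, arr): argc == 2, receiver is the function itself.
  ApplyArgs a = LoadApplyArgs({"fn", "thisArg", "arr"}, 2);
  std::printf("%s %s %s\n", a.receiver.c_str(), a.this_arg.c_str(),
              a.arg_array.c_str());
}
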
@@ -1643,6 +1734,24 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+#ifdef V8_REVERSE_JSARGS
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ __ Pop(r1);
+
+ // 2. Make sure we have at least one argument.
+ // r0: actual number of arguments
+ {
+ Label done;
+ __ cmp(r0, Operand::Zero());
+ __ b(ne, &done);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ add(r0, r0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 3. Adjust the actual number of arguments.
+ __ sub(r0, r0, Operand(1));
+#else
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{
@@ -1656,7 +1765,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the callable to call (passed as receiver) from the stack.
// r0: actual number of arguments
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(r1, __ ReceiverOperand(r0));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1667,12 +1776,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register scratch = r3;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ add(r2, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ bind(&loop);
- __ ldr(scratch, MemOperand(r2, -kPointerSize));
+ __ ldr(scratch, MemOperand(r2, -kSystemPointerSize));
__ str(scratch, MemOperand(r2));
- __ sub(r2, r2, Operand(kPointerSize));
+ __ sub(r2, r2, Operand(kSystemPointerSize));
__ cmp(r2, sp);
__ b(ne, &loop);
// Adjust the actual number of arguments and remove the top element
@@ -1680,6 +1789,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ sub(r0, r0, Operand(1));
__ pop();
}
+#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
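
Under the reversed layout Function.prototype.call gets cheaper: the callable sits at sp[0] and is simply popped, leaving the remaining slots already arranged for the forwarded call, while the old path read the callable at sp[argc] and then shifted every argument by one slot. A sketch of both behaviours with a vector model (index 0 == sp[0] in each path's own layout; illustrative only):

// Illustrative sketch (not V8 code): both prototype.call paths on a vector.
#include <cassert>
#include <string>
#include <vector>

// Reversed-args path: the callable is at sp[0]; pop it and the remaining
// slots are already laid out for the forwarded call.
std::string CallReversed(std::vector<std::string>& stack, int& argc) {
  std::string callable = stack.front();
  stack.erase(stack.begin());  // __ Pop(r1)
  argc -= 1;                   // the first argument becomes the new receiver
  return callable;
}

// Classic path: the callable is at sp[argc]; copy every slot one step up
// (overwriting the old receiver), then drop the duplicated bottom slot.
std::string CallClassic(std::vector<std::string>& stack, int& argc) {
  std::string callable = stack.at(argc);
  for (int i = argc; i > 0; --i) stack[i] = stack[i - 1];
  stack.erase(stack.begin());  // __ pop()
  argc -= 1;
  return callable;
}

int main() {
  int argc = 2;
  std::vector<std::string> rev = {"callable", "newReceiver", "arg1"};
  assert(CallReversed(rev, argc) == "callable");
  assert((rev == std::vector<std::string>{"newReceiver", "arg1"}));

  argc = 2;  // classic layout: last argument at sp[0], receiver at sp[argc]
  std::vector<std::string> cls = {"arg1", "newReceiver", "callable"};
  assert(CallClassic(cls, argc) == "callable");
  assert((cls == std::vector<std::string>{"arg1", "newReceiver"}));
}
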
@@ -1693,6 +1803,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[8] : target
// -- sp[12] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
@@ -1701,13 +1812,24 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
+#ifdef V8_REVERSE_JSARGS
+ __ cmp(r0, Operand(1));
+ __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
+ __ cmp(r0, Operand(2), ge);
+ __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
+ __ cmp(r0, Operand(3), ge);
+ __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
+#else
__ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
+ __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
+ ge); // thisArgument
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
- __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
+ ge); // argumentsList
+#endif
+ __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
@@ -1734,6 +1856,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[8] : target
// -- sp[12] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// new.target into r3 (if present, otherwise use target), remove all
@@ -1742,15 +1865,30 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
- __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+#ifdef V8_REVERSE_JSARGS
+ __ mov(r4, r1);
+ __ cmp(r0, Operand(1));
+ __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
+ __ mov(r3, r1); // new.target defaults to target
+ __ cmp(r0, Operand(2), ge);
+ __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
+ __ cmp(r0, Operand(3), ge);
+ __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
+ __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
+ __ str(r4, MemOperand(sp, 0)); // set undefined to the receiver
+#else
+ __ str(r2, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
__ mov(r3, r1); // new.target defaults to target
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
+ ge); // argumentsList
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // new.target
- __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r3, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
+ ge); // new.target
+ __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
+#endif
}
// ----------- S t a t e -------------
@@ -1830,7 +1968,29 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, r4, scratch, &stack_overflow);
- // Push arguments onto the stack (thisArgument is already on the stack).
+#ifdef V8_REVERSE_JSARGS
+ // Move the arguments already in the stack,
+ // including the receiver and the return address.
+ {
+ Label copy, check;
+ Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
+ __ mov(src, sp);
+ // Update stack pointer.
+ __ lsl(scratch, r4, Operand(kSystemPointerSizeLog2));
+ __ AllocateStackSpace(scratch);
+ __ mov(dest, sp);
+ __ mov(num, r0);
+ __ b(&check);
+ __ bind(&copy);
+ __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
+ __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
+ __ sub(num, num, Operand(1), SetCC);
+ __ bind(&check);
+ __ b(ge, &copy);
+ }
+#endif
+
+ // Copy arguments onto the stack (thisArgument is already on the stack).
{
__ mov(r6, Operand(0));
__ LoadRoot(r5, RootIndex::kTheHoleValue);
@@ -1838,11 +1998,16 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&loop);
__ cmp(r6, r4);
__ b(eq, &done);
- __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(scratch, r5);
+ // Turn the hole into undefined as we go.
__ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
+#ifdef V8_REVERSE_JSARGS
+ __ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
+#else
__ Push(scratch);
+#endif
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
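
For Call/Construct with an arguments list, the reversed layout means the already-pushed receiver and fixed arguments are first moved down to open a gap, and the FixedArray elements are then written after them, converting holes to undefined on the way. The observable result, sketched with plain containers that abstract away the in-place move (-1 stands in for the hole; illustrative only):

// Illustrative sketch (not V8 code): append spread elements, hole->undefined.
#include <cstdio>
#include <vector>

// stack[0] models sp[0] in the reversed layout: receiver, then fixed args.
// extra models the FixedArray being spread; kHole marks an elided element.
std::vector<int> AppendVarargs(std::vector<int> stack,
                               const std::vector<int>& extra) {
  const int kHole = -1, kUndefined = 0;
  for (int v : extra)
    stack.push_back(v == kHole ? kUndefined : v);  // hole -> undefined
  return stack;
}

int main() {
  // f(a, ...[x, <hole>, y]) with receiver r: spread elements follow `a`.
  std::vector<int> s = AppendVarargs({/*r=*/9, /*a=*/1}, {7, -1, 8});
  for (size_t i = 0; i < s.size(); ++i) std::printf("sp[%zu]=%d ", i, s[i]);
  std::printf("\n");  // sp[0]=9 sp[1]=1 sp[2]=7 sp[3]=0 sp[4]=8
}
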
@@ -1981,7 +2146,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(r3);
} else {
Label convert_to_object, convert_receiver;
- __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(r3, __ ReceiverOperand(r0));
__ JumpIfSmi(r3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
@@ -2017,7 +2182,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ str(r3, __ ReceiverOperand(r0));
}
__ bind(&done_convert);
@@ -2073,10 +2238,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- __ mov(scratch, Operand(r4, LSL, kPointerSizeLog2));
+ __ mov(scratch, Operand(r4, LSL, kSystemPointerSizeLog2));
{
UseScratchRegisterScope temps(masm);
Register remaining_stack_size = temps.Acquire();
+ DCHECK(!AreAliased(r0, r1, r2, r3, r4, scratch, remaining_stack_size));
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
@@ -2096,6 +2262,25 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+#ifdef V8_REVERSE_JSARGS
+ // Pop receiver.
+ __ Pop(r5);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop;
+ __ add(r0, r0, r4); // Adjust effective number of arguments.
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ ldr(scratch, MemOperand(r2, r4, LSL, kTaggedSizeLog2));
+ __ Push(scratch);
+ __ b(gt, &loop);
+ }
+
+ // Push receiver.
+ __ Push(r5);
+#else
// Reserve stack space for the [[BoundArguments]].
__ AllocateStackSpace(scratch);
@@ -2106,8 +2291,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(r5, r0);
__ b(gt, &done_loop);
- __ ldr(scratch, MemOperand(sp, r4, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+ __ ldr(scratch, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, r5, LSL, kSystemPointerSizeLog2));
__ add(r4, r4, Operand(1));
__ add(r5, r5, Operand(1));
__ b(&loop);
@@ -2127,6 +2312,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ add(r0, r0, Operand(1));
__ b(gt, &loop);
}
+#endif
}
__ bind(&no_bound_arguments);
}
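
For bound functions the [[BoundArguments]] have to end up between the receiver and the caller-supplied arguments. The reversed-order path above pops the receiver, pushes the bound arguments from last to first so they keep their source order, and pushes the receiver back. Sketch with a vector model (index 0 == sp[0]; illustrative only):

// Illustrative sketch (not V8 code): splice [[BoundArguments]] in, vector model.
#include <cassert>
#include <string>
#include <vector>

// stack[0] models sp[0] (reversed layout: receiver first, then arguments).
void PushBoundArguments(std::vector<std::string>& stack,
                        const std::vector<std::string>& bound, int& argc) {
  std::string receiver = stack.front();
  stack.erase(stack.begin());             // __ Pop(r5)
  // Push from the last bound argument to the first, so bound[0] ends up
  // closest to the receiver.
  for (auto it = bound.rbegin(); it != bound.rend(); ++it)
    stack.insert(stack.begin(), *it);     // __ Push(scratch)
  stack.insert(stack.begin(), receiver);  // __ Push(r5)
  argc += static_cast<int>(bound.size());
}

int main() {
  int argc = 1;
  std::vector<std::string> stack = {"receiver", "callArg"};
  PushBoundArguments(stack, {"bound0", "bound1"}, argc);
  assert((stack == std::vector<std::string>{"receiver", "bound0", "bound1",
                                            "callArg"}));
  assert(argc == 3);
}
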
@@ -2143,7 +2329,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
- __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ str(r3, __ ReceiverOperand(r0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2183,7 +2369,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ str(r1, __ ReceiverOperand(r0));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2292,7 +2478,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ str(r1, __ ReceiverOperand(r0));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2319,9 +2505,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ b(eq, &dont_adapt_arguments);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+
+#ifndef V8_REVERSE_JSARGS
+ // This optimization is disabled when the arguments are reversed.
__ tst(r4,
Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
__ b(ne, &skip_adapt_arguments);
+#endif
// -------------------------------------------
// Adapt arguments.
@@ -2342,10 +2532,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
+#ifdef V8_REVERSE_JSARGS
+ __ add(r0, fp, Operand(r2, LSL, kSystemPointerSizeLog2));
+#else
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+#endif
// adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(2 * kSystemPointerSize));
+ __ sub(r4, r0, Operand(r2, LSL, kSystemPointerSizeLog2));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
@@ -2359,7 +2553,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r0, 0));
__ push(r5);
__ cmp(r0, r4); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
+ __ sub(r0, r0, Operand(kSystemPointerSize));
__ b(ne, &copy);
__ b(&invoke);
@@ -2371,6 +2565,49 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+#ifdef V8_REVERSE_JSARGS
+ // Fill the remaining expected arguments with undefined.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ __ LoadRoot(r5, RootIndex::kUndefinedValue);
+ __ sub(r6, r2, Operand::SmiUntag(r0));
+ __ sub(r4, fp, Operand(r6, LSL, kPointerSizeLog2));
+ // Adjust for frame.
+ __ sub(r4, r4,
+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r5);
+ __ cmp(sp, r4);
+ __ b(ne, &fill);
+
+ // Calculate copy start address into r0 and copy end address is fp.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r0: copy start address
+ // r1: function
+ // r2: expected number of arguments
+ // r3: new target (passed through to callee)
+ Label copy;
+ __ bind(&copy);
+
+ // Adjust load for return address and receiver.
+ __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
+ __ push(r5);
+
+ __ cmp(r0, fp); // Compare before moving to next argument.
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ b(ne, &copy);
+#else
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
// r1: function
@@ -2410,6 +2647,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ push(r5);
__ cmp(sp, r4);
__ b(ne, &fill);
+#endif
}
// Call the entry point.
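
In the reversed-order adaptor path the frame is built by pushing one undefined per missing formal parameter and then copying the receiver and actual arguments from the caller's frame. Semantically, a call that supplies fewer arguments than the callee expects still presents `expected` parameters, with the missing ones set to undefined. A sketch of that effect for the under-application case only (the real code also handles frame bookkeeping; illustrative, not V8 code):

// Illustrative sketch (not V8 code): what adaptation means for the callee.
#include <cstdio>
#include <string>
#include <vector>

// When actual < expected, the callee sees the provided arguments followed by
// undefined for every missing formal parameter.
std::vector<std::string> AdaptArguments(std::vector<std::string> actual,
                                        size_t expected) {
  while (actual.size() < expected) actual.push_back("undefined");
  return actual;
}

int main() {
  // function f(a, b, c) {} called as f(1): b and c are filled with undefined.
  for (const std::string& a : AdaptArguments({"1"}, 3))
    std::printf("%s ", a.c_str());
  std::printf("\n");  // 1 undefined undefined
}
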
@@ -2915,6 +3153,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- sp[(argc - 1) * 4] : first argument
// -- sp[(argc + 0) * 4] : receiver
// -----------------------------------
+ // NOTE: The order of args is reversed if V8_REVERSE_JSARGS
Register api_function_address = r1;
Register argc = r2;
@@ -2982,8 +3221,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+#ifdef V8_REVERSE_JSARGS
+ __ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
+#else
__ add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
__ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
+#endif
__ str(scratch, MemOperand(sp, 2 * kPointerSize));
// FunctionCallbackInfo::length_.
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 9c38ae085e..46ab7a61fa 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -143,17 +143,19 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Claim(slot_count);
// Preserve the incoming parameters on the stack.
- __ LoadRoot(x10, RootIndex::kTheHoleValue);
+ __ LoadRoot(x4, RootIndex::kTheHoleValue);
// Compute a pointer to the slot immediately above the location on the
// stack to which arguments will be later copied.
__ SlotAddress(x2, argc);
+#ifndef V8_REVERSE_JSARGS
// Poke the hole (receiver) in the highest slot.
- __ Str(x10, MemOperand(x2));
- __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
+ __ Str(x4, MemOperand(x2));
+#endif
// Store padding, if needed.
+ __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
__ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
@@ -162,9 +164,18 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Register count = x2;
Register dst = x10;
Register src = x11;
- __ Mov(count, argc);
__ SlotAddress(dst, 0);
+#ifdef V8_REVERSE_JSARGS
+ // Poke the hole (receiver).
+ __ Str(x4, MemOperand(dst));
+ __ Add(dst, dst, kSystemPointerSize); // Skip receiver.
+ __ Add(src, fp,
+ StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize); // Skip receiver.
+#else
__ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
+#endif
+ __ Mov(count, argc);
__ CopyDoubleWords(dst, src, count);
}
@@ -175,7 +186,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// If argc is odd:
// -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n-1)*kSystemPointerSize]: argument 1
// -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: padding
@@ -184,12 +195,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// If argc is even:
// -- sp[0*kSystemPointerSize]: argument n - 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 0
+ // -- sp[(n-1)*kSystemPointerSize]: argument 1
// -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
// Call the function.
__ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
@@ -264,8 +276,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
x4, x5);
+
__ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
RelocInfo::CODE_TARGET);
+
__ B(&post_instantiation_deopt_entry);
// Else: use TheHoleValue as receiver for constructor call
@@ -346,8 +360,15 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Register dst = x10;
Register src = x11;
__ Mov(count, x12);
+#ifdef V8_REVERSE_JSARGS
+ __ Poke(x0, 0); // Add the receiver.
+ __ SlotAddress(dst, 1); // Skip receiver.
+ __ Add(src, fp,
+ StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
+#else
__ SlotAddress(dst, 0);
__ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
+#endif
__ CopyDoubleWords(dst, src, count);
}
@@ -496,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Poke receiver into highest claimed slot.
__ LoadTaggedPointerField(
x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
- __ Poke(x5, Operand(x10, LSL, kSystemPointerSizeLog2));
+ __ Poke(x5, __ ReceiverOperand(x10));
// ----------- S t a t e -------------
// -- x1 : the JSGeneratorObject to resume
@@ -504,26 +525,33 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x10 : argument count
// -- cp : generator context
// -- lr : return address
- // -- sp[arg count] : generator receiver
- // -- sp[0 .. arg count - 1] : claimed for args
+ // -- sp[0 .. arg count] : claimed for receiver and args
// -----------------------------------
// Copy the function arguments from the generator object's register file.
-
__ LoadTaggedPointerField(
x5,
FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
+#ifdef V8_REVERSE_JSARGS
+ __ SlotAddress(x12, x10);
+ __ Add(x5, x5, Operand(x10, LSL, kTaggedSizeLog2));
+ __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Bind(&loop);
+ __ Sub(x10, x10, 1);
+ __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
+ __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
+#else
__ Mov(x12, 0);
-
__ Bind(&loop);
__ Sub(x10, x10, 1);
__ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
__ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
__ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
__ Add(x12, x12, 1);
+#endif
__ Cbnz(x10, &loop);
__ Bind(&done);
}
@@ -862,9 +890,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ SlotAddress(scratch, slots_to_claim);
__ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
+#ifdef V8_REVERSE_JSARGS
+ // Store receiver on the stack.
+ __ Poke(receiver, 0);
+ // Store function on the stack.
+ __ SlotAddress(scratch, argc);
+ __ Str(function, MemOperand(scratch, kSystemPointerSize));
+#else
// Store receiver and function on the stack.
__ SlotAddress(scratch, argc);
__ Stp(receiver, function, MemOperand(scratch));
+#endif
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -874,9 +910,21 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Skip the argument set up if we have no arguments.
__ Cbz(argc, &done);
- // scratch has been set to point to the location of the receiver, which
+ // scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
-
+#ifdef V8_REVERSE_JSARGS
+ __ SlotAddress(x0, 1); // Skips receiver.
+ __ Bind(&loop);
+ // Load the handle.
+ __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
+ // Dereference the handle.
+ __ Ldr(x11, MemOperand(x11));
+ // Poke the result into the stack.
+ __ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
+ // Loop if we've not reached the end of copy marker.
+ __ Cmp(x0, scratch);
+ __ B(le, &loop);
+#else
__ Bind(&loop);
// Load the handle.
__ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
@@ -887,6 +935,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Loop if we've not reached the end of copy marker.
__ Cmp(sp, scratch);
__ B(lt, &loop);
+#endif
__ Bind(&done);
@@ -1418,6 +1467,36 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
+#ifdef V8_REVERSE_JSARGS
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ Mov(slots_to_copy, num_args);
+ __ SlotAddress(stack_addr, 1);
+ } else {
+ // If we're not given an explicit receiver to store, we'll need to copy it
+ // together with the rest of the arguments.
+ __ Add(slots_to_copy, num_args, 1);
+ __ SlotAddress(stack_addr, 0);
+ }
+
+ __ Sub(last_arg_addr, first_arg_index,
+ Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
+ __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
+
+ // Load the final spread argument into spread_arg_out, if necessary.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
+ }
+
+ __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
+ TurboAssembler::kDstLessThanSrcAndReverse);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Store "undefined" as the receiver arg if we need to.
+ Register receiver = x14;
+ __ LoadRoot(receiver, RootIndex::kUndefinedValue);
+ __ Poke(receiver, 0);
+ }
+#else // !V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Store "undefined" as the receiver arg if we need to.
Register receiver = x14;
@@ -1443,6 +1522,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Copy the rest of the arguments.
__ SlotAddress(stack_addr, 0);
__ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
+#endif // !V8_REVERSE_JSARGS
}
// static
@@ -1778,14 +1858,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : thisArg (if argc >= 1)
// -- sp[16] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
+
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
Register argc = x0;
- Register arg_array = x2;
Register receiver = x1;
- Register this_arg = x0;
- Register undefined_value = x3;
- Register null_value = x4;
+ Register arg_array = x2;
+ Register this_arg = x3;
+ Register undefined_value = x4;
+ Register null_value = x5;
__ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
__ LoadRoot(null_value, RootIndex::kNullValue);
@@ -1793,8 +1875,21 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done;
+ __ Mov(this_arg, undefined_value);
+ __ Mov(arg_array, undefined_value);
+ __ Peek(receiver, 0);
+ __ Cmp(argc, Immediate(1));
+ __ B(lt, &done);
+ __ Peek(this_arg, kSystemPointerSize);
+ __ B(eq, &done);
+ __ Peek(arg_array, 2 * kSystemPointerSize);
+ __ bind(&done);
+ }
+#else // !V8_REVERSE_JSARGS
{
- Register saved_argc = x10;
Register scratch = x11;
// Push two undefined values on the stack, to put it in a consistent state
@@ -1814,16 +1909,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// There are now always three arguments to read, in the slots starting from
// slot argc.
__ SlotAddress(scratch, argc);
-
- __ Mov(saved_argc, argc);
- __ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
+ __ Ldp(arg_array, this_arg, MemOperand(scratch));
__ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
-
__ Drop(2); // Drop the undefined values we pushed above.
- __ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
-
- __ PushArgument(this_arg);
}
+#endif // !V8_REVERSE_JSARGS
+ __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ PushArgument(this_arg);
// ----------- S t a t e -------------
// -- x2 : argArray
@@ -1863,7 +1955,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
// 1. Get the callable to call (passed as receiver) from the stack.
- __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ Peek(function, __ ReceiverOperand(argc));
// 2. Handle case with no arguments.
{
@@ -1879,9 +1971,39 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Bind(&non_zero);
}
+ Label arguments_ready;
+#ifdef V8_REVERSE_JSARGS
+ // 3. Shift arguments, depending on whether the argument count is even or
+ // odd, that is, on whether a padding slot exists.
+ {
+ Label even;
+ Register copy_from = x10;
+ Register copy_to = x11;
+ Register count = x12;
+ __ Mov(count, argc); // CopyDoubleWords changes the count argument.
+ __ Tbz(argc, 0, &even);
+
+ // Shift arguments one slot down on the stack (overwriting the original
+ // receiver).
+ __ SlotAddress(copy_from, 1);
+ __ Sub(copy_to, copy_from, kSystemPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, count);
+ // Overwrite the duplicated remaining last argument.
+ __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ B(&arguments_ready);
+
+ // Copy arguments one slot higher in memory, overwriting the original
+ // receiver and padding.
+ __ Bind(&even);
+ __ SlotAddress(copy_from, count);
+ __ Add(copy_to, copy_from, kSystemPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, count,
+ TurboAssembler::kSrcLessThanDst);
+ __ Drop(2);
+ }
+#else // !V8_REVERSE_JSARGS
// 3. Overwrite the receiver with padding. If argc is odd, this is all we
// need to do.
- Label arguments_ready;
__ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
__ Tbnz(argc, 0, &arguments_ready);
@@ -1902,6 +2024,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Drop two slots. These are copies of the last two arguments.
__ Drop(2);
}
+#endif // !V8_REVERSE_JSARGS
// 5. Adjust argument count to make the original first argument the new
// receiver and call the callable.
@@ -1918,6 +2041,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[16] : target (if argc >= 1)
// -- sp[24] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
+
ASM_LOCATION("Builtins::Generate_ReflectApply");
Register argc = x0;
@@ -1931,6 +2056,23 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done;
+ __ Mov(target, undefined_value);
+ __ Mov(this_argument, undefined_value);
+ __ Mov(arguments_list, undefined_value);
+ __ Cmp(argc, Immediate(1));
+ __ B(lt, &done);
+ __ Peek(target, kSystemPointerSize);
+ __ B(eq, &done);
+ __ Peek(this_argument, 2 * kSystemPointerSize);
+ __ Cmp(argc, Immediate(3));
+ __ B(lt, &done);
+ __ Peek(arguments_list, 3 * kSystemPointerSize);
+ __ bind(&done);
+ }
+#else // !V8_REVERSE_JSARGS
{
// Push four undefined values on the stack, to put it in a consistent state
// so that we can always read the three arguments we need from it. The
@@ -1967,10 +2109,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
__ Drop(4); // Drop the undefined values we pushed above.
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
-
- __ PushArgument(this_argument);
}
+#endif // !V8_REVERSE_JSARGS
+ __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ PushArgument(this_argument);
// ----------- S t a t e -------------
// -- x2 : argumentsList
@@ -1995,6 +2137,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[16] : target
// -- sp[24] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
+
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
Register argc = x0;
@@ -2009,6 +2153,24 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done;
+ __ Mov(target, undefined_value);
+ __ Mov(arguments_list, undefined_value);
+ __ Mov(new_target, undefined_value);
+ __ Cmp(argc, Immediate(1));
+ __ B(lt, &done);
+ __ Peek(target, kSystemPointerSize);
+ __ B(eq, &done);
+ __ Peek(arguments_list, 2 * kSystemPointerSize);
+ __ Mov(new_target, target); // new.target defaults to target
+ __ Cmp(argc, Immediate(3));
+ __ B(lt, &done);
+ __ Peek(new_target, 3 * kSystemPointerSize);
+ __ bind(&done);
+ }
+#else // !V8_REVERSE_JSARGS
{
// Push four undefined values on the stack, to put it in a consistent state
// so that we can always read the three arguments we need from it. The
@@ -2048,11 +2210,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ CmovX(new_target, target, ls); // target if argc <= 2.
__ Drop(4); // Drop the undefined values we pushed above.
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
-
- // Push receiver (undefined).
- __ PushArgument(undefined_value);
}
+#endif // !V8_REVERSE_JSARGS
+
+ __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+
+ // Push receiver (undefined).
+ __ PushArgument(undefined_value);
// ----------- S t a t e -------------
// -- x2 : argumentsList
@@ -2105,6 +2269,39 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// one slot up or one slot down, as needed.
void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register len) {
+#ifdef V8_REVERSE_JSARGS
+ Label even;
+ Register slots_to_copy = x10;
+ Register slots_to_claim = x12;
+
+ __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ __ Mov(slots_to_claim, len);
+ __ Tbz(slots_to_claim, 0, &even);
+
+ // Claim the space we need. If argc is odd, slots_to_claim = len + 1, as we
+ // need one extra padding slot. If argc is even, we know that the original
+ // arguments already include a padding slot we can reuse (since len is odd),
+ // so slots_to_claim = len - 1.
+ {
+ Register scratch = x11;
+ __ Add(slots_to_claim, len, 1);
+ __ And(scratch, argc, 1);
+ __ Eor(scratch, scratch, 1);
+ __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
+
+ __ Bind(&even);
+ __ Claim(slots_to_claim);
+
+ // Move the arguments already in the stack including the receiver.
+ {
+ Register src = x11;
+ Register dst = x12;
+ __ SlotAddress(src, slots_to_claim);
+ __ SlotAddress(dst, 0);
+ __ CopyDoubleWords(dst, src, slots_to_copy);
+ }
+#else // !V8_REVERSE_JSARGS
Label len_odd, exit;
Register slots_to_copy = x10; // If needed.
__ Add(slots_to_copy, argc, 1);
@@ -2158,6 +2355,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
}
__ Bind(&exit);
+#endif // !V8_REVERSE_JSARGS
}
} // namespace
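
The arm64 stack pointer has to stay 16-byte aligned, so when len is odd the claim is rounded to an even slot count: with an even argc the existing padding slot above the arguments can be reused (claim len - 1), otherwise an extra padding slot is claimed (len + 1). The same computation as the bit-twiddling above, sketched in plain C++ (my reading of the snippet; illustrative only):

// Illustrative sketch (not V8 code): the slots_to_claim parity computation.
#include <cassert>

// How many slots to claim before copying `len` extra arguments, given `argc`
// arguments (receiver excluded) already on an sp that must stay 16-byte
// aligned; mirrors the And/Eor/Sub sequence above.
int SlotsToClaim(int argc, int len) {
  if ((len & 1) == 0) return len;     // an even count keeps alignment as-is
  int scratch = (argc & 1) ^ 1;       // 1 if argc is even
  return (len + 1) - (scratch << 1);  // argc even: len - 1, argc odd: len + 1
}

int main() {
  assert(SlotsToClaim(4, 2) == 2);  // len even: claim exactly len
  assert(SlotsToClaim(4, 3) == 2);  // argc even, len odd: reuse padding slot
  assert(SlotsToClaim(3, 3) == 4);  // argc odd, len odd: add a padding slot
}
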
@@ -2217,6 +2415,19 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// We do not use the CompareRoot macro as it would do a LoadRoot behind the
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
+#ifdef V8_REVERSE_JSARGS
+ Register dst = x16;
+ __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
+ __ SlotAddress(dst, dst);
+ __ Add(argc, argc, len); // Update new argc.
+ __ Bind(&loop);
+ __ Sub(len, len, 1);
+ __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
+ __ CmpTagged(scratch, the_hole_value);
+ __ Csel(scratch, scratch, undefined_value, ne);
+ __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
+ __ Cbnz(len, &loop);
+#else
__ Bind(&loop);
__ Sub(len, len, 1);
__ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
@@ -2224,9 +2435,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Csel(scratch, scratch, undefined_value, ne);
__ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
__ Cbnz(len, &loop);
+#endif
}
__ Bind(&done);
-
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2368,7 +2579,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(x3);
} else {
Label convert_to_object, convert_receiver;
- __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
+ __ Peek(x3, __ ReceiverOperand(x0));
__ JumpIfSmi(x3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
@@ -2403,7 +2614,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Bind(&convert_receiver);
}
- __ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
+ __ Poke(x3, __ ReceiverOperand(x0));
}
__ Bind(&done_convert);
@@ -2474,6 +2685,83 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
+#ifdef V8_REVERSE_JSARGS
+ Label copy_bound_args;
+ Register total_argc = x15;
+ Register slots_to_claim = x12;
+ Register scratch = x10;
+ Register receiver = x14;
+
+ __ Add(total_argc, argc, bound_argc);
+ __ Peek(receiver, 0);
+
+ // Round up slots_to_claim to an even number if it is odd.
+ __ Add(slots_to_claim, bound_argc, 1);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
+ __ Claim(slots_to_claim, kSystemPointerSize);
+
+ __ Tbz(bound_argc, 0, &copy_bound_args);
+ {
+ Label argc_even;
+ __ Tbz(argc, 0, &argc_even);
+ // Arguments count is odd (with the receiver it's even), so there's no
+ // alignment padding above the arguments and we have to "add" it. We
+ // claimed bound_argc + 1, since it is odd and it was rounded up. +1 here
+ // is for stack alignment padding.
+ // 1. Shift args one slot down.
+ {
+ Register copy_from = x11;
+ Register copy_to = x12;
+ __ SlotAddress(copy_to, slots_to_claim);
+ __ Add(copy_from, copy_to, kSystemPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, argc);
+ }
+ // 2. Write a padding in the last slot.
+ __ Add(scratch, total_argc, 1);
+ __ Str(padreg, MemOperand(sp, scratch, LSL, kSystemPointerSizeLog2));
+ __ B(&copy_bound_args);
+
+ __ Bind(&argc_even);
+ // Arguments count is even (with the receiver it's odd), so there's an
+ // alignment padding above the arguments and we can reuse it. We need to
+ // claim bound_argc - 1, but we claimed bound_argc + 1, since it is odd
+ // and it was rounded up.
+ // 1. Drop 2.
+ __ Drop(2);
+ // 2. Shift args one slot up.
+ {
+ Register copy_from = x11;
+ Register copy_to = x12;
+ __ SlotAddress(copy_to, total_argc);
+ __ Sub(copy_from, copy_to, kSystemPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, argc,
+ TurboAssembler::kSrcLessThanDst);
+ }
+ }
+
+ // If bound_argc is even, there is no alignment massage to do, and we have
+ // already claimed the correct number of slots (bound_argc).
+ __ Bind(&copy_bound_args);
+
+ // Copy the receiver back.
+ __ Poke(receiver, 0);
+ // Copy [[BoundArguments]] to the stack (below the receiver).
+ {
+ Label loop;
+ Register counter = bound_argc;
+ Register copy_to = x12;
+ __ Add(bound_argv, bound_argv, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SlotAddress(copy_to, 1);
+ __ Bind(&loop);
+ __ Sub(counter, counter, 1);
+ __ LoadAnyTaggedField(scratch,
+ MemOperand(bound_argv, kTaggedSize, PostIndex));
+ __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
+ __ Cbnz(counter, &loop);
+ }
+ // Update argc.
+ __ Mov(argc, total_argc);
+#else // !V8_REVERSE_JSARGS
// Check if we need padding.
Label copy_args, copy_bound_args;
Register total_argc = x15;
@@ -2546,6 +2834,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
}
+#endif // !V8_REVERSE_JSARGS
}
__ Bind(&no_bound_arguments);
}
@@ -2563,7 +2852,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
__ LoadAnyTaggedField(x10,
FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
- __ Poke(x10, Operand(x0, LSL, kSystemPointerSizeLog2));
+ __ Poke(x10, __ ReceiverOperand(x0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2604,7 +2893,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+ __ Poke(x1, __ ReceiverOperand(x0));
+
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2720,7 +3010,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+ __ Poke(x1, __ ReceiverOperand(x0));
+
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2793,18 +3084,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// alignment of the arguments.
// If the number of expected arguments is larger than the number of actual
// arguments, the remaining expected slots will be filled with undefined.
+ // TODO(v8:10201) update comment once reversed arguments order sticks
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register argc_actual_minus_expected = x5;
- Label create_adaptor_frame, dont_adapt_arguments, stack_overflow,
- adapt_arguments_in_place;
+ Label create_adaptor_frame, dont_adapt_arguments, stack_overflow;
__ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
+#ifndef V8_REVERSE_JSARGS
+ // This optimization is disabled when the arguments are reversed.
+ Label adapt_arguments_in_place;
+ Register argc_actual_minus_expected = x5;
+
// When the difference between argc_actual and argc_expected is odd, we
// create an arguments adaptor frame.
__ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
@@ -2818,6 +3113,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ TestAndBranchIfAnySet(
w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
&adapt_arguments_in_place);
+#endif
// -------------------------------------------
// Create an arguments adaptor frame.
@@ -2828,11 +3124,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10;
- Register copy_end = x11;
Register copy_to = x12;
+ Register copy_end = x11;
Register argc_to_copy = x13;
- Register argc_unused_actual = x14;
- Register scratch1 = x15, scratch2 = x16;
+ Register scratch1 = x15;
// We need slots for the expected arguments, with one extra slot for the
// receiver.
@@ -2846,12 +3141,61 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kSystemPointerSize);
+#ifdef V8_REVERSE_JSARGS
+ // If we don't have enough arguments, fill the remaining expected
+ // arguments with undefined, otherwise skip this step.
+ Label enough_arguments;
+ __ Cmp(argc_actual, argc_expected);
+ __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
+ __ Add(argc_to_copy, argc_to_copy, 1); // Include receiver.
+ __ B(ge, &enough_arguments);
+
+ // Fill the remaining expected arguments with undefined.
+ __ RecordComment("-- Fill slots with undefined --");
+ Label fill;
+ // scratch1 still contains the size of the claimed area,
+ // which is RoundUp(argc_expected + 1, 2).
+ __ SlotAddress(copy_to, scratch1);
+ __ SlotAddress(copy_end, argc_to_copy);
+ __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
+ // Now we can write pairs of undefineds, potentially overwriting one word
+ // below copy_end, but that's ok because that slot is still within claimed
+ // region. This loop will execute at least once because at this point we
+ // know that there's at least one undefined to be pushed and
+ // argc_to_copy >= 1.
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, -2 * kSystemPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &fill);
+
+ // Enough arguments.
+ __ Bind(&enough_arguments);
+
+ // Store padding if needed, when expected arguments is even.
+ __ RecordComment("-- Store padding --");
+ Label skip_padding;
+ __ Tbnz(argc_expected, 0, &skip_padding);
+ __ SlotAddress(scratch1, argc_expected);
+ __ Str(padreg, MemOperand(scratch1, kSystemPointerSize));
+ __ bind(&skip_padding);
+
+ // Copy arguments.
+ __ RecordComment("-- Copy actual arguments --");
__ Mov(copy_to, sp);
+ __ Add(copy_from, fp, 2 * kSystemPointerSize);
+ __ CopyDoubleWords(copy_to, copy_from, argc_to_copy);
+
+#else // !V8_REVERSE_JSARGS
+ Register argc_unused_actual = x14;
+ Register scratch2 = x16;
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
// much as possible.
+ __ Mov(copy_to, sp);
+
// (1) If we don't have enough arguments, fill the remaining expected
// arguments with undefined, otherwise skip this step.
Label enough_arguments;
@@ -2918,6 +3262,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
__ Str(scratch1,
MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
+#endif
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2939,6 +3284,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
}
+#ifndef V8_REVERSE_JSARGS
// -----------------------------------------
// Adapt arguments in the existing frame.
// -----------------------------------------
@@ -2976,6 +3322,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ B(&dont_adapt_arguments);
}
}
+#endif
// -------------------------------------------
// Dont adapt arguments.
@@ -3492,6 +3839,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- sp[(argc - 1) * 8] : first argument
// -- sp[(argc + 0) * 8] : receiver
// -----------------------------------
+ // NOTE: The order of args on the stack is reversed if V8_REVERSE_JSARGS
Register api_function_address = x1;
Register argc = x2;
@@ -3561,9 +3909,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+#ifdef V8_REVERSE_JSARGS
+ __ Add(scratch, scratch,
+ Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
+#else
__ Add(scratch, scratch,
Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
__ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
+#endif
__ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index cee0b1e1a4..3d2a456efb 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -3,92 +3,91 @@
// found in the LICENSE file.
namespace array {
- macro ConvertToRelativeIndex(index: Number, length: Number): Number {
- return index < 0 ? Max(index + length, 0) : Min(index, length);
- }
+macro ConvertToRelativeIndex(index: Number, length: Number): Number {
+ return index < 0 ? Max(index + length, 0) : Min(index, length);
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
- transitioning javascript builtin ArrayPrototypeCopyWithin(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // 1. Let O be ? ToObject(this value).
- const object: JSReceiver = ToObject_Inline(context, receiver);
+// https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
+transitioning javascript builtin ArrayPrototypeCopyWithin(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(object);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(object);
- // 3. Let relativeTarget be ? ToInteger(target).
- const relativeTarget: Number = ToInteger_Inline(arguments[0]);
+ // 3. Let relativeTarget be ? ToInteger(target).
+ const relativeTarget: Number = ToInteger_Inline(arguments[0]);
- // 4. If relativeTarget < 0, let to be max((len + relativeTarget), 0);
- // else let to be min(relativeTarget, len).
- let to: Number = ConvertToRelativeIndex(relativeTarget, length);
+ // 4. If relativeTarget < 0, let to be max((len + relativeTarget), 0);
+ // else let to be min(relativeTarget, len).
+ let to: Number = ConvertToRelativeIndex(relativeTarget, length);
- // 5. Let relativeStart be ? ToInteger(start).
- const relativeStart: Number = ToInteger_Inline(arguments[1]);
+ // 5. Let relativeStart be ? ToInteger(start).
+ const relativeStart: Number = ToInteger_Inline(arguments[1]);
- // 6. If relativeStart < 0, let from be max((len + relativeStart), 0);
- // else let from be min(relativeStart, len).
- let from: Number = ConvertToRelativeIndex(relativeStart, length);
+ // 6. If relativeStart < 0, let from be max((len + relativeStart), 0);
+ // else let from be min(relativeStart, len).
+ let from: Number = ConvertToRelativeIndex(relativeStart, length);
- // 7. If end is undefined, let relativeEnd be len;
- // else let relativeEnd be ? ToInteger(end).
- let relativeEnd: Number = length;
- if (arguments[2] != Undefined) {
- relativeEnd = ToInteger_Inline(arguments[2]);
- }
+ // 7. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ let relativeEnd: Number = length;
+ if (arguments[2] != Undefined) {
+ relativeEnd = ToInteger_Inline(arguments[2]);
+ }
- // 8. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
- // else let final be min(relativeEnd, len).
- const final: Number = ConvertToRelativeIndex(relativeEnd, length);
+ // 8. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const final: Number = ConvertToRelativeIndex(relativeEnd, length);
- // 9. Let count be min(final-from, len-to).
- let count: Number = Min(final - from, length - to);
+ // 9. Let count be min(final-from, len-to).
+ let count: Number = Min(final - from, length - to);
- // 10. If from<to and to<from+count, then.
- let direction: Number = 1;
+ // 10. If from<to and to<from+count, then.
+ let direction: Number = 1;
- if (from < to && to < (from + count)) {
- // a. Let direction be -1.
- direction = -1;
+ if (from < to && to < (from + count)) {
+ // a. Let direction be -1.
+ direction = -1;
- // b. Let from be from + count - 1.
- from = from + count - 1;
+ // b. Let from be from + count - 1.
+ from = from + count - 1;
- // c. Let to be to + count - 1.
- to = to + count - 1;
- }
+ // c. Let to be to + count - 1.
+ to = to + count - 1;
+ }
- // 12. Repeat, while count > 0.
- while (count > 0) {
- // a. Let fromKey be ! ToString(from).
- // b. Let toKey be ! ToString(to).
- // c. Let fromPresent be ? HasProperty(O, fromKey).
- const fromPresent: Boolean = HasProperty(object, from);
-
- // d. If fromPresent is true, then.
- if (fromPresent == True) {
- // i. Let fromVal be ? Get(O, fromKey).
- const fromVal: JSAny = GetProperty(object, from);
-
- // ii. Perform ? Set(O, toKey, fromVal, true).
- SetProperty(object, to, fromVal);
- } else {
- // i. Perform ? DeletePropertyOrThrow(O, toKey).
- DeleteProperty(object, to, LanguageMode::kStrict);
- }
-
- // f. Let from be from + direction.
- from = from + direction;
-
- // g. Let to be to + direction.
- to = to + direction;
-
- // h. Let count be count - 1.
- --count;
+ // 12. Repeat, while count > 0.
+ while (count > 0) {
+ // a. Let fromKey be ! ToString(from).
+ // b. Let toKey be ! ToString(to).
+ // c. Let fromPresent be ? HasProperty(O, fromKey).
+ const fromPresent: Boolean = HasProperty(object, from);
+
+ // d. If fromPresent is true, then.
+ if (fromPresent == True) {
+ // i. Let fromVal be ? Get(O, fromKey).
+ const fromVal: JSAny = GetProperty(object, from);
+
+ // ii. Perform ? Set(O, toKey, fromVal, true).
+ SetProperty(object, to, fromVal);
+ } else {
+ // i. Perform ? DeletePropertyOrThrow(O, toKey).
+ DeleteProperty(object, to, LanguageMode::kStrict);
}
- // 13. Return O.
- return object;
+ // f. Let from be from + direction.
+ from = from + direction;
+
+ // g. Let to be to + direction.
+ to = to + direction;
+
+ // h. Let count be count - 1.
+ --count;
}
+
+ // 13. Return O.
+ return object;
+}
}
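
The Torque above is only re-indented by this change; it implements Array.prototype.copyWithin, including the step-10 trick of copying backwards when the source range overlaps the destination from below, so elements are not overwritten before they are read. The same direction logic in a standalone sketch (plain ints instead of JS property access, no holes or bounds clamping; illustrative only):

// Illustrative sketch (not V8/Torque code): the copy-direction choice only.
#include <cassert>
#include <vector>

// Copies `count` elements from index `from` to index `to` within the same
// vector, picking the direction like steps 10-12 above so an overlapping
// forward copy cannot overwrite elements before reading them.
void CopyWithin(std::vector<int>& a, long to, long from, long count) {
  long direction = 1;
  if (from < to && to < from + count) {  // overlap with the destination ahead
    direction = -1;
    from = from + count - 1;
    to = to + count - 1;
  }
  while (count > 0) {
    a[to] = a[from];
    from += direction;
    to += direction;
    --count;
  }
}

int main() {
  std::vector<int> a = {0, 1, 2, 3, 4, 5};
  CopyWithin(a, /*to=*/2, /*from=*/0, /*count=*/3);
  // Same result as [0,1,2,3,4,5].copyWithin(2, 0, 3) in JS.
  assert((a == std::vector<int>{0, 1, 0, 1, 2, 5}));
}
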
diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq
index 4e5f99d40a..2514a18b74 100644
--- a/deps/v8/src/builtins/array-every.tq
+++ b/deps/v8/src/builtins/array-every.tq
@@ -3,145 +3,142 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayEveryLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayEveryLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
+transitioning javascript builtin
+ArrayEveryLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayEveryLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
+
+transitioning javascript builtin
+ArrayEveryLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+ result: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. every() needs
+ // to pick up at the next step, which is either continuing to the next
+ // array element or returning false if {result} is false.
+ if (!ToBoolean(result)) {
+ return False;
}
- transitioning javascript builtin
- ArrayEveryLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
- result: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. every() needs
- // to pick up at the next step, which is either continuing to the next
- // array element or returning false if {result} is false.
- if (!ToBoolean(result)) {
- return False;
- }
+ numberK = numberK + 1;
- numberK = numberK + 1;
+ return ArrayEveryLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
- return ArrayEveryLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
- }
+transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, _array: JSAny,
+ o: JSReceiver, initialK: Number, length: Number, _initialTo: JSAny): JSAny {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
- transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- _array: JSAny, o: JSReceiver, initialK: Number, length: Number,
- _initialTo: JSAny): JSAny {
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(o, k);
-
- // 6c. If kPresent is true, then
- if (kPresent == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, k);
-
- // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
- const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
-
- // iii. If selected is true, then...
- if (!ToBoolean(result)) {
- return False;
- }
- }
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
- // 6d. Increase k by 1. (done by the loop).
- }
- return True;
- }
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
- transitioning macro FastArrayEvery(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- const fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- let fastOW = NewFastJSArrayWitness(fastO);
-
- // Build a fast loop over the smi array.
- for (; k < smiLen; k++) {
- fastOW.Recheck() otherwise goto Bailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k);
- const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
- const result: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ // iii. If selected is true, then...
if (!ToBoolean(result)) {
return False;
}
}
- return True;
- }
- // https://tc39.github.io/ecma262/#sec-array.prototype.every
- transitioning javascript builtin
- ArrayEvery(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.every');
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return True;
+}
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+transitioning macro FastArrayEvery(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (!ToBoolean(result)) {
+ return False;
+ }
+ }
+ return True;
+}
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+// https://tc39.github.io/ecma262/#sec-array.prototype.every
+transitioning javascript builtin
+ArrayEvery(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.every');
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto TypeError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // Special cases.
- try {
- return FastArrayEvery(o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(kValue: Smi) deferred {
- return ArrayEveryLoopContinuation(
- o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
- }
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
}
- label TypeError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArrayEvery(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ } label Bailout(kValue: Smi) deferred {
+ return ArrayEveryLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
}
+ } label TypeError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
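
The array-every.tq changes above are mostly a reindentation; the one behavioral simplification is that thisArg is now read as arguments[1] directly, presumably because an out-of-range Torque arguments access already yields Undefined. For orientation, here is a hedged TypeScript sketch of the spec steps that ArrayEvery and ArrayEveryLoopContinuation implement together; it is illustrative only, ignores the FastArrayEvery fast path and the deopt continuations, and its ToLength clamp is approximate:

// Illustrative sketch only; not part of the patch.
function arrayEverySketch(receiver: unknown, callbackfn: unknown,
                          thisArg?: unknown): boolean {
  // RequireObjectCoercible + 1. Let O be ? ToObject(this value).
  if (receiver === null || receiver === undefined) {
    throw new TypeError('Array.prototype.every called on null or undefined');
  }
  const o: any = Object(receiver);
  // 2. Let len be ? ToLength(? Get(O, "length")) -- the clamp here is approximate.
  const len = Math.min(Math.max(Math.trunc(Number(o.length)) || 0, 0),
                       Number.MAX_SAFE_INTEGER);
  // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
  if (typeof callbackfn !== 'function') {
    throw new TypeError(`${String(callbackfn)} is not a function`);
  }
  // 5./6. Repeat, while k < len.
  for (let k = 0; k < len; k++) {
    // 6b. Let kPresent be ? HasProperty(O, Pk) -- holes are skipped.
    if (k in o) {
      // 6c. Get the element, call the callback, and bail out on a falsy result.
      if (!callbackfn.call(thisArg, o[k], k, o)) return false;
    }
  }
  // 7. Return true.
  return true;
}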
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 1da1c55166..1add88fa6a 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -3,200 +3,197 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayFilterLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
- length: JSAny, initialTo: JSAny): JSAny {
- // All continuation points in the optimized filter implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberTo = Cast<Number>(initialTo) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
+transitioning javascript builtin
+ArrayFilterLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+ length: JSAny, initialTo: JSAny): JSAny {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberTo = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFilterLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength, numberTo);
+}
- return ArrayFilterLoopContinuation(
- jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
- numberLength, numberTo);
+transitioning javascript builtin
+ArrayFilterLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+ length: JSAny, valueK: JSAny, initialTo: JSAny, result: JSAny): JSAny {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ let numberTo = Cast<Number>(initialTo) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. filter() needs
+ // to pick up at the next step, which is setting the callback
+ // result in the output array. After incrementing k and to, we can glide
+ // into the loop continuation builtin.
+ if (ToBoolean(result)) {
+ FastCreateDataProperty(outputArray, numberTo, valueK);
+ numberTo = numberTo + 1;
}
- transitioning javascript builtin
- ArrayFilterLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
- length: JSAny, valueK: JSAny, initialTo: JSAny, result: JSAny): JSAny {
- // All continuation points in the optimized filter implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
- let numberTo = Cast<Number>(initialTo) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. filter() needs
- // to pick up at the next step, which is setting the callback
- // result in the output array. After incrementing k and to, we can glide
- // into the loop continuation builtin.
- if (ToBoolean(result)) {
- FastCreateDataProperty(outputArray, numberTo, valueK);
- numberTo = numberTo + 1;
- }
-
- numberK = numberK + 1;
+ numberK = numberK + 1;
- return ArrayFilterLoopContinuation(
- jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
- numberLength, numberTo);
- }
+ return ArrayFilterLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength, numberTo);
+}
- transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
- initialTo: Number): JSAny {
- let to: Number = initialTo;
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(o, k);
-
- // 6c. If kPresent is true, then
- if (kPresent == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, k);
-
- // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
- const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
-
- // iii. If selected is true, then...
- if (ToBoolean(result)) {
- // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
- FastCreateDataProperty(array, to, kValue);
- // 2. Increase to by 1.
- to = to + 1;
- }
+transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+ array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
+ initialTo: Number): JSAny {
+ let to: Number = initialTo;
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
+
+ // iii. If selected is true, then...
+ if (ToBoolean(result)) {
+ // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
+ FastCreateDataProperty(array, to, kValue);
+ // 2. Increase to by 1.
+ to = to + 1;
}
-
- // 6d. Increase k by 1. (done by the loop).
}
- return array;
+
+ // 6d. Increase k by 1. (done by the loop).
}
+ return array;
+}
- transitioning macro FastArrayFilter(implicit context: Context)(
- fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny,
- output: FastJSArray) labels Bailout(Number, Number) {
- let k: Smi = 0;
- let to: Smi = 0;
- let fastOW = NewFastJSArrayWitness(fastO);
- let fastOutputW = NewFastJSArrayWitness(output);
-
- fastOutputW.EnsureArrayPushable() otherwise goto Bailout(k, to);
-
- // Build a fast loop over the array.
- for (; k < len; k++) {
- fastOW.Recheck() otherwise goto Bailout(k, to);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k, to);
- const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
- const result: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
- if (ToBoolean(result)) {
- try {
- // Since the call to {callbackfn} is observable, we can't
- // use the Bailout label until we've successfully stored.
- // Hence the {SlowStore} label.
- fastOutputW.Recheck() otherwise SlowStore;
- if (fastOutputW.Get().length != to) goto SlowStore;
- fastOutputW.Push(value) otherwise SlowStore;
- }
- label SlowStore {
- FastCreateDataProperty(fastOutputW.stable, to, value);
- }
- to = to + 1;
+transitioning macro FastArrayFilter(implicit context: Context)(
+ fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny,
+ output: FastJSArray) labels
+Bailout(Number, Number) {
+ let k: Smi = 0;
+ let to: Smi = 0;
+ let fastOW = NewFastJSArrayWitness(fastO);
+ let fastOutputW = NewFastJSArrayWitness(output);
+
+ fastOutputW.EnsureArrayPushable() otherwise goto Bailout(k, to);
+
+ // Build a fast loop over the array.
+ for (; k < len; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k, to);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k, to);
+ const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(result)) {
+ try {
+ // Since the call to {callbackfn} is observable, we can't
+ // use the Bailout label until we've successfully stored.
+ // Hence the {SlowStore} label.
+ fastOutputW.Recheck() otherwise SlowStore;
+ if (fastOutputW.Get().length != to) goto SlowStore;
+ fastOutputW.Push(value) otherwise SlowStore;
+ } label SlowStore {
+ FastCreateDataProperty(fastOutputW.stable, to, value);
}
+ to = to + 1;
}
}
+}
- // This method creates a 0-length array with the ElementsKind of the
- // receiver if possible, otherwise, bails out. It makes sense for the
- // caller to know that the slow case needs to be invoked.
- macro FastFilterSpeciesCreate(implicit context: Context)(
- receiver: JSReceiver): JSReceiver labels Slow {
- const len: Smi = 0;
- if (IsArraySpeciesProtectorCellInvalid()) goto Slow;
- const o = Cast<FastJSArray>(receiver) otherwise Slow;
- const newMap: Map =
- LoadJSArrayElementsMap(o.map.elements_kind, LoadNativeContext(context));
- return AllocateJSArray(ElementsKind::PACKED_SMI_ELEMENTS, newMap, len, len);
- }
+// This method creates a 0-length array with the ElementsKind of the
+// receiver if possible, otherwise, bails out. It makes sense for the
+// caller to know that the slow case needs to be invoked.
+macro FastFilterSpeciesCreate(implicit context: Context)(receiver: JSReceiver):
+ JSReceiver labels Slow {
+ const len: Smi = 0;
+ if (IsArraySpeciesProtectorCellInvalid()) goto Slow;
+ const o = Cast<FastJSArray>(receiver) otherwise Slow;
+ const newMap: Map =
+ LoadJSArrayElementsMap(o.map.elements_kind, LoadNativeContext(context));
+ return AllocateJSArray(ElementsKind::PACKED_SMI_ELEMENTS, newMap, len, len);
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.filter
- transitioning javascript builtin
- ArrayFilter(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.filter');
+// https://tc39.github.io/ecma262/#sec-array.prototype.filter
+transitioning javascript builtin
+ArrayFilter(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.filter');
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto TypeError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- let output: JSReceiver;
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+ let output: JSReceiver;
+
+ // Special cases.
+ let k: Number = 0;
+ let to: Number = 0;
+ try {
+ output = FastFilterSpeciesCreate(o) otherwise SlowSpeciesCreate;
- // Special cases.
- let k: Number = 0;
- let to: Number = 0;
try {
- output = FastFilterSpeciesCreate(o) otherwise SlowSpeciesCreate;
-
- try {
- const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k, to);
- const fastOutput =
- Cast<FastJSArray>(output) otherwise goto Bailout(k, to);
- const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, to);
-
- FastArrayFilter(fastO, smiLen, callbackfn, thisArg, fastOutput)
- otherwise Bailout;
- return output;
- }
- label Bailout(kValue: Number, toValue: Number) deferred {
- k = kValue;
- to = toValue;
- }
- }
- label SlowSpeciesCreate {
- output = ArraySpeciesCreate(context, receiver, 0);
+ const smiLen: Smi = Cast<Smi>(len) otherwise goto Bailout(k, to);
+ const fastOutput =
+ Cast<FastJSArray>(output) otherwise goto Bailout(k, to);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, to);
+
+ FastArrayFilter(fastO, smiLen, callbackfn, thisArg, fastOutput)
+ otherwise Bailout;
+ return output;
+ } label Bailout(kValue: Number, toValue: Number) deferred {
+ k = kValue;
+ to = toValue;
}
-
- return ArrayFilterLoopContinuation(
- o, callbackfn, thisArg, output, o, k, len, to);
- }
- label TypeError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label SlowSpeciesCreate {
+ output = ArraySpeciesCreate(context, receiver, 0);
}
+
+ return ArrayFilterLoopContinuation(
+ o, callbackfn, thisArg, output, o, k, len, to);
+ } label TypeError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
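
array-filter.tq ends here. The part that distinguishes it from every() is the second cursor `to`: selected values are written into the output with FastCreateDataProperty and `to` only advances on a hit. A small TypeScript sketch of that loop, illustrative only; species construction is simplified to a plain array, which roughly corresponds to the FastFilterSpeciesCreate shortcut above:

// Illustrative sketch only; not part of the patch.
function arrayFilterSketch<T>(o: ArrayLike<T>,
                              callbackfn: (v: T, k: number, o: ArrayLike<T>) => unknown,
                              thisArg?: unknown): T[] {
  const out: T[] = [];  // stands in for ArraySpeciesCreate(O, 0)
  let to = 0;           // next index to fill in the output
  for (let k = 0; k < o.length; k++) {
    // HasProperty check -- holes contribute nothing to the output.
    if (k in (o as object)) {
      const kValue = o[k];
      // If selected is true, CreateDataProperty(A, ToString(to), kValue), then to += 1.
      if (callbackfn.call(thisArg, kValue, k, o)) {
        out[to] = kValue;
        to += 1;
      }
    }
  }
  return out;
}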
diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq
index cd3ec45e98..9b53f9c700 100644
--- a/deps/v8/src/builtins/array-find.tq
+++ b/deps/v8/src/builtins/array-find.tq
@@ -3,152 +3,149 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayFindLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized find implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayFindLoopContinuation(
- jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
- }
+transitioning javascript builtin
+ArrayFindLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized find implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFindLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+}
+
+transitioning javascript builtin
+ArrayFindLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
+ _result: JSAny): JSAny {
+ // This deopt continuation point is never actually called, it just
+ // exists to make stack traces correct from a ThrowTypeError if the
+ // callback was found to be non-callable.
+ unreachable;
+}
- transitioning javascript builtin
- ArrayFindLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
- _result: JSAny): JSAny {
- // This deopt continuation point is never actually called, it just
- // exists to make stack traces correct from a ThrowTypeError if the
- // callback was found to be non-callable.
- unreachable;
+// Continuation that is called after a lazy deoptimization from TF that
+// happens right after the callback and it's returned value must be handled
+// before iteration continues.
+transitioning javascript builtin
+ArrayFindLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+ foundValue: JSAny, isFound: JSAny): JSAny {
+ // All continuation points in the optimized find implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. find() needs
+ // to pick up at the next step, which is returning the element if the
+ // callback value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+
+ if (ToBoolean(isFound)) {
+ return foundValue;
}
- // Continuation that is called after a lazy deoptimization from TF that
- // happens right after the callback and it's returned value must be handled
- // before iteration continues.
- transitioning javascript builtin
- ArrayFindLoopAfterCallbackLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
- foundValue: JSAny, isFound: JSAny): JSAny {
- // All continuation points in the optimized find implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. find() needs
- // to pick up at the next step, which is returning the element if the
- // callback value is truthy. Otherwise, continue the search by calling the
- // continuation.
-
- if (ToBoolean(isFound)) {
- return foundValue;
+ return ArrayFindLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+}
+
+transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number, length: Number): JSAny {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. i. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
+ // O>>)).
+ const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
+
+ // 6d. If testResult is true, return kValue.
+ if (ToBoolean(testResult)) {
+ return value;
}
- return ArrayFindLoopContinuation(
- jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ // 6e. Increase k by 1. (done by the loop).
}
+ return Undefined;
+}
- transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- o: JSReceiver, initialK: Number, length: Number): JSAny {
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 6b. i. Let kValue be ? Get(O, Pk).
- const value: JSAny = GetProperty(o, k);
-
- // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
- // O>>)).
- const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
-
- // 6d. If testResult is true, return kValue.
- if (ToBoolean(testResult)) {
- return value;
- }
-
- // 6e. Increase k by 1. (done by the loop).
+transitioning macro FastArrayFind(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ const testResult: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(testResult)) {
+ return value;
}
- return Undefined;
}
+ return Undefined;
+}
- transitioning macro FastArrayFind(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- let fastOW = NewFastJSArrayWitness(fastO);
-
- // Build a fast loop over the smi array.
- for (; k < smiLen; k++) {
- fastOW.Recheck() otherwise goto Bailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k);
-
- const value: JSAny = fastOW.LoadElementOrUndefined(k);
- const testResult: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
- if (ToBoolean(testResult)) {
- return value;
- }
+// https://tc39.github.io/ecma262/#sec-array.prototype.find
+transitioning javascript builtin
+ArrayPrototypeFind(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.find');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
}
- return Undefined;
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallableError;
- // https://tc39.github.io/ecma262/#sec-array.prototype.find
- transitioning javascript builtin
- ArrayPrototypeFind(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
try {
- RequireObjectCoercible(receiver, 'Array.prototype.find');
-
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
-
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto NotCallableError;
- }
- const callbackfn =
- Cast<Callable>(arguments[0]) otherwise NotCallableError;
-
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
-
- // Special cases.
- try {
- return FastArrayFind(o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(k: Smi) deferred {
- return ArrayFindLoopContinuation(o, callbackfn, thisArg, o, k, len);
- }
- }
- label NotCallableError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return FastArrayFind(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Smi) deferred {
+ return ArrayFindLoopContinuation(o, callbackfn, thisArg, o, k, len);
}
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
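
array-find.tq ends here. One detail visible in ArrayFindLoopContinuation above: unlike every/filter/forEach, find() performs no HasProperty check -- it calls GetProperty on every index, so holes are observed as undefined. A hedged TypeScript sketch of that loop, illustrative only:

// Illustrative sketch only; not part of the patch.
function arrayFindSketch<T>(o: ArrayLike<T>,
                            predicate: (v: T, k: number, o: ArrayLike<T>) => unknown,
                            thisArg?: unknown): T | undefined {
  for (let k = 0; k < o.length; k++) {
    // 6b.i. Let kValue be ? Get(O, Pk) -- no HasProperty, holes read as undefined.
    const value = o[k];
    // 6c./6d. If ToBoolean(? Call(predicate, T, <<kValue, k, O>>)) is true, return kValue.
    if (predicate.call(thisArg, value, k, o)) return value;
  }
  // 7. Return undefined.
  return undefined;
}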
diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq
index 05a2640646..ed70a12259 100644
--- a/deps/v8/src/builtins/array-findindex.tq
+++ b/deps/v8/src/builtins/array-findindex.tq
@@ -3,154 +3,149 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayFindIndexLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized findIndex implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayFindIndexLoopContinuation(
- jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
- }
+transitioning javascript builtin
+ArrayFindIndexLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized findIndex implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayFindIndexLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+}
+
+transitioning javascript builtin
+ArrayFindIndexLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
+ _result: JSAny): JSAny {
+ // This deopt continuation point is never actually called, it just
+ // exists to make stack traces correct from a ThrowTypeError if the
+ // callback was found to be non-callable.
+ unreachable;
+}
- transitioning javascript builtin
- ArrayFindIndexLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- _callback: JSAny, _thisArg: JSAny, _initialK: JSAny, _length: JSAny,
- _result: JSAny): JSAny {
- // This deopt continuation point is never actually called, it just
- // exists to make stack traces correct from a ThrowTypeError if the
- // callback was found to be non-callable.
- unreachable;
+// Continuation that is called after a lazy deoptimization from TF that
+// happens right after the callback and it's returned value must be handled
+// before iteration continues.
+transitioning javascript builtin
+ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+ foundValue: JSAny, isFound: JSAny): JSAny {
+ // All continuation points in the optimized findIndex implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. find() needs
+ // to pick up at the next step, which is returning the element if the
+ // callback value is truthy. Otherwise, continue the search by calling the
+ // continuation.
+
+ if (ToBoolean(isFound)) {
+ return foundValue;
}
- // Continuation that is called after a lazy deoptimization from TF that
- // happens right after the callback and it's returned value must be handled
- // before iteration continues.
- transitioning javascript builtin
- ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
- foundValue: JSAny, isFound: JSAny): JSAny {
- // All continuation points in the optimized findIndex implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. find() needs
- // to pick up at the next step, which is returning the element if the
- // callback value is truthy. Otherwise, continue the search by calling the
- // continuation.
-
- if (ToBoolean(isFound)) {
- return foundValue;
+ return ArrayFindIndexLoopContinuation(
+ jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+}
+
+transitioning builtin ArrayFindIndexLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, o: JSReceiver,
+ initialK: Number, length: Number): Number {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 6b. i. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
+
+ // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
+ // O>>)).
+ const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
+
+ // 6d. If testResult is true, return k.
+ if (ToBoolean(testResult)) {
+ return k;
}
- return ArrayFindIndexLoopContinuation(
- jsreceiver, callbackfn, thisArg, jsreceiver, numberK, numberLength);
+ // 6e. Increase k by 1. (done by the loop).
}
+ return Convert<Smi>(-1);
+}
- transitioning builtin ArrayFindIndexLoopContinuation(implicit context:
- Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- o: JSReceiver, initialK: Number, length: Number): Number {
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 6b. i. Let kValue be ? Get(O, Pk).
- const value: JSAny = GetProperty(o, k);
-
- // 6c. Let testResult be ToBoolean(? Call(predicate, T, <<kValue, k,
- // O>>)).
- const testResult: JSAny = Call(context, callbackfn, thisArg, value, k, o);
-
- // 6d. If testResult is true, return k.
- if (ToBoolean(testResult)) {
- return k;
- }
-
- // 6e. Increase k by 1. (done by the loop).
+transitioning macro FastArrayFindIndex(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): Number
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+
+ const value: JSAny = fastOW.LoadElementOrUndefined(k);
+ const testResult: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(testResult)) {
+ return k;
}
- return Convert<Smi>(-1);
}
+ return -1;
+}
- transitioning macro FastArrayFindIndex(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): Number
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- let fastOW = NewFastJSArrayWitness(fastO);
-
- // Build a fast loop over the smi array.
- for (; k < smiLen; k++) {
- fastOW.Recheck() otherwise goto Bailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k);
-
- const value: JSAny = fastOW.LoadElementOrUndefined(k);
- const testResult: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
- if (ToBoolean(testResult)) {
- return k;
- }
+// https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
+transitioning javascript builtin
+ArrayPrototypeFindIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.findIndex');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NotCallableError;
}
- return -1;
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallableError;
- // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
- transitioning javascript builtin
- ArrayPrototypeFindIndex(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
try {
- RequireObjectCoercible(receiver, 'Array.prototype.findIndex');
-
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
-
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto NotCallableError;
- }
- const callbackfn =
- Cast<Callable>(arguments[0]) otherwise NotCallableError;
-
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
-
- // Special cases.
- try {
- return FastArrayFindIndex(o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(k: Smi) deferred {
- return ArrayFindIndexLoopContinuation(
- o, callbackfn, thisArg, o, k, len);
- }
- }
- label NotCallableError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return FastArrayFindIndex(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ } label Bailout(k: Smi) deferred {
+ return ArrayFindIndexLoopContinuation(o, callbackfn, thisArg, o, k, len);
}
+ } label NotCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
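
array-findindex.tq ends here. Like the other builtins in this patch, it splits into a fast path (FastArrayFindIndex, valid only while the receiver remains a FastJSArray and the witness Recheck succeeds) and a generic continuation that resumes at the bailout index. A rough TypeScript analogue of that control flow; the isFastShape predicate below is a made-up stand-in for the witness checks and is not a V8 API:

// Illustrative sketch only; not part of the patch.
function findIndexWithBailout<T>(o: T[],
                                 predicate: (v: T, k: number, o: T[]) => unknown,
                                 isFastShape: (o: T[]) => boolean): number {
  let k = 0;
  // Fast loop: runs only while the array keeps the shape the fast path expects.
  while (k < o.length && isFastShape(o)) {
    if (predicate(o[k], k, o)) return k;
    k++;
  }
  // Bailout: the generic loop continuation picks up at the same index k.
  for (; k < o.length; k++) {
    if (predicate(o[k], k, o)) return k;
  }
  // If no element matches, findIndex returns -1.
  return -1;
}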
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index b30c8533e6..938210dcdc 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -3,129 +3,126 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayForEachLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized forEach implemntation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
+transitioning javascript builtin
+ArrayForEachLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized forEach implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayForEachLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
- return ArrayForEachLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
- }
+transitioning javascript builtin
+ArrayForEachLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+ _result: JSAny): JSAny {
+ // All continuation points in the optimized forEach implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayForEachLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
- transitioning javascript builtin
- ArrayForEachLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
- _result: JSAny): JSAny {
- // All continuation points in the optimized forEach implemntation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
+transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, _array: JSAny,
+ o: JSReceiver, initialK: Number, len: Number, _to: JSAny): JSAny {
+ // variables {array} and {to} are ignored.
- return ArrayForEachLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
- }
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < len; k = k + 1) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
- transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- _array: JSAny, o: JSReceiver, initialK: Number, len: Number,
- _to: JSAny): JSAny {
- // variables {array} and {to} are ignored.
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < len; k = k + 1) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, k);
- // 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(o, k);
-
- // 6c. If kPresent is true, then
- if (kPresent == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, k);
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ Call(context, callbackfn, thisArg, kValue, k, o);
+ }
- // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
- Call(context, callbackfn, thisArg, kValue, k, o);
- }
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return Undefined;
+}
- // 6d. Increase k by 1. (done by the loop).
- }
- return Undefined;
+transitioning macro FastArrayForEach(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: JSAny = fastOW.LoadElementNoHole(k)
+ otherwise continue;
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
}
+ return Undefined;
+}
+
+// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
+transitioning javascript builtin
+ArrayForEach(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.forEach');
+
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- transitioning macro FastArrayForEach(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- let fastOW = NewFastJSArrayWitness(fastO);
-
- // Build a fast loop over the smi array.
- for (; k < smiLen; k++) {
- fastOW.Recheck() otherwise goto Bailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k);
- const value: JSAny = fastOW.LoadElementNoHole(k)
- otherwise continue;
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
}
- return Undefined;
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
- // https://tc39.github.io/ecma262/#sec-array.prototype.foreach
- transitioning javascript builtin
- ArrayForEach(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ let k: Number = 0;
try {
- RequireObjectCoercible(receiver, 'Array.prototype.forEach');
-
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
-
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto TypeError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
-
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
-
- // Special cases.
- let k: Number = 0;
- try {
- return FastArrayForEach(o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(kValue: Smi) deferred {
- k = kValue;
- }
-
- return ArrayForEachLoopContinuation(
- o, callbackfn, thisArg, Undefined, o, k, len, Undefined);
- }
- label TypeError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return FastArrayForEach(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ } label Bailout(kValue: Smi) deferred {
+ k = kValue;
}
+
+ return ArrayForEachLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, k, len, Undefined);
+ } label TypeError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
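
array-foreach.tq ends here (besides the reindent, the patch fixes the "implemntation" typo in two comments). A compact TypeScript sketch of the loop that ArrayForEachLoopContinuation implements, illustrative only: forEach skips holes via the HasProperty check and always returns undefined:

// Illustrative sketch only; not part of the patch.
function arrayForEachSketch<T>(o: ArrayLike<T>,
                               callbackfn: (v: T, k: number, o: ArrayLike<T>) => void,
                               thisArg?: unknown): void {
  const len = o.length;
  for (let k = 0; k < len; k++) {
    // 6b./6c. Only visit indices that are actually present (holes are skipped).
    if (k in (o as object)) {
      callbackfn.call(thisArg, o[k], k, o);
    }
  }
  // 7. Return undefined (implicit).
}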
diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq
index 6eb3b00693..e51d37fef2 100644
--- a/deps/v8/src/builtins/array-from.tq
+++ b/deps/v8/src/builtins/array-from.tq
@@ -3,182 +3,181 @@
// found in the LICENSE file.
namespace array {
- // Array.from( items [, mapfn [, thisArg ] ] )
- // ES #sec-array.from
- transitioning javascript builtin
- ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- JSReceiver {
- // Use fast path if:
- // * |items| is the only argument, and
- // * the receiver is the Array function.
- if (arguments.length == 1 && receiver == GetArrayFunction()) {
- try {
- return iterator::FastIterableToList(arguments[0]) otherwise Slow;
- }
- label Slow {
- // fall through
- }
+// Array.from( items [, mapfn [, thisArg ] ] )
+// ES #sec-array.from
+transitioning javascript builtin
+ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
+ JSReceiver {
+ // Use fast path if:
+ // * |items| is the only argument, and
+ // * the receiver is the Array function.
+ if (arguments.length == 1 && receiver == GetArrayFunction()) {
+ try {
+ return iterator::FastIterableToList(arguments[0]) otherwise Slow;
+ } label Slow {
+ // fall through
}
+ }
- const items = arguments[0];
- const mapfn = arguments[1];
- const thisArg = arguments[2];
-
- // 1. Let C be the this value.
- const c = receiver;
-
- let mapping: bool;
- // 2. If mapfn is undefined, let mapping be false.
- if (mapfn == Undefined) {
- mapping = false;
- } else {
- // a. If IsCallable(mapfn) is false, throw a TypeError exception.
- if (!TaggedIsCallable(mapfn)) deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn);
- }
- // b. Let mapping be true.
- mapping = true;
+ const items = arguments[0];
+ const mapfn = arguments[1];
+ const thisArg = arguments[2];
+
+ // 1. Let C be the this value.
+ const c = receiver;
+
+ let mapping: bool;
+ // 2. If mapfn is undefined, let mapping be false.
+ if (mapfn == Undefined) {
+ mapping = false;
+ } else {
+ // a. If IsCallable(mapfn) is false, throw a TypeError exception.
+ if (!Is<Callable>(mapfn)) deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn);
+ }
+ // b. Let mapping be true.
+ mapping = true;
+ }
+
+ // 4. Let usingIterator be ? GetMethod(items, @@iterator).
+ // 5. If usingIterator is not undefined, then
+ try {
+ const usingIterator = GetMethod(items, IteratorSymbolConstant())
+ otherwise IteratorIsUndefined, IteratorNotCallable;
+
+ let a: JSReceiver;
+ // a. If IsConstructor(C) is true, then
+ typeswitch (c) {
+ case (c: Constructor): {
+ // i. Let A be ? Construct(C).
+ a = Construct(c);
+ }
+ case (JSAny): {
+ // i. Let A be ? ArrayCreate(0).
+ a = ArrayCreate(0);
+ }
}
- // 4. Let usingIterator be ? GetMethod(items, @@iterator).
- // 5. If usingIterator is not undefined, then
- try {
- const usingIterator = GetMethod(items, IteratorSymbolConstant())
- otherwise IteratorIsUndefined, IteratorNotCallable;
-
- let a: JSReceiver;
- // a. If IsConstructor(C) is true, then
- typeswitch (c) {
- case (c: Constructor): {
- // i. Let A be ? Construct(C).
- a = Construct(c);
- }
- case (JSAny): {
- // i. Let A be ? ArrayCreate(0).
- a = ArrayCreate(0);
- }
+ // c. Let iteratorRecord be ? GetIterator(items, sync, usingIterator).
+ const iteratorRecord = iterator::GetIterator(items, usingIterator);
+
+ const fastIteratorResultMap = GetIteratorResultMap();
+
+ // d. Let k be 0.
+ let k: Smi = 0;
+ // e. Repeat,
+ while (true) {
+      // i. If k ≄ 2^53-1, then
+ // 1. Let error be ThrowCompletion(a newly created TypeError object).
+ // 2. Return ? IteratorClose(iteratorRecord, error).
+ // The spec requires that we throw an exception if index reaches 2^53-1,
+ // but an empty loop would take >100 days to do this many iterations. To
+ // actually run for that long would require an iterator that never set
+ // done to true and a target array which somehow never ran out of
+ // memory, e.g. a proxy that discarded the values. Ignoring this case
+ // just means we would repeatedly call CreateDataProperty with index =
+ // 2^53
+ assert(k < kMaxSafeInteger);
+
+ // ii. Let Pk be ! ToString(k).
+
+ // iii. Let next be ? IteratorStep(iteratorRecord).
+ let next: JSReceiver;
+ try {
+ next = iterator::IteratorStep(iteratorRecord, fastIteratorResultMap)
+ otherwise NextIsFalse;
+ }
+ // iv. If next is false, then
+ label NextIsFalse {
+ // 1. Perform ? Set(A, "length", k, true).
+ array::SetPropertyLength(a, k);
+ // 2. Return A.
+ return a;
}
- // c. Let iteratorRecord be ? GetIterator(items, sync, usingIterator).
- const iteratorRecord = iterator::GetIterator(items, usingIterator);
-
- const fastIteratorResultMap = GetIteratorResultMap();
-
- // d. Let k be 0.
- let k: Smi = 0;
- // e. Repeat,
- while (true) {
-        // i. If k ≄ 2^53-1, then
- // 1. Let error be ThrowCompletion(a newly created TypeError object).
- // 2. Return ? IteratorClose(iteratorRecord, error).
- // The spec requires that we throw an exception if index reaches 2^53-1,
- // but an empty loop would take >100 days to do this many iterations. To
- // actually run for that long would require an iterator that never set
- // done to true and a target array which somehow never ran out of
- // memory, e.g. a proxy that discarded the values. Ignoring this case
- // just means we would repeatedly call CreateDataProperty with index =
- // 2^53
- assert(k < kMaxSafeInteger);
-
- // ii. Let Pk be ! ToString(k).
-
- // iii. Let next be ? IteratorStep(iteratorRecord).
- let next: JSReceiver;
- try {
- next = iterator::IteratorStep(iteratorRecord, fastIteratorResultMap)
- otherwise NextIsFalse;
- }
- // iv. If next is false, then
- label NextIsFalse {
- // 1. Perform ? Set(A, "length", k, true).
- array::SetPropertyLength(a, k);
- // 2. Return A.
- return a;
- }
+ // v. Let nextValue be ? IteratorValue(next).
+ const nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- // v. Let nextValue be ? IteratorValue(next).
- const nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
-
- let mappedValue: JSAny;
- // vi. If mapping is true, then
- if (mapping) {
-          // 1. Let mappedValue be Call(mapfn, thisArg, « nextValue, k »).
- // 2. If mappedValue is an abrupt completion,
- // return ? IteratorClose(iteratorRecord, mappedValue).
- // 3. Set mappedValue to mappedValue.[[Value]].
- try {
- mappedValue = Call(
- context, UnsafeCast<Callable>(mapfn), thisArg, nextValue, k);
- } catch (e) {
- iterator::IteratorCloseOnException(iteratorRecord, e);
- }
- } else {
- mappedValue = nextValue;
- }
- // viii. Let defineStatus be
- // CreateDataPropertyOrThrow(A, Pk, mappedValue).
- // ix. If defineStatus is an abrupt completion,
- // return ? IteratorClose(iteratorRecord, defineStatus).
+ let mappedValue: JSAny;
+ // vi. If mapping is true, then
+ if (mapping) {
+        // 1. Let mappedValue be Call(mapfn, thisArg, « nextValue, k »).
+ // 2. If mappedValue is an abrupt completion,
+ // return ? IteratorClose(iteratorRecord, mappedValue).
+ // 3. Set mappedValue to mappedValue.[[Value]].
try {
- FastCreateDataProperty(a, k, mappedValue);
- } catch (e) deferred {
- iterator::IteratorCloseOnException(iteratorRecord, e);
+ mappedValue =
+ Call(context, UnsafeCast<Callable>(mapfn), thisArg, nextValue, k);
+ } catch (e) {
+ iterator::IteratorCloseOnException(iteratorRecord);
+ ReThrow(context, e);
}
- // x. Set k to k + 1.
- k += 1;
+ } else {
+ mappedValue = nextValue;
}
- unreachable;
+ // viii. Let defineStatus be
+ // CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ // ix. If defineStatus is an abrupt completion,
+ // return ? IteratorClose(iteratorRecord, defineStatus).
+ try {
+ FastCreateDataProperty(a, k, mappedValue);
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(iteratorRecord);
+ ReThrow(context, e);
+ }
+ // x. Set k to k + 1.
+ k += 1;
}
- label IteratorIsUndefined {
- // 6. NOTE: items is not an Iterable so assume it is an array-like object.
- // 7. Let arrayLike be ! ToObject(items).
- const arrayLike = ToObject_Inline(context, items);
- // 8. Let len be ? LengthOfArrayLike(arrayLike).
- const len = GetLengthProperty(arrayLike);
-
- let a: JSReceiver;
- // 9. If IsConstructor(C) is true, then
- typeswitch (c) {
- case (c: Constructor): {
-          // a. Let A be ? Construct(C, « len »).
- a = Construct(c, len);
- }
- case (JSAny): {
- // a. Let A be ? ArrayCreate(len).
- a = ArrayCreate(len);
- }
+ unreachable;
+ } label IteratorIsUndefined {
+ // 6. NOTE: items is not an Iterable so assume it is an array-like object.
+ // 7. Let arrayLike be ! ToObject(items).
+ const arrayLike = ToObject_Inline(context, items);
+ // 8. Let len be ? LengthOfArrayLike(arrayLike).
+ const len = GetLengthProperty(arrayLike);
+
+ let a: JSReceiver;
+ // 9. If IsConstructor(C) is true, then
+ typeswitch (c) {
+ case (c: Constructor): {
+        // a. Let A be ? Construct(C, « len »).
+ a = Construct(c, len);
}
-
- // 11. Let k be 0.
- let k: Smi = 0;
- // 12. Repeat, while k < len
- while (k < len) {
- // a. Let Pk be ! ToString(k).
- // b. Let kValue be ? Get(arrayLike, Pk).
- const kValue = GetProperty(arrayLike, k);
- let mappedValue: JSAny;
- // c. If mapping is true, then
- if (mapping) {
-          // i. Let mappedValue be ? Call(mapfn, thisArg, « kValue, k »).
- mappedValue =
- Call(context, UnsafeCast<Callable>(mapfn), thisArg, kValue, k);
- } else {
- // d. Else, let mappedValue be kValue.
- mappedValue = kValue;
- }
- // e. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- FastCreateDataProperty(a, k, mappedValue);
- // f. Set k to k + 1.
- k += 1;
+ case (JSAny): {
+ // a. Let A be ? ArrayCreate(len).
+ a = ArrayCreate(len);
}
-
- // 13. Perform ? Set(A, "length", len, true).
- array::SetPropertyLength(a, len);
- // 14. Return A.
- return a;
}
- label IteratorNotCallable(_value: JSAny) deferred {
- ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
+
+ // 11. Let k be 0.
+ let k: Smi = 0;
+ // 12. Repeat, while k < len
+ while (k < len) {
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(arrayLike, Pk).
+ const kValue = GetProperty(arrayLike, k);
+ let mappedValue: JSAny;
+ // c. If mapping is true, then
+ if (mapping) {
+        // i. Let mappedValue be ? Call(mapfn, thisArg, « kValue, k »).
+ mappedValue =
+ Call(context, UnsafeCast<Callable>(mapfn), thisArg, kValue, k);
+ } else {
+ // d. Else, let mappedValue be kValue.
+ mappedValue = kValue;
+ }
+ // e. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ FastCreateDataProperty(a, k, mappedValue);
+ // f. Set k to k + 1.
+ k += 1;
}
+
+ // 13. Perform ? Set(A, "length", len, true).
+ array::SetPropertyLength(a, len);
+ // 14. Return A.
+ return a;
+ } label IteratorNotCallable(_value: JSAny) deferred {
+ ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
}
}
+}
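For orientation, an illustrative TypeScript sketch (not taken from the patch) of the two paths ArrayFrom distinguishes above: the iterator path taken when items provides Symbol.iterator, and the array-like path handled in the IteratorIsUndefined label.

  // Iterator path: the iterable is stepped and mapfn is applied per element.
  const fromIterable = Array.from(new Set([1, 2, 3]), (x, k) => x * 10 + k);
  // fromIterable === [10, 21, 32]

  // Array-like path: no Symbol.iterator, so "length" and indices are read.
  const fromArrayLike = Array.from({ length: 2, 0: 'a', 1: 'b' });
  // fromArrayLike === ['a', 'b']

  // A defined but non-callable mapfn throws before any iteration starts.
  // Array.from([1], 42 as any);   // TypeError (kCalledNonCallable)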
diff --git a/deps/v8/src/builtins/array-isarray.tq b/deps/v8/src/builtins/array-isarray.tq
index 48fca60339..a88c1579d1 100644
--- a/deps/v8/src/builtins/array-isarray.tq
+++ b/deps/v8/src/builtins/array-isarray.tq
@@ -3,25 +3,25 @@
// found in the LICENSE file.
namespace runtime {
- extern runtime ArrayIsArray(implicit context: Context)(JSAny): JSAny;
+extern runtime ArrayIsArray(implicit context: Context)(JSAny): JSAny;
} // namespace runtime
namespace array {
- // ES #sec-array.isarray
- javascript builtin ArrayIsArray(js-implicit context:
- NativeContext)(arg: JSAny): JSAny {
- // 1. Return ? IsArray(arg).
- typeswitch (arg) {
- case (JSArray): {
- return True;
- }
- case (JSProxy): {
- // TODO(verwaest): Handle proxies in-place
- return runtime::ArrayIsArray(arg);
- }
- case (JSAny): {
- return False;
- }
+// ES #sec-array.isarray
+javascript builtin ArrayIsArray(js-implicit context: NativeContext)(arg: JSAny):
+ JSAny {
+ // 1. Return ? IsArray(arg).
+ typeswitch (arg) {
+ case (JSArray): {
+ return True;
+ }
+ case (JSProxy): {
+ // TODO(verwaest): Handle proxies in-place
+ return runtime::ArrayIsArray(arg);
+ }
+ case (JSAny): {
+ return False;
}
}
+}
} // namespace array
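The three typeswitch arms above map onto these observable results (illustrative TypeScript, not from the patch):

  Array.isArray([1, 2, 3]);            // true  — JSArray arm
  Array.isArray(new Proxy([], {}));    // true  — JSProxy arm, via runtime::ArrayIsArray
  Array.isArray({ length: 0 });        // false — JSAny fallback arm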
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index a06365f335..08d0cbf894 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -3,646 +3,632 @@
// found in the LICENSE file.
namespace array {
- type LoadJoinElementFn = builtin(Context, JSReceiver, uintptr) => JSAny;
+type LoadJoinElementFn = builtin(Context, JSReceiver, uintptr) => JSAny;
- // Fast C call to write a fixed array (see Buffer.fixedArray) to a single
- // string.
- extern macro
- ArrayBuiltinsAssembler::CallJSArrayArrayJoinConcatToSequentialString(
- FixedArray, intptr, String, String): String;
+// Fast C call to write a fixed array (see Buffer.fixedArray) to a single
+// string.
+extern macro
+ArrayBuiltinsAssembler::CallJSArrayArrayJoinConcatToSequentialString(
+ FixedArray, intptr, String, String): String;
- transitioning builtin LoadJoinElement<T : type extends ElementsKind>(
- context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+transitioning builtin LoadJoinElement<T : type extends ElementsKind>(
+ context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+ return GetProperty(receiver, Convert<Number>(k));
+}
+
+transitioning LoadJoinElement<array::DictionaryElements>(
+ context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const dict: NumberDictionary = UnsafeCast<NumberDictionary>(array.elements);
+ try {
+ return BasicLoadNumberDictionaryElement(dict, Signed(k))
+ otherwise IfNoData, IfHole;
+ } label IfNoData deferred {
return GetProperty(receiver, Convert<Number>(k));
+ } label IfHole {
+ return kEmptyString;
}
+}
- transitioning LoadJoinElement<array::DictionaryElements>(
- context: Context, receiver: JSReceiver, k: uintptr): JSAny {
- const array: JSArray = UnsafeCast<JSArray>(receiver);
- const dict: NumberDictionary = UnsafeCast<NumberDictionary>(array.elements);
- try {
- return BasicLoadNumberDictionaryElement(dict, Signed(k))
- otherwise IfNoData, IfHole;
- }
- label IfNoData deferred {
- return GetProperty(receiver, Convert<Number>(k));
- }
- label IfHole {
- return kEmptyString;
+LoadJoinElement<array::FastSmiOrObjectElements>(
+ context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
+ const element: Object = fixedArray.objects[k];
+ return element == TheHole ? kEmptyString : UnsafeCast<JSAny>(element);
+}
+
+LoadJoinElement<array::FastDoubleElements>(
+ context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ const fixedDoubleArray: FixedDoubleArray =
+ UnsafeCast<FixedDoubleArray>(array.elements);
+ const element: float64 =
+ fixedDoubleArray.floats[k].Value() otherwise return kEmptyString;
+ return AllocateHeapNumberWithValue(element);
+}
+
+builtin LoadJoinTypedElement<T : type extends ElementsKind>(
+ context: Context, receiver: JSReceiver, k: uintptr): JSAny {
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
+ assert(!IsDetachedBuffer(typedArray.buffer));
+ return typed_array::LoadFixedTypedArrayElementAsTagged(
+ typedArray.data_ptr, k, typed_array::KindForArrayType<T>());
+}
+
+transitioning builtin ConvertToLocaleString(
+ context: Context, element: JSAny, locales: JSAny, options: JSAny): String {
+ if (IsNullOrUndefined(element)) return kEmptyString;
+
+ const prop: JSAny = GetProperty(element, 'toLocaleString');
+ try {
+ const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
+ let result: JSAny;
+ if (IsNullOrUndefined(locales)) {
+ result = Call(context, callable, element);
+ } else if (IsNullOrUndefined(options)) {
+ result = Call(context, callable, element, locales);
+ } else {
+ result = Call(context, callable, element, locales, options);
}
+ return ToString_Inline(result);
+ } label TypeError {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, prop);
}
+}
- LoadJoinElement<array::FastSmiOrObjectElements>(
- context: Context, receiver: JSReceiver, k: uintptr): JSAny {
- const array: JSArray = UnsafeCast<JSArray>(receiver);
- const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
- const element: Object = fixedArray.objects[k];
- return element == TheHole ? kEmptyString : UnsafeCast<JSAny>(element);
- }
+// Verifies the current element JSArray accessor can still be safely used
+// (see LoadJoinElement<ElementsAccessor>).
+macro CannotUseSameArrayAccessor<T: type>(implicit context: Context)(
+ loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
+ originalLen: Number): bool;
+
+CannotUseSameArrayAccessor<JSArray>(implicit context: Context)(
+ loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
+ originalLen: Number): bool {
+ if (loadFn == LoadJoinElement<array::GenericElementsAccessor>) return false;
+
+ const array: JSArray = UnsafeCast<JSArray>(receiver);
+ if (originalMap != array.map) return true;
+ if (originalLen != array.length) return true;
+ if (IsNoElementsProtectorCellInvalid()) return true;
+ return false;
+}
- LoadJoinElement<array::FastDoubleElements>(
- context: Context, receiver: JSReceiver, k: uintptr): JSAny {
- const array: JSArray = UnsafeCast<JSArray>(receiver);
- const fixedDoubleArray: FixedDoubleArray =
- UnsafeCast<FixedDoubleArray>(array.elements);
- const element: float64 =
- fixedDoubleArray.floats[k].Value() otherwise return kEmptyString;
- return AllocateHeapNumberWithValue(element);
- }
+CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
+ _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
+ _initialLen: Number): bool {
+ const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
+ return IsDetachedBuffer(typedArray.buffer);
+}
- builtin LoadJoinTypedElement<T : type extends ElementsKind>(
- context: Context, receiver: JSReceiver, k: uintptr): JSAny {
- const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- assert(!IsDetachedBuffer(typedArray.buffer));
- return typed_array::LoadFixedTypedArrayElementAsTagged(
- typedArray.data_ptr, k, typed_array::KindForArrayType<T>());
+// Calculates the running total length of the resulting string. If the
+// calculated length exceeds the maximum string length (see
+// String::kMaxLength), throws a range error.
+macro AddStringLength(implicit context: Context)(
+ lenA: intptr, lenB: intptr): intptr {
+ try {
+ const length: intptr = TryIntPtrAdd(lenA, lenB) otherwise IfOverflow;
+ if (length > kStringMaxLength) goto IfOverflow;
+ return length;
+ } label IfOverflow deferred {
+ ThrowInvalidStringLength(context);
}
+}
- transitioning builtin ConvertToLocaleString(
- context: Context, element: JSAny, locales: JSAny,
- options: JSAny): String {
- if (IsNullOrUndefined(element)) return kEmptyString;
-
- const prop: JSAny = GetProperty(element, 'toLocaleString');
- try {
- const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
- let result: JSAny;
- if (IsNullOrUndefined(locales)) {
- result = Call(context, callable, element);
- } else if (IsNullOrUndefined(options)) {
- result = Call(context, callable, element, locales);
- } else {
- result = Call(context, callable, element, locales, options);
- }
- return ToString_Inline(result);
- }
- label TypeError {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, prop);
+// Stores an element to a fixed array and return the fixed array. If the fixed
+// array is not large enough, create and return a new, larger fixed array that
+// contains all previously elements and the new element.
+macro StoreAndGrowFixedArray<T: type>(
+ fixedArray: FixedArray, index: intptr, element: T): FixedArray {
+ const length: intptr = fixedArray.length_intptr;
+ assert(index <= length);
+ if (index < length) {
+ fixedArray.objects[index] = element;
+ return fixedArray;
+ } else
+ deferred {
+ const newLength: intptr = CalculateNewElementsCapacity(length);
+ assert(index < newLength);
+ const newfixedArray: FixedArray =
+ ExtractFixedArray(fixedArray, 0, length, newLength);
+ newfixedArray.objects[index] = element;
+ return newfixedArray;
}
- }
+}
- // Verifies the current element JSArray accessor can still be safely used
- // (see LoadJoinElement<ElementsAccessor>).
- macro CannotUseSameArrayAccessor<T: type>(implicit context: Context)(
- loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
- originalLen: Number): bool;
-
- CannotUseSameArrayAccessor<JSArray>(implicit context: Context)(
- loadFn: LoadJoinElementFn, receiver: JSReceiver, originalMap: Map,
- originalLen: Number): bool {
- if (loadFn == LoadJoinElement<array::GenericElementsAccessor>) return false;
-
- const array: JSArray = UnsafeCast<JSArray>(receiver);
- if (originalMap != array.map) return true;
- if (originalLen != array.length) return true;
- if (IsNoElementsProtectorCellInvalid()) return true;
- return false;
+// Contains the information necessary to create a single, separator delimited,
+// flattened one or two byte string.
+// The buffer is maintained and updated by Buffer.constructor, Buffer.Add(),
+// Buffer.AddSeparators().
+struct Buffer {
+ macro Add(implicit context: Context)(
+ str: String, nofSeparators: intptr, separatorLength: intptr) {
+ // Add separators if necessary (at the beginning or more than one)
+ const writeSeparators: bool = this.index == 0 | nofSeparators > 1;
+ this.AddSeparators(nofSeparators, separatorLength, writeSeparators);
+
+ this.totalStringLength =
+ AddStringLength(this.totalStringLength, str.length_intptr);
+ this.fixedArray =
+ StoreAndGrowFixedArray(this.fixedArray, this.index++, str);
+ this.isOneByte =
+ IsOneByteStringInstanceType(str.instanceType) & this.isOneByte;
}
- CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
- _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
- _initialLen: Number): bool {
- const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- return IsDetachedBuffer(typedArray.buffer);
- }
+ macro AddSeparators(implicit context: Context)(
+ nofSeparators: intptr, separatorLength: intptr, write: bool) {
+ if (nofSeparators == 0 || separatorLength == 0) return;
- // Calculates the running total length of the resulting string. If the
- // calculated length exceeds the maximum string length (see
- // String::kMaxLength), throws a range error.
- macro AddStringLength(implicit context: Context)(lenA: intptr, lenB: intptr):
- intptr {
- try {
- const length: intptr = TryIntPtrAdd(lenA, lenB) otherwise IfOverflow;
- if (length > kStringMaxLength) goto IfOverflow;
- return length;
- }
- label IfOverflow deferred {
- ThrowInvalidStringLength(context);
- }
- }
+ const nofSeparatorsInt: intptr = nofSeparators;
+ const sepsLen: intptr = separatorLength * nofSeparatorsInt;
+ // Detect integer overflow
+ // TODO(tebbi): Replace with overflow-checked multiplication.
+ if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
+ ThrowInvalidStringLength(context);
+ }
- // Stores an element to a fixed array and return the fixed array. If the fixed
- // array is not large enough, create and return a new, larger fixed array that
- // contains all previously elements and the new element.
- macro StoreAndGrowFixedArray<T: type>(
- fixedArray: FixedArray, index: intptr, element: T): FixedArray {
- const length: intptr = fixedArray.length_intptr;
- assert(index <= length);
- if (index < length) {
- fixedArray.objects[index] = element;
- return fixedArray;
- } else
- deferred {
- const newLength: intptr = CalculateNewElementsCapacity(length);
- assert(index < newLength);
- const newfixedArray: FixedArray =
- ExtractFixedArray(fixedArray, 0, length, newLength);
- newfixedArray.objects[index] = element;
- return newfixedArray;
+ this.totalStringLength = AddStringLength(this.totalStringLength, sepsLen);
+ if (write) deferred {
+ this.fixedArray = StoreAndGrowFixedArray(
+ this.fixedArray, this.index++, Convert<Smi>(nofSeparatorsInt));
}
}
- // Contains the information necessary to create a single, separator delimited,
- // flattened one or two byte string.
- // The buffer is maintained and updated by Buffer.constructor, Buffer.Add(),
- // Buffer.AddSeparators().
- struct Buffer {
- macro Add(implicit context: Context)(
- str: String, nofSeparators: intptr, separatorLength: intptr) {
- // Add separators if necessary (at the beginning or more than one)
- const writeSeparators: bool = this.index == 0 | nofSeparators > 1;
- this.AddSeparators(nofSeparators, separatorLength, writeSeparators);
-
- this.totalStringLength =
- AddStringLength(this.totalStringLength, str.length_intptr);
- this.fixedArray =
- StoreAndGrowFixedArray(this.fixedArray, this.index++, str);
- this.isOneByte =
- IsOneByteStringInstanceType(str.instanceType) & this.isOneByte;
- }
+ // Fixed array holding elements that are either:
+ // 1) String result of `ToString(next)`.
+ // 2) Smi representing the number of consecutive separators.
+ // `BufferJoin()` will iterate and writes these entries to a flat string.
+ //
+ // To save space, reduce reads and writes, only separators at the beginning,
+ // end, or more than one are written.
+ //
+ // No hole example
+ // receiver: ['hello', 'world']
+ // fixedArray: ['hello', 'world']
+ //
+ // Hole example
+ // receiver: [<hole>, 'hello', <hole>, 'world', <hole>]
+ // fixedArray: [1, 'hello', 2, 'world', 1]
+ fixedArray: FixedArray;
+
+ // Index to insert a new entry into `fixedArray`.
+ index: intptr;
+
+ // Running total of the resulting string length.
+ totalStringLength: intptr;
+
+ // `true` if the separator and all strings in the buffer are one-byte,
+ // otherwise `false`.
+ isOneByte: bool;
+}
- macro AddSeparators(implicit context: Context)(
- nofSeparators: intptr, separatorLength: intptr, write: bool) {
- if (nofSeparators == 0 || separatorLength == 0) return;
+macro NewBuffer(len: uintptr, sep: String): Buffer {
+ const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
+ kMaxNewSpaceFixedArrayElements :
+ Signed(len);
+ assert(cappedBufferSize > 0);
+ return Buffer{
+ fixedArray: AllocateZeroedFixedArray(cappedBufferSize),
+ index: 0,
+ totalStringLength: 0,
+ isOneByte: IsOneByteStringInstanceType(sep.instanceType)
+ };
+}
- const nofSeparatorsInt: intptr = nofSeparators;
- const sepsLen: intptr = separatorLength * nofSeparatorsInt;
- // Detect integer overflow
- // TODO(tebbi): Replace with overflow-checked multiplication.
- if (sepsLen / separatorLength != nofSeparatorsInt) deferred {
- ThrowInvalidStringLength(context);
- }
+macro BufferJoin(implicit context: Context)(
+ buffer: Buffer, sep: String): String {
+ assert(IsValidPositiveSmi(buffer.totalStringLength));
+ if (buffer.totalStringLength == 0) return kEmptyString;
+
+ // Fast path when there's only one buffer element.
+ if (buffer.index == 1) {
+ const fixedArray: FixedArray = buffer.fixedArray;
+ typeswitch (fixedArray.objects[0]) {
+ // When the element is a string, just return it and completely avoid
+ // allocating another string.
+ case (str: String): {
+ return str;
+ }
- this.totalStringLength = AddStringLength(this.totalStringLength, sepsLen);
- if (write) deferred {
- this.fixedArray = StoreAndGrowFixedArray(
- this.fixedArray, this.index++, Convert<Smi>(nofSeparatorsInt));
- }
+ // When the element is a smi, use StringRepeat to quickly build a memory
+ // efficient separator repeated string.
+ case (nofSeparators: Number): {
+ return StringRepeat(context, sep, nofSeparators);
+ }
+ case (Object): {
+ unreachable;
+ }
}
-
- // Fixed array holding elements that are either:
- // 1) String result of `ToString(next)`.
- // 2) Smi representing the number of consecutive separators.
- // `BufferJoin()` will iterate and writes these entries to a flat string.
- //
- // To save space, reduce reads and writes, only separators at the beginning,
- // end, or more than one are written.
- //
- // No hole example
- // receiver: ['hello', 'world']
- // fixedArray: ['hello', 'world']
- //
- // Hole example
- // receiver: [<hole>, 'hello', <hole>, 'world', <hole>]
- // fixedArray: [1, 'hello', 2, 'world', 1]
- fixedArray: FixedArray;
-
- // Index to insert a new entry into `fixedArray`.
- index: intptr;
-
- // Running total of the resulting string length.
- totalStringLength: intptr;
-
- // `true` if the separator and all strings in the buffer are one-byte,
- // otherwise `false`.
- isOneByte: bool;
}
- macro NewBuffer(len: uintptr, sep: String): Buffer {
- const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
- kMaxNewSpaceFixedArrayElements :
- Signed(len);
- assert(cappedBufferSize > 0);
- return Buffer{
- fixedArray: AllocateZeroedFixedArray(cappedBufferSize),
- index: 0,
- totalStringLength: 0,
- isOneByte: IsOneByteStringInstanceType(sep.instanceType)
- };
- }
-
- macro BufferJoin(implicit context: Context)(buffer: Buffer, sep: String):
- String {
- assert(IsValidPositiveSmi(buffer.totalStringLength));
- if (buffer.totalStringLength == 0) return kEmptyString;
-
- // Fast path when there's only one buffer element.
- if (buffer.index == 1) {
- const fixedArray: FixedArray = buffer.fixedArray;
- typeswitch (fixedArray.objects[0]) {
- // When the element is a string, just return it and completely avoid
- // allocating another string.
- case (str: String): {
- return str;
- }
+ const length: uint32 = Convert<uint32>(Unsigned(buffer.totalStringLength));
+ const r: String = buffer.isOneByte ? AllocateSeqOneByteString(length) :
+ AllocateSeqTwoByteString(length);
+ return CallJSArrayArrayJoinConcatToSequentialString(
+ buffer.fixedArray, buffer.index, sep, r);
+}
- // When the element is a smi, use StringRepeat to quickly build a memory
- // efficient separator repeated string.
- case (nofSeparators: Number): {
- return StringRepeat(context, sep, nofSeparators);
- }
- case (Object): {
- unreachable;
- }
+transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
+ receiver: JSReceiver, sep: String, lengthNumber: Number,
+ useToLocaleString: constexpr bool, locales: JSAny, options: JSAny,
+ initialLoadFn: LoadJoinElementFn): String {
+ const initialMap: Map = receiver.map;
+ const len: uintptr = Convert<uintptr>(lengthNumber);
+ const separatorLength: intptr = sep.length_intptr;
+ let nofSeparators: intptr = 0;
+ let loadFn: LoadJoinElementFn = initialLoadFn;
+ let buffer: Buffer = NewBuffer(len, sep);
+
+ // 6. Let k be 0.
+ let k: uintptr = 0;
+
+ // 7. Repeat, while k < len
+ while (k < len) {
+ if (CannotUseSameArrayAccessor<T>(
+ loadFn, receiver, initialMap, lengthNumber))
+ deferred {
+ loadFn = LoadJoinElement<array::GenericElementsAccessor>;
}
+
+ if (k > 0) {
+ // a. If k > 0, let R be the string-concatenation of R and sep.
+ nofSeparators = nofSeparators + 1;
}
- const length: uint32 = Convert<uint32>(Unsigned(buffer.totalStringLength));
- const r: String = buffer.isOneByte ? AllocateSeqOneByteString(length) :
- AllocateSeqTwoByteString(length);
- return CallJSArrayArrayJoinConcatToSequentialString(
- buffer.fixedArray, buffer.index, sep, r);
- }
+ // b. Let element be ? Get(O, ! ToString(k)).
+ const element: JSAny = loadFn(context, receiver, k++);
- transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
- receiver: JSReceiver, sep: String, lengthNumber: Number,
- useToLocaleString: constexpr bool, locales: JSAny, options: JSAny,
- initialLoadFn: LoadJoinElementFn): String {
- const initialMap: Map = receiver.map;
- const len: uintptr = Convert<uintptr>(lengthNumber);
- const separatorLength: intptr = sep.length_intptr;
- let nofSeparators: intptr = 0;
- let loadFn: LoadJoinElementFn = initialLoadFn;
- let buffer: Buffer = NewBuffer(len, sep);
-
- // 6. Let k be 0.
- let k: uintptr = 0;
-
- // 7. Repeat, while k < len
- while (k < len) {
- if (CannotUseSameArrayAccessor<T>(
- loadFn, receiver, initialMap, lengthNumber))
- deferred {
- loadFn = LoadJoinElement<array::GenericElementsAccessor>;
+ // c. If element is undefined or null, let next be the empty String;
+ // otherwise, let next be ? ToString(element).
+ let next: String;
+ if constexpr (useToLocaleString) {
+ next = ConvertToLocaleString(context, element, locales, options);
+ if (next == kEmptyString) continue;
+ } else {
+ typeswitch (element) {
+ case (str: String): {
+ if (str == kEmptyString) continue;
+ next = str;
}
-
- if (k > 0) {
- // a. If k > 0, let R be the string-concatenation of R and sep.
- nofSeparators = nofSeparators + 1;
- }
-
- // b. Let element be ? Get(O, ! ToString(k)).
- const element: JSAny = loadFn(context, receiver, k++);
-
- // c. If element is undefined or null, let next be the empty String;
- // otherwise, let next be ? ToString(element).
- let next: String;
- if constexpr (useToLocaleString) {
- next = ConvertToLocaleString(context, element, locales, options);
- if (next == kEmptyString) continue;
- } else {
- typeswitch (element) {
- case (str: String): {
- if (str == kEmptyString) continue;
- next = str;
- }
- case (num: Number): {
- next = NumberToString(num);
- }
- case (obj: JSAny): {
- if (IsNullOrUndefined(obj)) continue;
- next = ToString(context, obj);
- }
+ case (num: Number): {
+ next = NumberToString(num);
+ }
+ case (obj: JSAny): {
+ if (IsNullOrUndefined(obj)) continue;
+ next = ToString(context, obj);
}
}
-
- // d. Set R to the string-concatenation of R and next.
- buffer.Add(next, nofSeparators, separatorLength);
- nofSeparators = 0;
}
- // Add any separators at the end.
- buffer.AddSeparators(nofSeparators, separatorLength, true);
-
- // 8. Return R.
- return BufferJoin(buffer, sep);
+ // d. Set R to the string-concatenation of R and next.
+ buffer.Add(next, nofSeparators, separatorLength);
+ nofSeparators = 0;
}
- transitioning macro ArrayJoin<T: type>(implicit context: Context)(
- useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
- lenNumber: Number, locales: JSAny, options: JSAny): JSAny;
+ // Add any separators at the end.
+ buffer.AddSeparators(nofSeparators, separatorLength, true);
- transitioning ArrayJoin<JSArray>(implicit context: Context)(
- useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
- lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
- const map: Map = receiver.map;
- const kind: ElementsKind = map.elements_kind;
- let loadFn: LoadJoinElementFn;
+ // 8. Return R.
+ return BufferJoin(buffer, sep);
+}
- try {
- const array: JSArray = Cast<JSArray>(receiver) otherwise IfSlowPath;
- if (array.length != lenNumber) goto IfSlowPath;
- if (!IsPrototypeInitialArrayPrototype(map)) goto IfSlowPath;
- if (IsNoElementsProtectorCellInvalid()) goto IfSlowPath;
-
- if (IsElementsKindLessThanOrEqual(kind, ElementsKind::HOLEY_ELEMENTS)) {
- loadFn = LoadJoinElement<array::FastSmiOrObjectElements>;
- } else if (IsElementsKindLessThanOrEqual(
- kind, ElementsKind::HOLEY_DOUBLE_ELEMENTS)) {
- loadFn = LoadJoinElement<array::FastDoubleElements>;
- } else if (kind == ElementsKind::DICTIONARY_ELEMENTS)
- deferred {
- const dict: NumberDictionary =
- UnsafeCast<NumberDictionary>(array.elements);
- const nofElements: Smi = GetNumberDictionaryNumberOfElements(dict);
- if (nofElements == 0) {
- if (sep == kEmptyString) return kEmptyString;
- try {
- const nofSeparators: Smi =
- Cast<Smi>(lenNumber - 1) otherwise IfNotSmi;
- return StringRepeat(context, sep, nofSeparators);
- }
- label IfNotSmi {
- ThrowInvalidStringLength(context);
- }
- } else {
- loadFn = LoadJoinElement<array::DictionaryElements>;
+transitioning macro ArrayJoin<T: type>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: JSAny, options: JSAny): JSAny;
+
+transitioning ArrayJoin<JSArray>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
+ const map: Map = receiver.map;
+ const kind: ElementsKind = map.elements_kind;
+ let loadFn: LoadJoinElementFn;
+
+ try {
+ const array: JSArray = Cast<JSArray>(receiver) otherwise IfSlowPath;
+ if (array.length != lenNumber) goto IfSlowPath;
+ if (!IsPrototypeInitialArrayPrototype(map)) goto IfSlowPath;
+ if (IsNoElementsProtectorCellInvalid()) goto IfSlowPath;
+
+ if (IsElementsKindLessThanOrEqual(kind, ElementsKind::HOLEY_ELEMENTS)) {
+ loadFn = LoadJoinElement<array::FastSmiOrObjectElements>;
+ } else if (IsElementsKindLessThanOrEqual(
+ kind, ElementsKind::HOLEY_DOUBLE_ELEMENTS)) {
+ loadFn = LoadJoinElement<array::FastDoubleElements>;
+ } else if (kind == ElementsKind::DICTIONARY_ELEMENTS)
+ deferred {
+ const dict: NumberDictionary =
+ UnsafeCast<NumberDictionary>(array.elements);
+ const nofElements: Smi = GetNumberDictionaryNumberOfElements(dict);
+ if (nofElements == 0) {
+ if (sep == kEmptyString) return kEmptyString;
+ try {
+ const nofSeparators: Smi =
+ Cast<Smi>(lenNumber - 1) otherwise IfNotSmi;
+ return StringRepeat(context, sep, nofSeparators);
+ } label IfNotSmi {
+ ThrowInvalidStringLength(context);
}
+ } else {
+ loadFn = LoadJoinElement<array::DictionaryElements>;
}
- else {
- goto IfSlowPath;
}
+ else {
+ goto IfSlowPath;
}
- label IfSlowPath {
- loadFn = LoadJoinElement<array::GenericElementsAccessor>;
- }
- return ArrayJoinImpl<JSArray>(
- receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
+ } label IfSlowPath {
+ loadFn = LoadJoinElement<array::GenericElementsAccessor>;
}
+ return ArrayJoinImpl<JSArray>(
+ receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
+}
- transitioning ArrayJoin<JSTypedArray>(implicit context: Context)(
- useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
- lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
- const map: Map = receiver.map;
- const kind: ElementsKind = map.elements_kind;
- let loadFn: LoadJoinElementFn;
-
- if (IsElementsKindGreaterThan(kind, ElementsKind::UINT32_ELEMENTS)) {
- if (kind == ElementsKind::INT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Int32Elements>;
- } else if (kind == ElementsKind::FLOAT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Float32Elements>;
- } else if (kind == ElementsKind::FLOAT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Float64Elements>;
- } else if (kind == ElementsKind::UINT8_CLAMPED_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Uint8ClampedElements>;
- } else if (kind == ElementsKind::BIGUINT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::BigUint64Elements>;
- } else if (kind == ElementsKind::BIGINT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::BigInt64Elements>;
- } else {
- unreachable;
- }
+transitioning ArrayJoin<JSTypedArray>(implicit context: Context)(
+ useToLocaleString: constexpr bool, receiver: JSReceiver, sep: String,
+ lenNumber: Number, locales: JSAny, options: JSAny): JSAny {
+ const map: Map = receiver.map;
+ const kind: ElementsKind = map.elements_kind;
+ let loadFn: LoadJoinElementFn;
+
+ if (IsElementsKindGreaterThan(kind, ElementsKind::UINT32_ELEMENTS)) {
+ if (kind == ElementsKind::INT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int32Elements>;
+ } else if (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Float32Elements>;
+ } else if (kind == ElementsKind::FLOAT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Float64Elements>;
+ } else if (kind == ElementsKind::UINT8_CLAMPED_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint8ClampedElements>;
+ } else if (kind == ElementsKind::BIGUINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::BigUint64Elements>;
+ } else if (kind == ElementsKind::BIGINT64_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::BigInt64Elements>;
} else {
- if (kind == ElementsKind::UINT8_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Uint8Elements>;
- } else if (kind == ElementsKind::INT8_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Int8Elements>;
- } else if (kind == ElementsKind::UINT16_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Uint16Elements>;
- } else if (kind == ElementsKind::INT16_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Int16Elements>;
- } else if (kind == ElementsKind::UINT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<typed_array::Uint32Elements>;
- } else {
- unreachable;
- }
+ unreachable;
+ }
+ } else {
+ if (kind == ElementsKind::UINT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint8Elements>;
+ } else if (kind == ElementsKind::INT8_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int8Elements>;
+ } else if (kind == ElementsKind::UINT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint16Elements>;
+ } else if (kind == ElementsKind::INT16_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Int16Elements>;
+ } else if (kind == ElementsKind::UINT32_ELEMENTS) {
+ loadFn = LoadJoinTypedElement<typed_array::Uint32Elements>;
+ } else {
+ unreachable;
}
- return ArrayJoinImpl<JSTypedArray>(
- receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
- }
-
- // The Join Stack detects cyclical calls to Array Join builtins
- // (Array.p.join(), Array.p.toString(), Array.p.toLocaleString()). This
- // FixedArray holds a stack of receivers to the current call.
- // CycleProtectedArrayJoin() is responsible for calling JoinStackPush and
- // JoinStackPop when visiting and leaving a receiver, respectively.
- const kMinJoinStackSize:
- constexpr int31 generates 'JSArray::kMinJoinStackSize';
- macro LoadJoinStack(implicit context: Context)(): FixedArray
- labels IfUninitialized {
- const nativeContext: NativeContext = LoadNativeContext(context);
- const stack: HeapObject = UnsafeCast<HeapObject>(
- nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX]);
- if (stack == Undefined) goto IfUninitialized;
- assert(IsFixedArray(stack));
- return UnsafeCast<FixedArray>(stack);
}
+ return ArrayJoinImpl<JSTypedArray>(
+ receiver, sep, lenNumber, useToLocaleString, locales, options, loadFn);
+}
- macro SetJoinStack(implicit context: Context)(stack: FixedArray): void {
- const nativeContext: NativeContext = LoadNativeContext(context);
- nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX] = stack;
- }
+// The Join Stack detects cyclical calls to Array Join builtins
+// (Array.p.join(), Array.p.toString(), Array.p.toLocaleString()). This
+// FixedArray holds a stack of receivers to the current call.
+// CycleProtectedArrayJoin() is responsible for calling JoinStackPush and
+// JoinStackPop when visiting and leaving a receiver, respectively.
+const kMinJoinStackSize:
+ constexpr int31 generates 'JSArray::kMinJoinStackSize';
+macro LoadJoinStack(implicit context: Context)(): FixedArray
+ labels IfUninitialized {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const stack: HeapObject = UnsafeCast<HeapObject>(
+ nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX]);
+ if (stack == Undefined) goto IfUninitialized;
+ assert(IsFixedArray(stack));
+ return UnsafeCast<FixedArray>(stack);
+}
- // Adds a receiver to the stack. The FixedArray will automatically grow to
- // accommodate the receiver. If the receiver already exists on the stack,
- // this indicates a cyclical call and False is returned.
- builtin JoinStackPush(implicit context: Context)(
- stack: FixedArray, receiver: JSReceiver): Boolean {
- const capacity: intptr = stack.length_intptr;
- for (let i: intptr = 0; i < capacity; i++) {
- const previouslyVisited: Object = stack.objects[i];
-
- // Add `receiver` to the first open slot
- if (previouslyVisited == TheHole) {
- stack.objects[i] = receiver;
- return True;
- }
+macro SetJoinStack(implicit context: Context)(stack: FixedArray): void {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX] = stack;
+}
- // Detect cycles
- if (receiver == previouslyVisited) return False;
+// Adds a receiver to the stack. The FixedArray will automatically grow to
+// accommodate the receiver. If the receiver already exists on the stack,
+// this indicates a cyclical call and False is returned.
+builtin JoinStackPush(implicit context: Context)(
+ stack: FixedArray, receiver: JSReceiver): Boolean {
+ const capacity: intptr = stack.length_intptr;
+ for (let i: intptr = 0; i < capacity; i++) {
+ const previouslyVisited: Object = stack.objects[i];
+
+ // Add `receiver` to the first open slot
+ if (previouslyVisited == TheHole) {
+ stack.objects[i] = receiver;
+ return True;
}
- // If no open slots were found, grow the stack and add receiver to the end.
- const newStack: FixedArray =
- StoreAndGrowFixedArray(stack, capacity, receiver);
- SetJoinStack(newStack);
- return True;
+ // Detect cycles
+ if (receiver == previouslyVisited) return False;
}
- // Fast path the common non-nested calls. If the receiver is not already on
- // the stack, add it to the stack and return true. Otherwise return false.
- macro JoinStackPushInline(implicit context: Context)(receiver: JSReceiver):
- bool {
- try {
- const stack: FixedArray = LoadJoinStack()
- otherwise IfUninitialized;
- if (stack.objects[0] == TheHole) {
- stack.objects[0] = receiver;
- } else if (JoinStackPush(stack, receiver) == False)
- deferred {
- return false;
- }
- }
- label IfUninitialized {
- const stack: FixedArray =
- AllocateFixedArrayWithHoles(kMinJoinStackSize, AllocationFlag::kNone);
+ // If no open slots were found, grow the stack and add receiver to the end.
+ const newStack: FixedArray =
+ StoreAndGrowFixedArray(stack, capacity, receiver);
+ SetJoinStack(newStack);
+ return True;
+}
+
+// Fast path the common non-nested calls. If the receiver is not already on
+// the stack, add it to the stack and return true. Otherwise return false.
+macro JoinStackPushInline(implicit context: Context)(receiver: JSReceiver):
+ bool {
+ try {
+ const stack: FixedArray = LoadJoinStack()
+ otherwise IfUninitialized;
+ if (stack.objects[0] == TheHole) {
stack.objects[0] = receiver;
- SetJoinStack(stack);
- }
- return true;
+ } else if (JoinStackPush(stack, receiver) == False)
+ deferred {
+ return false;
+ }
+ } label IfUninitialized {
+ const stack: FixedArray =
+ AllocateFixedArrayWithHoles(kMinJoinStackSize, AllocationFlag::kNone);
+ stack.objects[0] = receiver;
+ SetJoinStack(stack);
}
+ return true;
+}
- // Removes a receiver from the stack. The FixedArray will automatically shrink
- // to Heap::kMinJoinStackSize once the stack becomes empty.
- builtin JoinStackPop(implicit context: Context)(
- stack: FixedArray, receiver: JSReceiver): JSAny {
- const len: intptr = stack.length_intptr;
- for (let i: intptr = 0; i < len; i++) {
- if (stack.objects[i] == receiver) {
- // Shrink the Join Stack if the stack will be empty and is larger than
- // the minimum size.
- if (i == 0 && len > kMinJoinStackSize) deferred {
- const newStack: FixedArray = AllocateFixedArrayWithHoles(
- kMinJoinStackSize, AllocationFlag::kNone);
- SetJoinStack(newStack);
- }
- else {
- stack.objects[i] = TheHole;
+// Removes a receiver from the stack. The FixedArray will automatically shrink
+// to Heap::kMinJoinStackSize once the stack becomes empty.
+builtin JoinStackPop(implicit context: Context)(
+ stack: FixedArray, receiver: JSReceiver): JSAny {
+ const len: intptr = stack.length_intptr;
+ for (let i: intptr = 0; i < len; i++) {
+ if (stack.objects[i] == receiver) {
+ // Shrink the Join Stack if the stack will be empty and is larger than
+ // the minimum size.
+ if (i == 0 && len > kMinJoinStackSize) deferred {
+ const newStack: FixedArray = AllocateFixedArrayWithHoles(
+ kMinJoinStackSize, AllocationFlag::kNone);
+ SetJoinStack(newStack);
}
- return Undefined;
+ else {
+ stack.objects[i] = TheHole;
}
+ return Undefined;
}
- unreachable;
}
+ unreachable;
+}
- // Fast path the common non-nested calls.
- macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver) {
- const stack: FixedArray = LoadJoinStack()
- otherwise unreachable;
- const len: intptr = stack.length_intptr;
-
- // Builtin call was not nested (receiver is the first entry) and
- // did not contain other nested arrays that expanded the stack.
- if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
- StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER);
- } else
- deferred {
- JoinStackPop(stack, receiver);
- }
- }
+// Fast path the common non-nested calls.
+macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver) {
+ const stack: FixedArray = LoadJoinStack()
+ otherwise unreachable;
+ const len: intptr = stack.length_intptr;
+
+ // Builtin call was not nested (receiver is the first entry) and
+ // did not contain other nested arrays that expanded the stack.
+ if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
+ StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER);
+ } else
+ deferred {
+ JoinStackPop(stack, receiver);
+ }
+}
- // Main entry point for all builtins using Array Join functionality.
- transitioning macro CycleProtectedArrayJoin<T: type>(implicit context:
- Context)(
- useToLocaleString: constexpr bool, o: JSReceiver, len: Number,
- sepObj: JSAny, locales: JSAny, options: JSAny): JSAny {
- // 3. If separator is undefined, let sep be the single-element String ",".
- // 4. Else, let sep be ? ToString(separator).
- const sep: String = sepObj == Undefined ? ',' : ToString_Inline(sepObj);
-
- // If the receiver is not empty and not already being joined, continue with
- // the normal join algorithm.
- if (len > 0 && JoinStackPushInline(o)) {
- try {
- const result: JSAny =
- ArrayJoin<T>(useToLocaleString, o, sep, len, locales, options);
- JoinStackPopInline(o);
- return result;
- } catch (e) deferred {
- JoinStackPopInline(o);
- ReThrow(context, e);
- }
- } else {
- return kEmptyString;
+// Main entry point for all builtins using Array Join functionality.
+transitioning macro CycleProtectedArrayJoin<T: type>(
+ implicit context: Context)(
+ useToLocaleString: constexpr bool, o: JSReceiver, len: Number,
+ sepObj: JSAny, locales: JSAny, options: JSAny): JSAny {
+ // 3. If separator is undefined, let sep be the single-element String ",".
+ // 4. Else, let sep be ? ToString(separator).
+ const sep: String = sepObj == Undefined ? ',' : ToString_Inline(sepObj);
+
+ // If the receiver is not empty and not already being joined, continue with
+ // the normal join algorithm.
+ if (len > 0 && JoinStackPushInline(o)) {
+ try {
+ const result: JSAny =
+ ArrayJoin<T>(useToLocaleString, o, sep, len, locales, options);
+ JoinStackPopInline(o);
+ return result;
+ } catch (e) deferred {
+ JoinStackPopInline(o);
+ ReThrow(context, e);
}
+ } else {
+ return kEmptyString;
}
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.join
- transitioning javascript builtin
- ArrayPrototypeJoin(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- const separator: JSAny = arguments[0];
+// https://tc39.github.io/ecma262/#sec-array.prototype.join
+transitioning javascript builtin
+ArrayPrototypeJoin(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const separator: JSAny = arguments[0];
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger
- // values, this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1)
- ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
+ if (len > kMaxArrayIndex + 1)
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength);
- return CycleProtectedArrayJoin<JSArray>(
- false, o, len, separator, Undefined, Undefined);
- }
+ return CycleProtectedArrayJoin<JSArray>(
+ false, o, len, separator, Undefined, Undefined);
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
- transitioning javascript builtin ArrayPrototypeToLocaleString(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const locales: JSAny = arguments[0];
- const options: JSAny = arguments[1];
+// https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
+transitioning javascript builtin ArrayPrototypeToLocaleString(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const locales: JSAny = arguments[0];
+ const options: JSAny = arguments[1];
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger
- // values, this matches historical V8 behavior.
- if (len > kMaxArrayIndex + 1)
- ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
+ if (len > kMaxArrayIndex + 1)
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength);
- return CycleProtectedArrayJoin<JSArray>(
- true, o, len, ',', locales, options);
- }
+ return CycleProtectedArrayJoin<JSArray>(true, o, len, ',', locales, options);
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.tostring
- transitioning javascript builtin ArrayPrototypeToString(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // 1. Let array be ? ToObject(this value).
- const array: JSReceiver = ToObject_Inline(context, receiver);
+// https://tc39.github.io/ecma262/#sec-array.prototype.tostring
+transitioning javascript builtin ArrayPrototypeToString(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let array be ? ToObject(this value).
+ const array: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let func be ? Get(array, "join").
+ const prop: JSAny = GetProperty(array, 'join');
+ try {
+ // 3. If IsCallable(func) is false, let func be the intrinsic function
+ // %ObjProto_toString%.
+ const func: Callable = Cast<Callable>(prop) otherwise NotCallable;
+
+ // 4. Return ? Call(func, array).
+ return Call(context, func, array);
+ } label NotCallable {
+ return ObjectToString(context, array);
+ }
+}
- // 2. Let func be ? Get(array, "join").
- const prop: JSAny = GetProperty(array, 'join');
- try {
- // 3. If IsCallable(func) is false, let func be the intrinsic function
- // %ObjProto_toString%.
- const func: Callable = Cast<Callable>(prop) otherwise NotCallable;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
+transitioning javascript builtin TypedArrayPrototypeJoin(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const separator: JSAny = arguments[0];
- // 4. Return ? Call(func, array).
- return Call(context, func, array);
- }
- label NotCallable {
- return ObjectToString(context, array);
- }
- }
+ // Spec: ValidateTypedArray is applied to the this value prior to evaluating
+ // the algorithm.
+ const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ context, receiver, '%TypedArray%.prototype.join');
+ const length = Convert<Number>(typedArray.length);
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
- transitioning javascript builtin TypedArrayPrototypeJoin(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const separator: JSAny = arguments[0];
+ return CycleProtectedArrayJoin<JSTypedArray>(
+ false, typedArray, length, separator, Undefined, Undefined);
+}
- // Spec: ValidateTypedArray is applied to the this value prior to evaluating
- // the algorithm.
- const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
- context, receiver, '%TypedArray%.prototype.join');
- const length = Convert<Number>(typedArray.length);
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
+transitioning javascript builtin TypedArrayPrototypeToLocaleString(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const locales: JSAny = arguments[0];
+ const options: JSAny = arguments[1];
- return CycleProtectedArrayJoin<JSTypedArray>(
- false, typedArray, length, separator, Undefined, Undefined);
- }
+ // Spec: ValidateTypedArray is applied to the this value prior to evaluating
+ // the algorithm.
+ const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
+ context, receiver, '%TypedArray%.prototype.toLocaleString');
+ const length = Convert<Number>(typedArray.length);
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
- transitioning javascript builtin TypedArrayPrototypeToLocaleString(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const locales: JSAny = arguments[0];
- const options: JSAny = arguments[1];
-
- // Spec: ValidateTypedArray is applied to the this value prior to evaluating
- // the algorithm.
- const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
- context, receiver, '%TypedArray%.prototype.toLocaleString');
- const length = Convert<Number>(typedArray.length);
-
- return CycleProtectedArrayJoin<JSTypedArray>(
- true, typedArray, length, ',', locales, options);
- }
+ return CycleProtectedArrayJoin<JSTypedArray>(
+ true, typedArray, length, ',', locales, options);
+}
}
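To illustrate the join-stack cycle protection wired up by CycleProtectedArrayJoin and JoinStackPush above (illustrative TypeScript, not from the patch): a receiver already on the join stack contributes the empty string instead of recursing, and an empty receiver short-circuits before the stack is touched.

  const a: unknown[] = [1, 2];
  a.push(a);        // the array now contains itself
  a.join('-');      // '1-2-' — the cyclic element renders as ''
  [].join('-');     // ''     — len == 0 returns the empty string immediately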
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index 52bcc75d19..fe416fa4a2 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -3,154 +3,151 @@
// found in the LICENSE file.
namespace array {
- macro LoadWithHoleCheck<Elements : type extends FixedArrayBase>(
- elements: FixedArrayBase, index: Smi): JSAny
- labels IfHole;
-
- LoadWithHoleCheck<FixedArray>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): JSAny
- labels IfHole {
- const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- const element: Object = elements.objects[index];
- if (element == TheHole) goto IfHole;
- return UnsafeCast<JSAny>(element);
- }
+macro LoadWithHoleCheck<Elements : type extends FixedArrayBase>(
+ elements: FixedArrayBase, index: Smi): JSAny
+ labels IfHole;
+
+LoadWithHoleCheck<FixedArray>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): JSAny
+ labels IfHole {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ const element: Object = elements.objects[index];
+ if (element == TheHole) goto IfHole;
+ return UnsafeCast<JSAny>(element);
+}
- LoadWithHoleCheck<FixedDoubleArray>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): JSAny
- labels IfHole {
- const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- const element: float64 = elements.floats[index].Value() otherwise IfHole;
- return AllocateHeapNumberWithValue(element);
- }
+LoadWithHoleCheck<FixedDoubleArray>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): JSAny
+ labels IfHole {
+ const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ const element: float64 = elements.floats[index].Value() otherwise IfHole;
+ return AllocateHeapNumberWithValue(element);
+}
- macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
- context: Context, array: JSArray, from: Smi, searchElement: JSAny): Smi {
- const elements: FixedArrayBase = array.elements;
- let k: Smi = from;
-
- // Bug(898785): Due to side-effects in the evaluation of `fromIndex`
- // the {from} can be out-of-bounds here, so we need to clamp {k} to
- // the {elements} length. We might be reading holes / hole NaNs still
- // due to that, but those will be ignored below.
- if (k >= elements.length) {
- k = elements.length - 1;
- }
+macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
+ context: Context, array: JSArray, from: Smi, searchElement: JSAny): Smi {
+ const elements: FixedArrayBase = array.elements;
+ let k: Smi = from;
+
+ // Bug(898785): Due to side-effects in the evaluation of `fromIndex`
+ // the {from} can be out-of-bounds here, so we need to clamp {k} to
+ // the {elements} length. We might be reading holes / hole NaNs still
+ // due to that, but those will be ignored below.
+ if (k >= elements.length) {
+ k = elements.length - 1;
+ }
- while (k >= 0) {
- try {
- const element: JSAny = LoadWithHoleCheck<Elements>(elements, k)
- otherwise Hole;
+ while (k >= 0) {
+ try {
+ const element: JSAny = LoadWithHoleCheck<Elements>(elements, k)
+ otherwise Hole;
- const same: Boolean = StrictEqual(searchElement, element);
- if (same == True) {
- assert(Is<FastJSArray>(array));
- return k;
- }
+ const same: Boolean = StrictEqual(searchElement, element);
+ if (same == True) {
+ assert(Is<FastJSArray>(array));
+ return k;
}
- label Hole {} // Do nothing for holes.
-
- --k;
- }
+ } label Hole {} // Do nothing for holes.
- assert(Is<FastJSArray>(array));
- return -1;
+ --k;
}
- transitioning macro
- GetFromIndex(context: Context, length: Number, arguments: Arguments): Number {
- // 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
- // else let n be len - 1.
- const n: Number =
- arguments.length < 2 ? length - 1 : ToInteger_Inline(arguments[1]);
-
- // 5. If n >= 0, then.
- let k: Number = SmiConstant(0);
- if (n >= 0) {
- // a. If n is -0, let k be +0; else let k be min(n, len - 1).
- // If n was -0 it got truncated to 0.0, so taking the minimum is fine.
- k = Min(n, length - 1);
- } else {
- // a. Let k be len + n.
- k = length + n;
- }
- return k;
+ assert(Is<FastJSArray>(array));
+ return -1;
+}
+
+transitioning macro
+GetFromIndex(context: Context, length: Number, arguments: Arguments): Number {
+ // 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
+ // else let n be len - 1.
+ const n: Number =
+ arguments.length < 2 ? length - 1 : ToInteger_Inline(arguments[1]);
+
+ // 5. If n >= 0, then.
+ let k: Number = SmiConstant(0);
+ if (n >= 0) {
+ // a. If n is -0, let k be +0; else let k be min(n, len - 1).
+ // If n was -0 it got truncated to 0.0, so taking the minimum is fine.
+ k = Min(n, length - 1);
+ } else {
+ // a. Let k be len + n.
+ k = length + n;
}
+ return k;
+}
- macro TryFastArrayLastIndexOf(
- context: Context, receiver: JSReceiver, searchElement: JSAny,
- from: Number): JSAny
- labels Slow {
- const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
- const length: Smi = array.length;
- if (length == 0) return SmiConstant(-1);
-
- const fromSmi: Smi = Cast<Smi>(from) otherwise Slow;
- const kind: ElementsKind = array.map.elements_kind;
- if (IsFastSmiOrTaggedElementsKind(kind)) {
- return FastArrayLastIndexOf<FixedArray>(
- context, array, fromSmi, searchElement);
- }
- assert(IsDoubleElementsKind(kind));
- return FastArrayLastIndexOf<FixedDoubleArray>(
+macro TryFastArrayLastIndexOf(
+ context: Context, receiver: JSReceiver, searchElement: JSAny,
+ from: Number): JSAny
+ labels Slow {
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const length: Smi = array.length;
+ if (length == 0) return SmiConstant(-1);
+
+ const fromSmi: Smi = Cast<Smi>(from) otherwise Slow;
+ const kind: ElementsKind = array.map.elements_kind;
+ if (IsFastSmiOrTaggedElementsKind(kind)) {
+ return FastArrayLastIndexOf<FixedArray>(
context, array, fromSmi, searchElement);
}
+ assert(IsDoubleElementsKind(kind));
+ return FastArrayLastIndexOf<FixedDoubleArray>(
+ context, array, fromSmi, searchElement);
+}
- transitioning macro GenericArrayLastIndexOf(
- context: Context, object: JSReceiver, searchElement: JSAny,
- from: Number): JSAny {
- let k: Number = from;
-
- // 7. Repeat, while k >= 0.
- while (k >= 0) {
- // a. Let kPresent be ? HasProperty(O, ! ToString(k)).
- const kPresent: Boolean = HasProperty(object, k);
+transitioning macro GenericArrayLastIndexOf(
+ context: Context, object: JSReceiver, searchElement: JSAny,
+ from: Number): JSAny {
+ let k: Number = from;
- // b. If kPresent is true, then.
- if (kPresent == True) {
- // i. Let elementK be ? Get(O, ! ToString(k)).
- const element: JSAny = GetProperty(object, k);
+ // 7. Repeat, while k >= 0.
+ while (k >= 0) {
+ // a. Let kPresent be ? HasProperty(O, ! ToString(k)).
+ const kPresent: Boolean = HasProperty(object, k);
- // ii. Let same be the result of performing Strict Equality Comparison
- // searchElement === elementK.
- const same: Boolean = StrictEqual(searchElement, element);
+ // b. If kPresent is true, then.
+ if (kPresent == True) {
+ // i. Let elementK be ? Get(O, ! ToString(k)).
+ const element: JSAny = GetProperty(object, k);
- // iii. If same is true, return k.
- if (same == True) return k;
- }
+ // ii. Let same be the result of performing Strict Equality Comparison
+ // searchElement === elementK.
+ const same: Boolean = StrictEqual(searchElement, element);
- // c. Decrease k by 1.
- --k;
+ // iii. If same is true, return k.
+ if (same == True) return k;
}
- // 8. Return -1.
- return SmiConstant(-1);
+ // c. Decrease k by 1.
+ --k;
}
- // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
- transitioning javascript builtin ArrayPrototypeLastIndexOf(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // 1. Let O be ? ToObject(this value).
- const object: JSReceiver = ToObject_Inline(context, receiver);
+ // 8. Return -1.
+ return SmiConstant(-1);
+}
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(object);
+// https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
+transitioning javascript builtin ArrayPrototypeLastIndexOf(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
- // 3. If len is 0, return -1.
- if (length == SmiConstant(0)) return SmiConstant(-1);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(object);
- // Step 4 - 6.
- const from: Number = GetFromIndex(context, length, arguments);
+ // 3. If len is 0, return -1.
+ if (length == SmiConstant(0)) return SmiConstant(-1);
- const searchElement: JSAny = arguments[0];
+ // Step 4 - 6.
+ const from: Number = GetFromIndex(context, length, arguments);
- try {
- return TryFastArrayLastIndexOf(context, object, searchElement, from)
- otherwise Baseline;
- }
- label Baseline {
- return GenericArrayLastIndexOf(context, object, searchElement, from);
- }
+ const searchElement: JSAny = arguments[0];
+
+ try {
+ return TryFastArrayLastIndexOf(context, object, searchElement, from)
+ otherwise Baseline;
+ } label Baseline {
+ return GenericArrayLastIndexOf(context, object, searchElement, from);
}
}
+}
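
The GetFromIndex macro above implements the fromIndex normalization quoted in its comments (steps 4-6 of Array.prototype.lastIndexOf). A hedged TypeScript restatement of the same arithmetic, with names of our own choosing:

    function getFromIndexSketch(length: number, fromIndex?: number): number {
      // 4. If fromIndex is present, let n be ? ToInteger(fromIndex);
      //    else let n be len - 1. (ToInteger maps NaN to 0.)
      const raw = fromIndex === undefined ? length - 1 : Math.trunc(fromIndex);
      const n = Number.isNaN(raw) ? 0 : raw;
      // 5. If n >= 0, then
      if (n >= 0) {
        // a. If n is -0, let k be +0; else let k be min(n, len - 1).
        return Math.min(n, length - 1);
      }
      // else a. Let k be len + n. A negative k means the search loop never
      // runs and the builtin returns -1.
      return length + n;
    }

    // getFromIndexSketch(5) === 4; getFromIndexSketch(5, -2) === 3;
    // getFromIndexSketch(5, 99) === 4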
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
index 9b45341c0e..8ff3cbaccd 100644
--- a/deps/v8/src/builtins/array-map.tq
+++ b/deps/v8/src/builtins/array-map.tq
@@ -3,271 +3,265 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayMapLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
- length: JSAny): JSAny {
- // All continuation points in the optimized filter implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayMapLoopContinuation(
- jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
- numberLength);
- }
-
- transitioning javascript builtin
- ArrayMapLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
- length: JSAny, result: JSAny): JSAny {
- // All continuation points in the optimized filter implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. map() needs
- // to pick up at the next step, which is setting the callback result in
- // the output array. After incrementing k, we can glide into the loop
- // continuation builtin.
-
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- FastCreateDataProperty(outputArray, numberK, result);
-
- // 7d. Increase k by 1.
- numberK = numberK + 1;
-
- return ArrayMapLoopContinuation(
- jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
- numberLength);
- }
+transitioning javascript builtin
+ArrayMapLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+ length: JSAny): JSAny {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayMapLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength);
+}
- transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- array: JSReceiver, o: JSReceiver, initialK: Number,
- length: Number): JSAny {
- // 6. Let k be 0.
- // 7. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 7a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 7b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(o, k);
-
- // 7c. If kPresent is true, then:
- if (kPresent == True) {
- // i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, k);
-
- // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
- const mappedValue: JSAny =
- Call(context, callbackfn, thisArg, kValue, k, o);
-
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
- FastCreateDataProperty(array, k, mappedValue);
- }
+transitioning javascript builtin
+ArrayMapLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny,
+ length: JSAny, result: JSAny): JSAny {
+ // All continuation points in the optimized filter implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const outputArray = Cast<JSReceiver>(array) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. map() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k, we can glide into the loop
+ // continuation builtin.
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ FastCreateDataProperty(outputArray, numberK, result);
+
+ // 7d. Increase k by 1.
+ numberK = numberK + 1;
+
+ return ArrayMapLoopContinuation(
+ jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, numberK,
+ numberLength);
+}
- // 7d. Increase k by 1. (done by the loop).
+transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
+ array: JSReceiver, o: JSReceiver, initialK: Number, length: Number): JSAny {
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 7a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
+
+ // 7b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
+
+ // 7c. If kPresent is true, then:
+ if (kPresent == True) {
+ // i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, k);
+
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ const mappedValue: JSAny =
+ Call(context, callbackfn, thisArg, kValue, k, o);
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ FastCreateDataProperty(array, k, mappedValue);
}
- // 8. Return A.
- return array;
+ // 7d. Increase k by 1. (done by the loop).
}
- struct Vector {
- macro ReportSkippedElement() {
- this.skippedElements = true;
- }
-
- macro CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
- const length: Smi = this.fixedArray.length;
- assert(validLength <= length);
- let kind: ElementsKind = ElementsKind::PACKED_SMI_ELEMENTS;
- if (!this.onlySmis) {
- if (this.onlyNumbers) {
- kind = ElementsKind::PACKED_DOUBLE_ELEMENTS;
- } else {
- kind = ElementsKind::PACKED_ELEMENTS;
- }
- }
+ // 8. Return A.
+ return array;
+}
- if (this.skippedElements || validLength < length) {
- // We also need to create a holey output array if we are
- // bailing out of the fast path partway through the array.
- // This is indicated by {validLength} < {length}.
- // Who knows if the bailout condition will continue to fill in
- // every element?
- kind = FastHoleyElementsKind(kind);
- }
+struct Vector {
+ macro ReportSkippedElement() {
+ this.skippedElements = true;
+ }
- const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- let a: JSArray;
-
- if (IsDoubleElementsKind(kind)) {
- // We need to allocate and copy.
- // First, initialize the elements field before allocation to prevent
- // heap corruption.
- const elements: FixedDoubleArray = AllocateFixedDoubleArrayWithHoles(
- SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation);
- a = NewJSArray(map, this.fixedArray);
- for (let i: Smi = 0; i < validLength; i++) {
- typeswitch (
- UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) {
- case (n: Number): {
- elements.floats[i] = Convert<float64_or_hole>(n);
- }
- case (TheHole): {
- }
- }
- }
- a.elements = elements;
+ macro CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
+ const length: Smi = this.fixedArray.length;
+ assert(validLength <= length);
+ let kind: ElementsKind = ElementsKind::PACKED_SMI_ELEMENTS;
+ if (!this.onlySmis) {
+ if (this.onlyNumbers) {
+ kind = ElementsKind::PACKED_DOUBLE_ELEMENTS;
} else {
- // Simply install the given fixedArray in {vector}.
- a = NewJSArray(map, this.fixedArray);
+ kind = ElementsKind::PACKED_ELEMENTS;
}
+ }
- // Paranoia. the FixedArray now "belongs" to JSArray {a}.
- this.fixedArray = kEmptyFixedArray;
- return a;
+ if (this.skippedElements || validLength < length) {
+ // We also need to create a holey output array if we are
+ // bailing out of the fast path partway through the array.
+ // This is indicated by {validLength} < {length}.
+ // Who knows if the bailout condition will continue to fill in
+ // every element?
+ kind = FastHoleyElementsKind(kind);
}
- macro StoreResult(implicit context: Context)(index: Smi, result: JSAny) {
- typeswitch (result) {
- case (s: Smi): {
- this.fixedArray.objects[index] = s;
- }
- case (s: HeapNumber): {
- this.onlySmis = false;
- this.fixedArray.objects[index] = s;
- }
- case (s: JSAnyNotNumber): {
- this.onlySmis = false;
- this.onlyNumbers = false;
- this.fixedArray.objects[index] = s;
+ const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ let a: JSArray;
+
+ if (IsDoubleElementsKind(kind)) {
+ // We need to allocate and copy.
+ // First, initialize the elements field before allocation to prevent
+ // heap corruption.
+ const elements: FixedDoubleArray = AllocateFixedDoubleArrayWithHoles(
+ SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation);
+ a = NewJSArray(map, this.fixedArray);
+ for (let i: Smi = 0; i < validLength; i++) {
+ typeswitch (
+ UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) {
+ case (n: Number): {
+ elements.floats[i] = Convert<float64_or_hole>(n);
+ }
+ case (TheHole): {
+ }
}
}
+ a.elements = elements;
+ } else {
+ // Simply install the given fixedArray in {vector}.
+ a = NewJSArray(map, this.fixedArray);
}
- fixedArray: FixedArray;
- onlySmis: bool; // initially true.
- onlyNumbers: bool; // initially true.
- skippedElements: bool; // initially false.
+  // Paranoia. The FixedArray now "belongs" to JSArray {a}.
+ this.fixedArray = kEmptyFixedArray;
+ return a;
}
- macro NewVector(implicit context: Context)(length: Smi): Vector {
- const fixedArray = length > 0 ?
- AllocateFixedArrayWithHoles(
- SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation) :
- kEmptyFixedArray;
- return Vector{
- fixedArray,
- onlySmis: true,
- onlyNumbers: true,
- skippedElements: false
- };
+ macro StoreResult(implicit context: Context)(index: Smi, result: JSAny) {
+ typeswitch (result) {
+ case (s: Smi): {
+ this.fixedArray.objects[index] = s;
+ }
+ case (s: HeapNumber): {
+ this.onlySmis = false;
+ this.fixedArray.objects[index] = s;
+ }
+ case (s: JSAnyNotNumber): {
+ this.onlySmis = false;
+ this.onlyNumbers = false;
+ this.fixedArray.objects[index] = s;
+ }
+ }
}
- transitioning macro FastArrayMap(implicit context: Context)(
- fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable,
- thisArg: JSAny): JSArray
- labels Bailout(JSArray, Smi) {
- let k: Smi = 0;
- let fastOW = NewFastJSArrayForReadWitness(fastO);
- let vector = NewVector(len);
+ fixedArray: FixedArray;
+ onlySmis: bool; // initially true.
+ onlyNumbers: bool; // initially true.
+ skippedElements: bool; // initially false.
+}
- // Build a fast loop over the smi array.
- // 7. Repeat, while k < len.
- try {
- for (; k < len; k++) {
- fastOW.Recheck() otherwise goto PrepareBailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto PrepareBailout(k);
-
- try {
- const value: JSAny = fastOW.LoadElementNoHole(k)
- otherwise FoundHole;
- const result: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
- vector.StoreResult(k, result);
- }
- label FoundHole {
- // Our output array must necessarily be holey because of holes in
- // the input array.
- vector.ReportSkippedElement();
- }
+macro NewVector(implicit context: Context)(length: Smi): Vector {
+ const fixedArray = length > 0 ?
+ AllocateFixedArrayWithHoles(
+ SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation) :
+ kEmptyFixedArray;
+ return Vector{
+ fixedArray,
+ onlySmis: true,
+ onlyNumbers: true,
+ skippedElements: false
+ };
+}
+
+transitioning macro FastArrayMap(implicit context: Context)(
+ fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable,
+ thisArg: JSAny): JSArray
+ labels Bailout(JSArray, Smi) {
+ let k: Smi = 0;
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
+ let vector = NewVector(len);
+
+ // Build a fast loop over the smi array.
+ // 7. Repeat, while k < len.
+ try {
+ for (; k < len; k++) {
+ fastOW.Recheck() otherwise goto PrepareBailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto PrepareBailout(k);
+
+ try {
+ const value: JSAny = fastOW.LoadElementNoHole(k)
+ otherwise FoundHole;
+ const result: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ vector.StoreResult(k, result);
+ } label FoundHole {
+ // Our output array must necessarily be holey because of holes in
+ // the input array.
+ vector.ReportSkippedElement();
}
}
- label PrepareBailout(k: Smi) deferred {
- // Transform {vector} into a JSArray and bail out.
- goto Bailout(vector.CreateJSArray(k), k);
- }
-
- return vector.CreateJSArray(len);
+ } label PrepareBailout(k: Smi) deferred {
+ // Transform {vector} into a JSArray and bail out.
+ goto Bailout(vector.CreateJSArray(k), k);
}
- // https://tc39.github.io/ecma262/#sec-array.prototype.map
- transitioning javascript builtin
- ArrayMap(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.map');
+ return vector.CreateJSArray(len);
+}
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+// https://tc39.github.io/ecma262/#sec-array.prototype.map
+transitioning javascript builtin
+ArrayMap(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.map');
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) goto TypeError;
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) goto TypeError;
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
- let array: JSReceiver;
- let k: Number = 0;
- try {
- // 5. Let A be ? ArraySpeciesCreate(O, len).
- if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate;
- const o: FastJSArrayForRead = Cast<FastJSArrayForRead>(receiver)
- otherwise SlowSpeciesCreate;
- const smiLength: Smi = Cast<Smi>(len)
- otherwise SlowSpeciesCreate;
-
- return FastArrayMap(o, smiLength, callbackfn, thisArg)
- otherwise Bailout;
- }
- label SlowSpeciesCreate {
- array = ArraySpeciesCreate(context, receiver, len);
- }
- label Bailout(output: JSArray, kValue: Smi) deferred {
- array = output;
- k = kValue;
- }
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
- return ArrayMapLoopContinuation(o, callbackfn, thisArg, array, o, k, len);
- }
- label TypeError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ let array: JSReceiver;
+ let k: Number = 0;
+ try {
+ // 5. Let A be ? ArraySpeciesCreate(O, len).
+ if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate;
+ const o: FastJSArrayForRead = Cast<FastJSArrayForRead>(receiver)
+ otherwise SlowSpeciesCreate;
+ const smiLength: Smi = Cast<Smi>(len)
+ otherwise SlowSpeciesCreate;
+
+ return FastArrayMap(o, smiLength, callbackfn, thisArg)
+ otherwise Bailout;
+ } label SlowSpeciesCreate {
+ array = ArraySpeciesCreate(context, receiver, len);
+ } label Bailout(output: JSArray, kValue: Smi) deferred {
+ array = output;
+ k = kValue;
}
+
+ return ArrayMapLoopContinuation(o, callbackfn, thisArg, array, o, k, len);
+ } label TypeError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
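
The Vector struct above tracks how wide the output elements kind has to be as callback results are stored: packed smis until a heap number appears, packed doubles until a non-number appears, and a holey variant if any element was skipped. A hypothetical TypeScript sketch of that bookkeeping only; the enum, class, and the Number.isInteger smi test are assumptions of this sketch, not V8 types:

    enum OutputKind { PackedSmi, PackedDouble, Packed }

    class ResultVectorSketch {
      private onlySmis = true;     // initially true, as in the Torque struct
      private onlyNumbers = true;  // initially true
      skippedElements = false;     // set when a hole is encountered
      private values: unknown[] = [];

      store(index: number, result: unknown): void {
        if (typeof result === "number" && Number.isInteger(result)) {
          // Smi-like result: no widening needed (real smis have a narrower
          // range, so this is only an approximation).
        } else if (typeof result === "number") {
          this.onlySmis = false;     // a heap-number result forces doubles
        } else {
          this.onlySmis = false;     // any other value forces tagged elements
          this.onlyNumbers = false;
        }
        this.values[index] = result;
      }

      kind(): OutputKind {
        if (this.onlySmis) return OutputKind.PackedSmi;
        return this.onlyNumbers ? OutputKind.PackedDouble : OutputKind.Packed;
      }

      toArray(): unknown[] {
        return this.values.slice();
      }
    }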
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 2df961b995..49c67bd823 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -3,53 +3,53 @@
// found in the LICENSE file.
namespace array {
- // https://tc39.github.io/ecma262/#sec-array.of
- transitioning javascript builtin
- ArrayOf(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- JSAny {
- // 1. Let len be the actual number of arguments passed to this function.
- const len: Smi = Convert<Smi>(arguments.length);
-
- // 2. Let items be the List of arguments passed to this function.
- const items: Arguments = arguments;
-
- // 3. Let C be the this value.
- const c: JSAny = receiver;
-
- let a: JSReceiver;
-
- // 4. If IsConstructor(C) is true, then
- typeswitch (c) {
- case (c: Constructor): {
- // a. Let A be ? Construct(C, Ā« len Ā»).
- a = Construct(c, len);
- }
- case (JSAny): {
- // a. Let A be ? ArrayCreate(len).
- a = ArrayCreate(len);
- }
+// https://tc39.github.io/ecma262/#sec-array.of
+transitioning javascript builtin
+ArrayOf(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let len be the actual number of arguments passed to this function.
+ const len: Smi = Convert<Smi>(arguments.length);
+
+ // 2. Let items be the List of arguments passed to this function.
+ const items: Arguments = arguments;
+
+ // 3. Let C be the this value.
+ const c: JSAny = receiver;
+
+ let a: JSReceiver;
+
+ // 4. If IsConstructor(C) is true, then
+ typeswitch (c) {
+ case (c: Constructor): {
+ // a. Let A be ? Construct(C, Ā« len Ā»).
+ a = Construct(c, len);
}
+ case (JSAny): {
+ // a. Let A be ? ArrayCreate(len).
+ a = ArrayCreate(len);
+ }
+ }
- // 6. Let k be 0.
- let k: Smi = 0;
+ // 6. Let k be 0.
+ let k: Smi = 0;
- // 7. Repeat, while k < len
- while (k < len) {
- // a. Let kValue be items[k].
- const kValue: JSAny = items[Convert<intptr>(k)];
+ // 7. Repeat, while k < len
+ while (k < len) {
+ // a. Let kValue be items[k].
+ const kValue: JSAny = items[Convert<intptr>(k)];
- // b. Let Pk be ! ToString(k).
- // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
- FastCreateDataProperty(a, k, kValue);
+ // b. Let Pk be ! ToString(k).
+ // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
+ FastCreateDataProperty(a, k, kValue);
- // d. Increase k by 1.
- k++;
- }
+ // d. Increase k by 1.
+ k++;
+ }
- // 8. Perform ? Set(A, "length", len, true).
- array::SetPropertyLength(a, len);
+ // 8. Perform ? Set(A, "length", len, true).
+ array::SetPropertyLength(a, len);
- // 9. Return A.
- return a;
- }
+ // 9. Return A.
+ return a;
+}
}
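
Array.of above only branches on whether the receiver is a constructor (step 4). A compact TypeScript approximation of those steps; note that typeof this === 'function' only approximates IsConstructor, and plain assignment stands in for CreateDataPropertyOrThrow, so this is illustrative rather than exact:

    function arrayOfSketch<T>(this: unknown, ...items: T[]): { length: number } {
      // 1.-3. len is the argument count, items the argument list, C the this value.
      const len = items.length;
      // 4. If IsConstructor(C) is true, let A be ? Construct(C, <<len>>);
      //    otherwise (5.) let A be ? ArrayCreate(len).
      const a: any = typeof this === "function" ?
          new (this as new (n: number) => object)(len) :
          new Array(len);
      // 6.-7. Repeat, while k < len: define A[k] = items[k], then increment k.
      for (let k = 0; k < len; k++) a[k] = items[k];
      // 8. Perform ? Set(A, "length", len, true).
      a.length = len;
      // 9. Return A.
      return a;
    }

    // arrayOfSketch.call(Array, 1, 2, 3) yields [1, 2, 3]
    // arrayOfSketch.call(undefined, "x", "y") yields ["x", "y"]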
diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq
index 4505586272..90e0e496f8 100644
--- a/deps/v8/src/builtins/array-reduce-right.tq
+++ b/deps/v8/src/builtins/array-reduce-right.tq
@@ -3,198 +3,195 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayReduceRightPreLoopEagerDeoptContinuation(
- js-implicit context: NativeContext,
- receiver: JSAny)(callback: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
- const initialK = numberLength - 1;
-
- // Simulate starting the loop at {length - 1}, but ensuring that the
- // accumulator is the hole. The continuation stub will search for the
- // last non-hole element, rightly throwing an exception if not found.
- return ArrayReduceRightLoopContinuation(
- jsreceiver, callbackfn, TheHole, jsreceiver, initialK, numberLength);
- }
-
- transitioning javascript builtin
- ArrayReduceRightLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, initialK: JSAny, length: JSAny,
- accumulator: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayReduceRightLoopContinuation(
- jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
- }
-
- transitioning javascript builtin
- ArrayReduceRightLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // The accumulator is the result from the callback call which just occured.
- const r = ArrayReduceRightLoopContinuation(
- jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
- return r;
- }
+transitioning javascript builtin
+ArrayReduceRightPreLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+ const initialK = numberLength - 1;
+
+ // Simulate starting the loop at {length - 1}, but ensuring that the
+ // accumulator is the hole. The continuation stub will search for the
+ // last non-hole element, rightly throwing an exception if not found.
+ return ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, TheHole, jsreceiver, initialK, numberLength);
+}
- transitioning builtin ArrayReduceRightLoopContinuation(implicit context:
- Context)(
- _receiver: JSReceiver, callbackfn: Callable,
- initialAccumulator: JSAny|TheHole, o: JSReceiver, initialK: Number,
- _length: Number): JSAny {
- let accumulator = initialAccumulator;
-
- // 8b and 9. Repeat, while k >= 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 8b i and 9a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
- const present: Boolean = HasProperty_Inline(o, k);
-
- // 8b iii and 9c. If kPresent is true, then
- if (present == True) {
- // 8b iii and 9c i. Let kValue be ? Get(O, Pk).
- const value: JSAny = GetProperty(o, k);
-
- typeswitch (accumulator) {
- case (TheHole): {
- // 8b iii 1.
- accumulator = value;
- }
- case (accumulatorNotHole: JSAny): {
- // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
- // <accumulator, kValue, k, O>).
- accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value, k,
- o);
- }
- }
- }
+transitioning javascript builtin
+ArrayReduceRightLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, initialK: JSAny, length: JSAny,
+ accumulator: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
+}
- // 8b iv and 9d. Decrease k by 1. (done by the loop).
- }
+transitioning javascript builtin
+ArrayReduceRightLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+  // The accumulator is the result from the callback call which just occurred.
+ const r = ArrayReduceRightLoopContinuation(
+ jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
+ return r;
+}
- // 8c. if kPresent is false, throw a TypeError exception.
- // If the accumulator is discovered with the sentinel hole value,
- // this means kPresent is false.
- typeswitch (accumulator) {
- case (TheHole): {
- ThrowTypeError(
- MessageTemplate::kReduceNoInitial, 'Array.prototype.reduceRight');
- }
- case (accumulator: JSAny): {
- return accumulator;
- }
- }
- }
+transitioning builtin ArrayReduceRightLoopContinuation(
+ implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable,
+ initialAccumulator: JSAny|TheHole, o: JSReceiver, initialK: Number,
+ _length: Number): JSAny {
+ let accumulator = initialAccumulator;
- transitioning macro FastArrayReduceRight(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable,
- initialAccumulator: JSAny|TheHole): JSAny
- labels Bailout(Number, JSAny|TheHole) {
- let accumulator = initialAccumulator;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
- const fastO = Cast<FastJSArrayForRead>(o)
- otherwise goto Bailout(len - 1, accumulator);
- let fastOW = NewFastJSArrayForReadWitness(fastO);
+ // 8b and 9. Repeat, while k >= 0
+ for (let k: Number = initialK; k >= 0; k--) {
+ // 8b i and 9a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
- // Build a fast loop over the array.
- for (let k: Smi = smiLen - 1; k >= 0; k--) {
- fastOW.Recheck() otherwise goto Bailout(k, accumulator);
+ // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
+ const present: Boolean = HasProperty_Inline(o, k);
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
+ // 8b iii and 9c. If kPresent is true, then
+ if (present == True) {
+ // 8b iii and 9c i. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
- const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
typeswitch (accumulator) {
case (TheHole): {
+ // 8b iii 1.
accumulator = value;
}
case (accumulatorNotHole: JSAny): {
+ // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+ // <accumulator, kValue, k, O>).
accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value, k,
- fastOW.Get());
+ context, callbackfn, Undefined, accumulatorNotHole, value, k, o);
}
}
}
+
+ // 8b iv and 9d. Decrease k by 1. (done by the loop).
+ }
+
+ // 8c. if kPresent is false, throw a TypeError exception.
+ // If the accumulator is discovered with the sentinel hole value,
+ // this means kPresent is false.
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(
+ MessageTemplate::kReduceNoInitial, 'Array.prototype.reduceRight');
+ }
+ case (accumulator: JSAny): {
+ return accumulator;
+ }
+ }
+}
+
+transitioning macro FastArrayReduceRight(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable,
+ initialAccumulator: JSAny|TheHole): JSAny
+ labels Bailout(Number, JSAny | TheHole) {
+ let accumulator = initialAccumulator;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
+ const fastO = Cast<FastJSArrayForRead>(o)
+ otherwise goto Bailout(len - 1, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
+
+ // Build a fast loop over the array.
+ for (let k: Smi = smiLen - 1; k >= 0; k--) {
+ fastOW.Recheck() otherwise goto Bailout(k, accumulator);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
+
+ const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
typeswitch (accumulator) {
case (TheHole): {
- ThrowTypeError(
- MessageTemplate::kReduceNoInitial, 'Array.prototype.reduceRight');
+ accumulator = value;
}
- case (accumulator: JSAny): {
- return accumulator;
+ case (accumulatorNotHole: JSAny): {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulatorNotHole, value, k,
+ fastOW.Get());
}
}
}
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(
+ MessageTemplate::kReduceNoInitial, 'Array.prototype.reduceRight');
+ }
+ case (accumulator: JSAny): {
+ return accumulator;
+ }
+ }
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
- transitioning javascript builtin
- ArrayReduceRight(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.reduceRight');
+// https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
+transitioning javascript builtin
+ArrayReduceRight(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.reduceRight');
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto NoCallableError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NoCallableError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
- // 4. If len is 0 and initialValue is not present, throw a TypeError
- // exception. (This case is handled at the end of
- // ArrayReduceRightLoopContinuation).
+ // 4. If len is 0 and initialValue is not present, throw a TypeError
+ // exception. (This case is handled at the end of
+ // ArrayReduceRightLoopContinuation).
- const initialValue: JSAny|TheHole =
- arguments.length > 1 ? arguments[1] : TheHole;
+ const initialValue: JSAny|TheHole =
+ arguments.length > 1 ? arguments[1] : TheHole;
- try {
- return FastArrayReduceRight(o, len, callbackfn, initialValue)
- otherwise Bailout;
- }
- label Bailout(value: Number, accumulator: JSAny|TheHole) {
- return ArrayReduceRightLoopContinuation(
- o, callbackfn, accumulator, o, value, len);
- }
- }
- label NoCallableError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ try {
+ return FastArrayReduceRight(o, len, callbackfn, initialValue)
+ otherwise Bailout;
+ } label Bailout(value: Number, accumulator: JSAny|TheHole) {
+ return ArrayReduceRightLoopContinuation(
+ o, callbackfn, accumulator, o, value, len);
}
+ } label NoCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
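
ArrayReduceRightLoopContinuation threads V8's TheHole sentinel through the accumulator to mean "no initial value seen yet"; still holding the sentinel after the loop is what triggers the kReduceNoInitial TypeError. A self-contained TypeScript sketch of that pattern, using a symbol as a stand-in for TheHole:

    const THE_HOLE: unique symbol = Symbol("TheHole"); // stand-in for V8's oddball

    function reduceRightSketch<T, A>(
      o: ArrayLike<T>,
      callbackfn: (acc: A | T, value: T, index: number, obj: ArrayLike<T>) => A,
      initialValue?: A,
    ): A | T {
      let accumulator: A | T | typeof THE_HOLE =
          arguments.length > 2 ? (initialValue as A) : THE_HOLE;
      // 8b/9. Repeat, while k >= 0.
      for (let k = o.length - 1; k >= 0; k--) {
        if (!(k in (o as object))) continue;  // kPresent check: holes are skipped
        const value = o[k];
        accumulator = accumulator === THE_HOLE
            ? value                                  // 8b iii 1: adopt the last present element
            : callbackfn(accumulator, value, k, o);  // 9c ii: Call(callbackfn, undefined, ...)
      }
      // 8c. Still the hole: no initial value and no element was present.
      if (accumulator === THE_HOLE) {
        throw new TypeError("Reduce of empty array with no initial value");
      }
      return accumulator;
    }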
diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq
index 4a03776bd4..8ab85a0cb6 100644
--- a/deps/v8/src/builtins/array-reduce.tq
+++ b/deps/v8/src/builtins/array-reduce.tq
@@ -3,197 +3,194 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArrayReducePreLoopEagerDeoptContinuation(
- js-implicit context: NativeContext,
- receiver: JSAny)(callback: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // Simulate starting the loop at 0, but ensuring that the accumulator is
- // the hole. The continuation stub will search for the initial non-hole
- // element, rightly throwing an exception if not found.
- return ArrayReduceLoopContinuation(
- jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
- }
+transitioning javascript builtin
+ArrayReducePreLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // Simulate starting the loop at 0, but ensuring that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if not found.
+ return ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
+}
- transitioning javascript builtin
- ArrayReduceLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, initialK: JSAny, length: JSAny,
- accumulator: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArrayReduceLoopContinuation(
- jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
- }
+transitioning javascript builtin
+ArrayReduceLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, initialK: JSAny, length: JSAny,
+ accumulator: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, accumulator, jsreceiver, numberK, numberLength);
+}
- transitioning javascript builtin
- ArrayReduceLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
- // All continuation points in the optimized every implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // The accumulator is the result from the callback call which just occured.
- const r = ArrayReduceLoopContinuation(
- jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
- return r;
- }
+transitioning javascript builtin
+ArrayReduceLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny {
+ // All continuation points in the optimized every implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+  // The accumulator is the result from the callback call which just occurred.
+ const r = ArrayReduceLoopContinuation(
+ jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
+ return r;
+}
- transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable,
- initialAccumulator: JSAny|TheHole, o: JSReceiver, initialK: Number,
- length: Number): JSAny {
- let accumulator = initialAccumulator;
-
- // 8b and 9. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 8b i and 9a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
- const present: Boolean = HasProperty_Inline(o, k);
-
- // 6c. If kPresent is true, then
- if (present == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- const value: JSAny = GetProperty(o, k);
-
- typeswitch (accumulator) {
- case (TheHole): {
- // 8b.
- accumulator = value;
- }
- case (accumulatorNotHole: JSAny): {
- // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
- // <accumulator, kValue, k, O>).
- accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value, k,
- o);
- }
- }
- }
+transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable,
+ initialAccumulator: JSAny|TheHole, o: JSReceiver, initialK: Number,
+ length: Number): JSAny {
+ let accumulator = initialAccumulator;
- // 8b iv and 9d. Increase k by 1. (done by the loop).
- }
+ // 8b and 9. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 8b i and 9a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
- // 8c. if kPresent is false, throw a TypeError exception.
- // If the accumulator is discovered with the sentinel hole value,
- // this means kPresent is false.
- typeswitch (accumulator) {
- case (TheHole): {
- ThrowTypeError(
- MessageTemplate::kReduceNoInitial, 'Array.prototype.reduce');
- }
- case (accumulator: JSAny): {
- return accumulator;
- }
- }
- }
+ // 8b ii and 9b. Set kPresent to ? HasProperty(O, Pk).
+ const present: Boolean = HasProperty_Inline(o, k);
+
+ // 6c. If kPresent is true, then
+ if (present == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const value: JSAny = GetProperty(o, k);
- transitioning macro FastArrayReduce(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable,
- initialAccumulator: JSAny|TheHole): JSAny
- labels Bailout(Number, JSAny|TheHole) {
- const k = 0;
- let accumulator = initialAccumulator;
- Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
- const fastO =
- Cast<FastJSArrayForRead>(o) otherwise goto Bailout(k, accumulator);
- let fastOW = NewFastJSArrayForReadWitness(fastO);
-
- // Build a fast loop over the array.
- for (let k: Smi = 0; k < len; k++) {
- fastOW.Recheck() otherwise goto Bailout(k, accumulator);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
-
- const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
typeswitch (accumulator) {
case (TheHole): {
+ // 8b.
accumulator = value;
}
case (accumulatorNotHole: JSAny): {
+ // 9c. ii. Set accumulator to ? Call(callbackfn, undefined,
+ // <accumulator, kValue, k, O>).
accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value, k,
- fastOW.Get());
+ context, callbackfn, Undefined, accumulatorNotHole, value, k, o);
}
}
}
+
+ // 8b iv and 9d. Increase k by 1. (done by the loop).
+ }
+
+ // 8c. if kPresent is false, throw a TypeError exception.
+ // If the accumulator is discovered with the sentinel hole value,
+ // this means kPresent is false.
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(
+ MessageTemplate::kReduceNoInitial, 'Array.prototype.reduce');
+ }
+ case (accumulator: JSAny): {
+ return accumulator;
+ }
+ }
+}
+
+transitioning macro FastArrayReduce(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable,
+ initialAccumulator: JSAny|TheHole): JSAny
+ labels Bailout(Number, JSAny | TheHole) {
+ const k = 0;
+ let accumulator = initialAccumulator;
+ Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
+ const fastO =
+ Cast<FastJSArrayForRead>(o) otherwise goto Bailout(k, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
+
+ // Build a fast loop over the array.
+ for (let k: Smi = 0; k < len; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k, accumulator);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
+
+ const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
typeswitch (accumulator) {
case (TheHole): {
- ThrowTypeError(
- MessageTemplate::kReduceNoInitial, 'Array.prototype.reduce');
+ accumulator = value;
}
- case (accumulator: JSAny): {
- return accumulator;
+ case (accumulatorNotHole: JSAny): {
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulatorNotHole, value, k,
+ fastOW.Get());
}
}
}
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(
+ MessageTemplate::kReduceNoInitial, 'Array.prototype.reduce');
+ }
+ case (accumulator: JSAny): {
+ return accumulator;
+ }
+ }
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.reduce
- transitioning javascript builtin
- ArrayReduce(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.reduce');
+// https://tc39.github.io/ecma262/#sec-array.prototype.reduce
+transitioning javascript builtin
+ArrayReduce(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.reduce');
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto NoCallableError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto NoCallableError;
+ }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NoCallableError;
- // 4. If len is 0 and initialValue is not present, throw a TypeError
- // exception. (This case is handled at the end of
- // ArrayReduceLoopContinuation).
+ // 4. If len is 0 and initialValue is not present, throw a TypeError
+ // exception. (This case is handled at the end of
+ // ArrayReduceLoopContinuation).
- const initialValue: JSAny|TheHole =
- arguments.length > 1 ? arguments[1] : TheHole;
+ const initialValue: JSAny|TheHole =
+ arguments.length > 1 ? arguments[1] : TheHole;
- try {
- return FastArrayReduce(o, len, callbackfn, initialValue)
- otherwise Bailout;
- }
- label Bailout(value: Number, accumulator: JSAny|TheHole) {
- return ArrayReduceLoopContinuation(
- o, callbackfn, accumulator, o, value, len);
- }
- }
- label NoCallableError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ try {
+ return FastArrayReduce(o, len, callbackfn, initialValue)
+ otherwise Bailout;
+ } label Bailout(value: Number, accumulator: JSAny|TheHole) {
+ return ArrayReduceLoopContinuation(
+ o, callbackfn, accumulator, o, value, len);
}
+ } label NoCallableError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
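
The reduce variant above mirrors reduceRight left-to-right, and both files expose "loop continuation" builtins that optimized code re-enters after a deoptimization, carrying the current index and accumulator. A hypothetical sketch of that resumable-loop shape (again with a local sentinel standing in for TheHole); the real continuations are Torque builtins wired to deopt points, not ordinary functions:

    const HOLE: unique symbol = Symbol("hole"); // local stand-in for V8's TheHole

    function reduceLoopContinuationSketch<T, A>(
      o: ArrayLike<T>,
      callbackfn: (acc: A | T, value: T, index: number, obj: ArrayLike<T>) => A,
      accumulator: A | T | typeof HOLE,
      initialK: number,
      length: number,
    ): A | T {
      // Resume the generic loop at initialK; code that bails out mid-loop
      // re-enters here with whatever index and accumulator it had reached.
      for (let k = initialK; k < length; k++) {
        if (!(k in (o as object))) continue;  // kPresent check
        const value = o[k];
        accumulator =
            accumulator === HOLE ? value : callbackfn(accumulator, value, k, o);
      }
      if (accumulator === HOLE) {
        // kReduceNoInitial: no initial value and no elements were present.
        throw new TypeError("Reduce of empty array with no initial value");
      }
      return accumulator;
    }

    // A fresh reduce is just the continuation started at k = 0:
    // reduceLoopContinuationSketch([1, 2, 3], (a, v) => Number(a) + v, HOLE, 0, 3) === 6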
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 8c7c61f2ee..11c325140e 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -3,175 +3,170 @@
// found in the LICENSE file.
namespace array {
- macro LoadElement<ElementsAccessor : type extends ElementsKind, T: type>(
- elements: FixedArrayBase, index: Smi): T;
+macro LoadElement<ElementsAccessor : type extends ElementsKind, T: type>(
+ elements: FixedArrayBase, index: Smi): T;
- LoadElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi): Smi {
- const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- return UnsafeCast<Smi>(elements.objects[index]);
- }
+LoadElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): Smi {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ return UnsafeCast<Smi>(elements.objects[index]);
+}
- LoadElement<array::FastPackedObjectElements, JSAny>(
- implicit context: Context)(elements: FixedArrayBase, index: Smi): JSAny {
- const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- return UnsafeCast<JSAny>(elements.objects[index]);
- }
+LoadElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi): JSAny {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ return UnsafeCast<JSAny>(elements.objects[index]);
+}
- LoadElement<array::FastPackedDoubleElements, float64>(
- implicit context: Context)(elements: FixedArrayBase, index: Smi):
- float64 {
- const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- // This macro is only used for PACKED_DOUBLE, loading the hole should
- // be impossible.
- return elements.floats[index].Value() otherwise unreachable;
- }
+LoadElement<array::FastPackedDoubleElements, float64>(
+ implicit context: Context)(elements: FixedArrayBase, index: Smi): float64 {
+ const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ // This macro is only used for PACKED_DOUBLE, loading the hole should
+ // be impossible.
+ return elements.floats[index].Value() otherwise unreachable;
+}
- macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
- implicit context:
- Context)(elements: FixedArrayBase, index: Smi, value: T);
+macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
+ implicit context: Context)(elements: FixedArrayBase, index: Smi, value: T);
- StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: Smi) {
- const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
- }
+StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: Smi) {
+ const elems: FixedArray = UnsafeCast<FixedArray>(elements);
+ StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
+}
- StoreElement<array::FastPackedObjectElements, JSAny>(
- implicit context:
- Context)(elements: FixedArrayBase, index: Smi, value: JSAny) {
- const elements: FixedArray = UnsafeCast<FixedArray>(elements);
- elements.objects[index] = value;
- }
+StoreElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: JSAny) {
+ const elements: FixedArray = UnsafeCast<FixedArray>(elements);
+ elements.objects[index] = value;
+}
+
+StoreElement<array::FastPackedDoubleElements, float64>(
+ implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: float64) {
+ const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
+ StoreFixedDoubleArrayElementSmi(elems, index, value);
+}
- StoreElement<array::FastPackedDoubleElements, float64>(
- implicit context:
- Context)(elements: FixedArrayBase, index: Smi, value: float64) {
- const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
- StoreFixedDoubleArrayElementSmi(elems, index, value);
+// Fast-path for all PACKED_* elements kinds. These do not need to check
+// whether a property is present, so we can simply swap them using fast
+// FixedArray loads/stores.
+macro FastPackedArrayReverse<Accessor: type, T: type>(
+ implicit context: Context)(elements: FixedArrayBase, length: Smi) {
+ let lower: Smi = 0;
+ let upper: Smi = length - 1;
+
+ while (lower < upper) {
+ const lowerValue: T = LoadElement<Accessor, T>(elements, lower);
+ const upperValue: T = LoadElement<Accessor, T>(elements, upper);
+ StoreElement<Accessor>(elements, lower, upperValue);
+ StoreElement<Accessor>(elements, upper, lowerValue);
+ ++lower;
+ --upper;
}
+}
- // Fast-path for all PACKED_* elements kinds. These do not need to check
- // whether a property is present, so we can simply swap them using fast
- // FixedArray loads/stores.
- macro FastPackedArrayReverse<Accessor: type, T: type>(
- implicit context: Context)(elements: FixedArrayBase, length: Smi) {
- let lower: Smi = 0;
- let upper: Smi = length - 1;
-
- while (lower < upper) {
- const lowerValue: T = LoadElement<Accessor, T>(elements, lower);
- const upperValue: T = LoadElement<Accessor, T>(elements, upper);
- StoreElement<Accessor>(elements, lower, upperValue);
- StoreElement<Accessor>(elements, upper, lowerValue);
- ++lower;
- --upper;
+transitioning macro GenericArrayReverse(
+ context: Context, receiver: JSAny): JSAny {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(object);
+
+ // 3. Let middle be floor(len / 2).
+ // 4. Let lower be 0.
+ // 5. Repeat, while lower != middle.
+ // a. Let upper be len - lower - 1.
+
+ // Instead of calculating the middle value, we simply initialize upper
+ // with len - 1 and decrement it after each iteration.
+ let lower: Number = 0;
+ let upper: Number = length - 1;
+
+ while (lower < upper) {
+ let lowerValue: JSAny = Undefined;
+ let upperValue: JSAny = Undefined;
+
+ // b. Let upperP be ! ToString(upper).
+ // c. Let lowerP be ! ToString(lower).
+ // d. Let lowerExists be ? HasProperty(O, lowerP).
+ const lowerExists: Boolean = HasProperty(object, lower);
+
+ // e. If lowerExists is true, then.
+ if (lowerExists == True) {
+ // i. Let lowerValue be ? Get(O, lowerP).
+ lowerValue = GetProperty(object, lower);
}
- }
- transitioning macro GenericArrayReverse(context: Context, receiver: JSAny):
- JSAny {
- // 1. Let O be ? ToObject(this value).
- const object: JSReceiver = ToObject_Inline(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(object);
-
- // 3. Let middle be floor(len / 2).
- // 4. Let lower be 0.
- // 5. Repeat, while lower != middle.
- // a. Let upper be len - lower - 1.
-
- // Instead of calculating the middle value, we simply initialize upper
- // with len - 1 and decrement it after each iteration.
- let lower: Number = 0;
- let upper: Number = length - 1;
-
- while (lower < upper) {
- let lowerValue: JSAny = Undefined;
- let upperValue: JSAny = Undefined;
-
- // b. Let upperP be ! ToString(upper).
- // c. Let lowerP be ! ToString(lower).
- // d. Let lowerExists be ? HasProperty(O, lowerP).
- const lowerExists: Boolean = HasProperty(object, lower);
-
- // e. If lowerExists is true, then.
- if (lowerExists == True) {
- // i. Let lowerValue be ? Get(O, lowerP).
- lowerValue = GetProperty(object, lower);
- }
-
- // f. Let upperExists be ? HasProperty(O, upperP).
- const upperExists: Boolean = HasProperty(object, upper);
-
- // g. If upperExists is true, then.
- if (upperExists == True) {
- // i. Let upperValue be ? Get(O, upperP).
- upperValue = GetProperty(object, upper);
- }
-
- // h. If lowerExists is true and upperExists is true, then
- if (lowerExists == True && upperExists == True) {
- // i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(object, lower, upperValue);
-
- // ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(object, upper, lowerValue);
- } else if (lowerExists == False && upperExists == True) {
- // i. Perform ? Set(O, lowerP, upperValue, true).
- SetProperty(object, lower, upperValue);
-
- // ii. Perform ? DeletePropertyOrThrow(O, upperP).
- DeleteProperty(object, upper, LanguageMode::kStrict);
- } else if (lowerExists == True && upperExists == False) {
- // i. Perform ? DeletePropertyOrThrow(O, lowerP).
- DeleteProperty(object, lower, LanguageMode::kStrict);
-
- // ii. Perform ? Set(O, upperP, lowerValue, true).
- SetProperty(object, upper, lowerValue);
- }
-
- // l. Increase lower by 1.
- ++lower;
- --upper;
+ // f. Let upperExists be ? HasProperty(O, upperP).
+ const upperExists: Boolean = HasProperty(object, upper);
+
+ // g. If upperExists is true, then.
+ if (upperExists == True) {
+ // i. Let upperValue be ? Get(O, upperP).
+ upperValue = GetProperty(object, upper);
}
- // 6. Return O.
- return object;
+ // h. If lowerExists is true and upperExists is true, then
+ if (lowerExists == True && upperExists == True) {
+ // i. Perform ? Set(O, lowerP, upperValue, true).
+ SetProperty(object, lower, upperValue);
+
+ // ii. Perform ? Set(O, upperP, lowerValue, true).
+ SetProperty(object, upper, lowerValue);
+ } else if (lowerExists == False && upperExists == True) {
+ // i. Perform ? Set(O, lowerP, upperValue, true).
+ SetProperty(object, lower, upperValue);
+
+ // ii. Perform ? DeletePropertyOrThrow(O, upperP).
+ DeleteProperty(object, upper, LanguageMode::kStrict);
+ } else if (lowerExists == True && upperExists == False) {
+ // i. Perform ? DeletePropertyOrThrow(O, lowerP).
+ DeleteProperty(object, lower, LanguageMode::kStrict);
+
+ // ii. Perform ? Set(O, upperP, lowerValue, true).
+ SetProperty(object, upper, lowerValue);
+ }
+
+ // l. Increase lower by 1.
+ ++lower;
+ --upper;
}
- macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny)
- labels Slow {
- const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
-
- const kind: ElementsKind = array.map.elements_kind;
- if (kind == ElementsKind::PACKED_SMI_ELEMENTS) {
- array::EnsureWriteableFastElements(array);
- FastPackedArrayReverse<array::FastPackedSmiElements, Smi>(
- array.elements, array.length);
- } else if (kind == ElementsKind::PACKED_ELEMENTS) {
- array::EnsureWriteableFastElements(array);
- FastPackedArrayReverse<array::FastPackedObjectElements, JSAny>(
- array.elements, array.length);
- } else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
- FastPackedArrayReverse<array::FastPackedDoubleElements, float64>(
- array.elements, array.length);
- } else {
- goto Slow;
- }
+ // 6. Return O.
+ return object;
+}
+
+macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny)
+ labels Slow {
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+
+ const kind: ElementsKind = array.map.elements_kind;
+ if (kind == ElementsKind::PACKED_SMI_ELEMENTS) {
+ array::EnsureWriteableFastElements(array);
+ FastPackedArrayReverse<array::FastPackedSmiElements, Smi>(
+ array.elements, array.length);
+ } else if (kind == ElementsKind::PACKED_ELEMENTS) {
+ array::EnsureWriteableFastElements(array);
+ FastPackedArrayReverse<array::FastPackedObjectElements, JSAny>(
+ array.elements, array.length);
+ } else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
+ FastPackedArrayReverse<array::FastPackedDoubleElements, float64>(
+ array.elements, array.length);
+ } else {
+ goto Slow;
}
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.reverse
- transitioning javascript builtin ArrayPrototypeReverse(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- try {
- TryFastPackedArrayReverse(receiver) otherwise Baseline;
- return receiver;
- }
- label Baseline {
- return GenericArrayReverse(context, receiver);
- }
+// https://tc39.github.io/ecma262/#sec-array.prototype.reverse
+transitioning javascript builtin ArrayPrototypeReverse(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ TryFastPackedArrayReverse(receiver) otherwise Baseline;
+ return receiver;
+ } label Baseline {
+ return GenericArrayReverse(context, receiver);
}
}
+}
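
The Torque above quotes the spec steps for https://tc39.github.io/ecma262/#sec-array.prototype.reverse. A minimal TypeScript sketch of that generic (non-fast-path) algorithm follows; it is illustration only, not V8 code, and it shows the lowerExists/upperExists cases that decide between swapping, moving, and deleting.

type ReversibleObject = { [index: number]: unknown; length: number };

function genericReverse(o: ReversibleObject): ReversibleObject {
  const len = o.length >>> 0;              // ToLength(Get(O, "length"))
  let lower = 0;
  let upper = len - 1;                     // same lower/upper walk as the Torque above
  while (lower < upper) {
    const lowerExists = lower in o;        // HasProperty(O, lowerP)
    const upperExists = upper in o;        // HasProperty(O, upperP)
    const lowerValue = lowerExists ? o[lower] : undefined;
    const upperValue = upperExists ? o[upper] : undefined;
    if (lowerExists && upperExists) {      // swap both ends
      o[lower] = upperValue;
      o[upper] = lowerValue;
    } else if (!lowerExists && upperExists) {
      o[lower] = upperValue;               // move the value down...
      delete o[upper];                     // ...and leave a hole behind
    } else if (lowerExists && !upperExists) {
      delete o[lower];                     // leave a hole behind...
      o[upper] = lowerValue;               // ...and move the value up
    }                                      // both absent: nothing to do
    ++lower;
    --upper;
  }
  return o;                                // 6. Return O.
}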
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index d32d6be32e..ed1087a85a 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -3,110 +3,107 @@
// found in the LICENSE file.
namespace array {
- extern builtin ArrayShift(Context, JSFunction, JSAny, int32): JSAny;
+extern builtin ArrayShift(Context, JSFunction, JSAny, int32): JSAny;
- macro TryFastArrayShift(implicit context: Context)(receiver: JSAny): JSAny
- labels Slow, Runtime {
- const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
- let witness = NewFastJSArrayWitness(array);
+macro TryFastArrayShift(implicit context: Context)(receiver: JSAny): JSAny
+ labels Slow, Runtime {
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ let witness = NewFastJSArrayWitness(array);
- witness.EnsureArrayPushable() otherwise Slow;
+ witness.EnsureArrayPushable() otherwise Slow;
- if (array.length == 0) {
- return Undefined;
- }
+ if (array.length == 0) {
+ return Undefined;
+ }
- const newLength = array.length - 1;
+ const newLength = array.length - 1;
- // Check that we're not supposed to right-trim the backing store, as
- // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
- if ((newLength + newLength + kMinAddedElementsCapacity) <
- array.elements.length) {
- goto Runtime;
- }
+ // Check that we're not supposed to right-trim the backing store, as
+ // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+ if ((newLength + newLength + kMinAddedElementsCapacity) <
+ array.elements.length) {
+ goto Runtime;
+ }
- // Check that we're not supposed to left-trim the backing store, as
- // implemented in elements.cc:FastElementsAccessor::MoveElements.
- if (newLength > kMaxCopyElements) goto Runtime;
+ // Check that we're not supposed to left-trim the backing store, as
+ // implemented in elements.cc:FastElementsAccessor::MoveElements.
+ if (newLength > kMaxCopyElements) goto Runtime;
- const result = witness.LoadElementOrUndefined(0);
- witness.ChangeLength(newLength);
- witness.MoveElements(0, 1, Convert<intptr>(newLength));
- witness.StoreHole(newLength);
- return result;
- }
+ const result = witness.LoadElementOrUndefined(0);
+ witness.ChangeLength(newLength);
+ witness.MoveElements(0, 1, Convert<intptr>(newLength));
+ witness.StoreHole(newLength);
+ return result;
+}
- transitioning macro GenericArrayShift(implicit context:
- Context)(receiver: JSAny): JSAny {
- // 1. Let O be ? ToObject(this value).
- const object: JSReceiver = ToObject_Inline(context, receiver);
+transitioning macro GenericArrayShift(implicit context: Context)(
+ receiver: JSAny): JSAny {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(object);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(object);
- // 3. If len is zero, then
- if (length == 0) {
- // a. Perform ? Set(O, "length", 0, true).
- SetProperty(object, kLengthString, Convert<Smi>(0));
- // b. Return undefined.
- return Undefined;
- }
+ // 3. If len is zero, then
+ if (length == 0) {
+ // a. Perform ? Set(O, "length", 0, true).
+ SetProperty(object, kLengthString, Convert<Smi>(0));
+ // b. Return undefined.
+ return Undefined;
+ }
- // 4. Let first be ? Get(O, "0").
- const first = GetProperty(object, Convert<Smi>(0));
- // 5. Let k be 1.
- let k: Number = 1;
- // 6. Repeat, while k < len
- while (k < length) {
- // a. Let from be ! ToString(k).
- const from: Number = k;
-
- // b. Let to be ! ToString(k - 1).
- const to: Number = k - 1;
-
- // c. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(object, from);
-
- // d. If fromPresent is true, then
- if (fromPresent == True) {
- // i. Let fromVal be ? Get(O, from).
- const fromValue: JSAny = GetProperty(object, from);
-
- // ii. Perform ? Set(O, to, fromValue, true).
- SetProperty(object, to, fromValue);
- } else {
- // i. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(object, to, LanguageMode::kStrict);
- }
-
- // f. Increase k by 1.
- k++;
+ // 4. Let first be ? Get(O, "0").
+ const first = GetProperty(object, Convert<Smi>(0));
+ // 5. Let k be 1.
+ let k: Number = 1;
+ // 6. Repeat, while k < len
+ while (k < length) {
+ // a. Let from be ! ToString(k).
+ const from: Number = k;
+
+ // b. Let to be ! ToString(k - 1).
+ const to: Number = k - 1;
+
+ // c. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(object, from);
+
+ // d. If fromPresent is true, then
+ if (fromPresent == True) {
+ // i. Let fromVal be ? Get(O, from).
+ const fromValue: JSAny = GetProperty(object, from);
+
+ // ii. Perform ? Set(O, to, fromValue, true).
+ SetProperty(object, to, fromValue);
+ } else {
+ // i. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(object, to, LanguageMode::kStrict);
}
- // 7. Perform ? DeletePropertyOrThrow(O, ! ToString(len - 1)).
- DeleteProperty(object, length - 1, LanguageMode::kStrict);
+ // f. Increase k by 1.
+ k++;
+ }
- // 8. Perform ? Set(O, "length", len - 1, true).
- SetProperty(object, kLengthString, length - 1);
+ // 7. Perform ? DeletePropertyOrThrow(O, ! ToString(len - 1)).
+ DeleteProperty(object, length - 1, LanguageMode::kStrict);
- // 9. Return first.
- return first;
- }
+ // 8. Perform ? Set(O, "length", len - 1, true).
+ SetProperty(object, kLengthString, length - 1);
- // https://tc39.github.io/ecma262/#sec-array.prototype.shift
- transitioning javascript builtin ArrayPrototypeShift(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- try {
- return TryFastArrayShift(receiver) otherwise Slow, Runtime;
- }
- label Slow {
- return GenericArrayShift(receiver);
- }
- label Runtime {
- tail ArrayShift(
- context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
- }
+ // 9. Return first.
+ return first;
+}
+
+// https://tc39.github.io/ecma262/#sec-array.prototype.shift
+transitioning javascript builtin ArrayPrototypeShift(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ return TryFastArrayShift(receiver) otherwise Slow, Runtime;
+ } label Slow {
+ return GenericArrayShift(receiver);
+ } label Runtime {
+ tail ArrayShift(
+ context, LoadTargetFromFrame(), Undefined,
+ Convert<int32>(arguments.length));
}
}
+}
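
As above, here is a minimal TypeScript sketch of the generic shift algorithm (https://tc39.github.io/ecma262/#sec-array.prototype.shift) that GenericArrayShift implements; it is illustration only, not V8 code, and it omits the fast path and the ArrayShift runtime tail call.

type ShiftableObject = { [index: number]: unknown; length: number };

function genericShift(o: ShiftableObject): unknown {
  const len = o.length >>> 0;      // ToLength(Get(O, "length"))
  if (len === 0) {                 // 3. If len is zero, set length to 0 and return undefined.
    o.length = 0;
    return undefined;
  }
  const first = o[0];              // 4. Let first be ? Get(O, "0").
  for (let k = 1; k < len; k++) {  // 6. Repeat, while k < len
    if (k in o) {
      o[k - 1] = o[k];             // copy each present element one slot down
    } else {
      delete o[k - 1];             // absent source: the target becomes a hole too
    }
  }
  delete o[len - 1];               // 7. DeletePropertyOrThrow(O, ! ToString(len - 1)).
  o.length = len - 1;              // 8. Set(O, "length", len - 1, true).
  return first;                    // 9. Return first.
}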
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index b11b07e48b..147dae6f72 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -3,225 +3,222 @@
// found in the LICENSE file.
namespace array {
- macro HandleSimpleArgumentsSlice(
- context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
- count: Smi): JSArray
- labels Bailout {
- // If the resulting array doesn't fit in new space, use the slow path.
- if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
-
- const end: Smi = start + count;
- const sourceElements: FixedArray =
- Cast<FixedArray>(args.elements) otherwise Bailout;
- if (SmiAbove(end, sourceElements.length)) goto Bailout;
-
- const arrayMap: Map =
- LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context);
- const result: JSArray =
- AllocateJSArray(ElementsKind::HOLEY_ELEMENTS, arrayMap, count, count);
- const newElements: FixedArray =
- Cast<FixedArray>(result.elements) otherwise Bailout;
- CopyElements(
- ElementsKind::PACKED_ELEMENTS, newElements, 0, sourceElements,
- Convert<intptr>(start), Convert<intptr>(count));
- return result;
- }
-
- macro HandleFastAliasedSloppyArgumentsSlice(
- context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
- count: Smi): JSArray
- labels Bailout {
- // If the resulting array doesn't fit in new space, use the slow path.
- if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
-
- const sloppyElements: SloppyArgumentsElements =
- Cast<SloppyArgumentsElements>(args.elements) otherwise Bailout;
- const sloppyElementsLength: Smi = sloppyElements.length;
- const parameterMapLength: Smi =
- sloppyElementsLength - kSloppyArgumentsParameterMapStart;
-
- // Check to make sure that the extraction will not access outside the
- // defined arguments
- const end: Smi = start + count;
- const unmappedElements: FixedArray =
- Cast<FixedArray>(sloppyElements.objects[kSloppyArgumentsArgumentsIndex])
- otherwise Bailout;
- const unmappedElementsLength: Smi = unmappedElements.length;
- if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
-
- const argumentsContext: Context = UnsafeCast<Context>(
- sloppyElements.objects[kSloppyArgumentsContextIndex]);
-
- const arrayMap: Map =
- LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context);
- const result: JSArray =
- AllocateJSArray(ElementsKind::HOLEY_ELEMENTS, arrayMap, count, count);
-
- let indexOut: Smi = 0;
- const resultElements: FixedArray = UnsafeCast<FixedArray>(result.elements);
- const to: Smi = SmiMin(parameterMapLength, end);
-
- // Fill in the part of the result that map to context-mapped parameters.
- for (let current: Smi = start; current < to; ++current) {
- const e: Object =
- sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
- const newElement = UnsafeCast<(JSAny | TheHole)>(
- e != TheHole ? argumentsContext[UnsafeCast<Smi>(e)] :
- unmappedElements.objects[current]);
- // It is safe to skip the write barrier here because resultElements was
- // allocated together with result in a folded allocation.
- // TODO(tebbi): The verification of this fails at the moment due to
- // missing load elimination.
- StoreFixedArrayElement(
- resultElements, indexOut++, newElement, UNSAFE_SKIP_WRITE_BARRIER);
- }
+macro HandleSimpleArgumentsSlice(
+ context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
+ count: Smi): JSArray
+ labels Bailout {
+ // If the resulting array doesn't fit in new space, use the slow path.
+ if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
+
+ const end: Smi = start + count;
+ const sourceElements: FixedArray =
+ Cast<FixedArray>(args.elements) otherwise Bailout;
+ if (SmiAbove(end, sourceElements.length)) goto Bailout;
+
+ const arrayMap: Map =
+ LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context);
+ const result: JSArray =
+ AllocateJSArray(ElementsKind::HOLEY_ELEMENTS, arrayMap, count, count);
+ const newElements: FixedArray =
+ Cast<FixedArray>(result.elements) otherwise Bailout;
+ CopyElements(
+ ElementsKind::PACKED_ELEMENTS, newElements, 0, sourceElements,
+ Convert<intptr>(start), Convert<intptr>(count));
+ return result;
+}
- // Fill in the rest of the result that contains the unmapped parameters
- // above the formal parameters.
- const unmappedFrom: Smi = SmiMin(SmiMax(parameterMapLength, start), end);
- const restCount: Smi = end - unmappedFrom;
- CopyElements(
- ElementsKind::PACKED_ELEMENTS, resultElements,
- Convert<intptr>(indexOut), unmappedElements,
- Convert<intptr>(unmappedFrom), Convert<intptr>(restCount));
- return result;
+macro HandleFastAliasedSloppyArgumentsSlice(
+ context: NativeContext, args: JSArgumentsObjectWithLength, start: Smi,
+ count: Smi): JSArray
+ labels Bailout {
+ // If the resulting array doesn't fit in new space, use the slow path.
+ if (count >= kMaxNewSpaceFixedArrayElements) goto Bailout;
+
+ const sloppyElements: SloppyArgumentsElements =
+ Cast<SloppyArgumentsElements>(args.elements) otherwise Bailout;
+ const sloppyElementsLength: Smi = sloppyElements.length;
+ const parameterMapLength: Smi =
+ sloppyElementsLength - kSloppyArgumentsParameterMapStart;
+
+ // Check to make sure that the extraction will not access outside the
+ // defined arguments
+ const end: Smi = start + count;
+ const unmappedElements: FixedArray =
+ Cast<FixedArray>(sloppyElements.objects[kSloppyArgumentsArgumentsIndex])
+ otherwise Bailout;
+ const unmappedElementsLength: Smi = unmappedElements.length;
+ if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
+
+ const argumentsContext: Context =
+ UnsafeCast<Context>(sloppyElements.objects[kSloppyArgumentsContextIndex]);
+
+ const arrayMap: Map =
+ LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context);
+ const result: JSArray =
+ AllocateJSArray(ElementsKind::HOLEY_ELEMENTS, arrayMap, count, count);
+
+ let indexOut: Smi = 0;
+ const resultElements: FixedArray = UnsafeCast<FixedArray>(result.elements);
+ const to: Smi = SmiMin(parameterMapLength, end);
+
+ // Fill in the part of the result that map to context-mapped parameters.
+ for (let current: Smi = start; current < to; ++current) {
+ const e: Object =
+ sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
+ const newElement = UnsafeCast<(JSAny | TheHole)>(
+ e != TheHole ? argumentsContext[UnsafeCast<Smi>(e)] :
+ unmappedElements.objects[current]);
+ // It is safe to skip the write barrier here because resultElements was
+ // allocated together with result in a folded allocation.
+ // TODO(tebbi): The verification of this fails at the moment due to
+ // missing load elimination.
+ StoreFixedArrayElement(
+ resultElements, indexOut++, newElement, UNSAFE_SKIP_WRITE_BARRIER);
}
- macro HandleFastSlice(
- context: NativeContext, o: JSAny, startNumber: Number,
- countNumber: Number): JSArray
- labels Bailout {
- const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
- const count: Smi = Cast<Smi>(countNumber) otherwise Bailout;
- assert(start >= 0);
-
- try {
- typeswitch (o) {
- case (a: FastJSArrayForCopy): {
- // It's possible to modify the array length from a valueOf
- // callback between the original array length read and this
- // point. That can change the length of the array backing store,
- // in the worst case, making it smaller than the region that needs
- // to be copied out. Therefore, re-check the length before calling
- // the appropriate fast path. See regress-785804.js
- if (SmiAbove(start + count, a.length)) goto Bailout;
- return ExtractFastJSArray(context, a, start, count);
- }
- case (a: JSStrictArgumentsObject): {
+ // Fill in the rest of the result that contains the unmapped parameters
+ // above the formal parameters.
+ const unmappedFrom: Smi = SmiMin(SmiMax(parameterMapLength, start), end);
+ const restCount: Smi = end - unmappedFrom;
+ CopyElements(
+ ElementsKind::PACKED_ELEMENTS, resultElements, Convert<intptr>(indexOut),
+ unmappedElements, Convert<intptr>(unmappedFrom),
+ Convert<intptr>(restCount));
+ return result;
+}
+
+macro HandleFastSlice(
+ context: NativeContext, o: JSAny, startNumber: Number,
+ countNumber: Number): JSArray
+ labels Bailout {
+ const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
+ const count: Smi = Cast<Smi>(countNumber) otherwise Bailout;
+ assert(start >= 0);
+
+ try {
+ typeswitch (o) {
+ case (a: FastJSArrayForCopy): {
+ // It's possible to modify the array length from a valueOf
+ // callback between the original array length read and this
+ // point. That can change the length of the array backing store,
+ // in the worst case, making it smaller than the region that needs
+ // to be copied out. Therefore, re-check the length before calling
+ // the appropriate fast path. See regress-785804.js
+ if (SmiAbove(start + count, a.length)) goto Bailout;
+ return ExtractFastJSArray(context, a, start, count);
+ }
+ case (a: JSStrictArgumentsObject): {
+ goto HandleSimpleArgumentsSlice(a);
+ }
+ case (a: JSSloppyArgumentsObject): {
+ const map: Map = a.map;
+ if (IsFastAliasedArgumentsMap(map)) {
+ return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count)
+ otherwise Bailout;
+ } else if (IsSloppyArgumentsMap(map)) {
goto HandleSimpleArgumentsSlice(a);
}
- case (a: JSSloppyArgumentsObject): {
- const map: Map = a.map;
- if (IsFastAliasedArgumentsMap(map)) {
- return HandleFastAliasedSloppyArgumentsSlice(
- context, a, start, count)
- otherwise Bailout;
- } else if (IsSloppyArgumentsMap(map)) {
- goto HandleSimpleArgumentsSlice(a);
- }
- goto Bailout;
- }
- case (JSAny): {
- goto Bailout;
- }
+ goto Bailout;
+ }
+ case (JSAny): {
+ goto Bailout;
}
}
- label HandleSimpleArgumentsSlice(a: JSArgumentsObjectWithLength) {
- return HandleSimpleArgumentsSlice(context, a, start, count)
- otherwise Bailout;
- }
+ } label HandleSimpleArgumentsSlice(a: JSArgumentsObjectWithLength) {
+ return HandleSimpleArgumentsSlice(context, a, start, count)
+ otherwise Bailout;
}
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.slice
- transitioning javascript builtin
- ArrayPrototypeSlice(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // Handle array cloning case if the receiver is a fast array.
- if (arguments.length == 0) {
- typeswitch (receiver) {
- case (a: FastJSArrayForCopy): {
- return CloneFastJSArray(context, a);
- }
- case (JSAny): {
- }
+// https://tc39.github.io/ecma262/#sec-array.prototype.slice
+transitioning javascript builtin
+ArrayPrototypeSlice(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // Handle array cloning case if the receiver is a fast array.
+ if (arguments.length == 0) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
}
}
+ }
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
-
- // 3. Let relativeStart be ? ToInteger(start).
- const start: JSAny = arguments[0];
- const relativeStart: Number = ToInteger_Inline(start);
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 4. If relativeStart < 0, let k be max((len + relativeStart), 0);
- // else let k be min(relativeStart, len).
- let k: Number = relativeStart < 0 ? Max((len + relativeStart), 0) :
- Min(relativeStart, len);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // 5. If end is undefined, let relativeEnd be len;
- // else let relativeEnd be ? ToInteger(end).
- const end: JSAny = arguments[1];
- const relativeEnd: Number = end == Undefined ? len : ToInteger_Inline(end);
+ // 3. Let relativeStart be ? ToInteger(start).
+ const start: JSAny = arguments[0];
+ const relativeStart: Number = ToInteger_Inline(start);
- // 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
- // else let final be min(relativeEnd, len).
- const final: Number =
- relativeEnd < 0 ? Max((len + relativeEnd), 0) : Min(relativeEnd, len);
+ // 4. If relativeStart < 0, let k be max((len + relativeStart), 0);
+ // else let k be min(relativeStart, len).
+ let k: Number = relativeStart < 0 ? Max((len + relativeStart), 0) :
+ Min(relativeStart, len);
- // 7. Let count be max(final - k, 0).
- const count: Number = Max(final - k, 0);
+ // 5. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ const end: JSAny = arguments[1];
+ const relativeEnd: Number = end == Undefined ? len : ToInteger_Inline(end);
- assert(0 <= k);
- assert(k <= len);
- assert(0 <= final);
- assert(final <= len);
- assert(0 <= count);
- assert(count <= len);
+ // 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const final: Number =
+ relativeEnd < 0 ? Max((len + relativeEnd), 0) : Min(relativeEnd, len);
- try {
- return HandleFastSlice(context, o, k, count)
- otherwise Slow;
- }
- label Slow {}
+ // 7. Let count be max(final - k, 0).
+ const count: Number = Max(final - k, 0);
- // 8. Let A be ? ArraySpeciesCreate(O, count).
- const a: JSReceiver = ArraySpeciesCreate(context, o, count);
+ assert(0 <= k);
+ assert(k <= len);
+ assert(0 <= final);
+ assert(final <= len);
+ assert(0 <= count);
+ assert(count <= len);
- // 9. Let n be 0.
- let n: Number = 0;
+ try {
+ return HandleFastSlice(context, o, k, count)
+ otherwise Slow;
+ } label Slow {}
- // 10. Repeat, while k < final
- while (k < final) {
- // a. Let Pk be ! ToString(k).
- const pK: Number = k;
+ // 8. Let A be ? ArraySpeciesCreate(O, count).
+ const a: JSReceiver = ArraySpeciesCreate(context, o, count);
- // b. Let kPresent be ? HasProperty(O, Pk).
- const fromPresent: Boolean = HasProperty(o, pK);
+ // 9. Let n be 0.
+ let n: Number = 0;
- // c. If kPresent is true, then
- if (fromPresent == True) {
- // i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, pK);
+ // 10. Repeat, while k < final
+ while (k < final) {
+ // a. Let Pk be ! ToString(k).
+ const pK: Number = k;
- // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue).
- FastCreateDataProperty(a, n, kValue);
- }
+ // b. Let kPresent be ? HasProperty(O, Pk).
+ const fromPresent: Boolean = HasProperty(o, pK);
- // d. Increase k by 1.
- k++;
+ // c. If kPresent is true, then
+ if (fromPresent == True) {
+ // i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, pK);
- // e. Increase n by 1.
- n++;
+ // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(n), kValue).
+ FastCreateDataProperty(a, n, kValue);
}
- // 11. Perform ? Set(A, "length", n, true).
- SetProperty(a, kLengthString, n);
+ // d. Increase k by 1.
+ k++;
- // 12. Return A.
- return a;
+ // e. Increase n by 1.
+ n++;
}
+
+ // 11. Perform ? Set(A, "length", n, true).
+ SetProperty(a, kLengthString, n);
+
+ // 12. Return A.
+ return a;
+}
}
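
The clamping of relativeStart and relativeEnd above is easiest to see in plain code. A minimal TypeScript sketch of the generic slice path follows; it is illustration only, not V8 code, and a plain array stands in for ArraySpeciesCreate.

function genericSlice<T>(o: ArrayLike<T>, start?: number, end?: number): T[] {
  const len = o.length >>> 0;                         // ToLength(Get(O, "length"))
  const relativeStart = Math.trunc(start ?? 0) || 0;  // ToInteger(start); NaN/-0 become 0
  let k = relativeStart < 0 ? Math.max(len + relativeStart, 0)
                            : Math.min(relativeStart, len);
  const relativeEnd = end === undefined ? len : Math.trunc(end) || 0;
  const finalIndex = relativeEnd < 0 ? Math.max(len + relativeEnd, 0)
                                     : Math.min(relativeEnd, len);
  const a: T[] = [];                                  // stands in for ArraySpeciesCreate(O, count)
  let n = 0;
  for (; k < finalIndex; k++, n++) {
    if (k in (o as object)) {                         // kPresent = HasProperty(O, Pk)
      a[n] = o[k];                                    // CreateDataPropertyOrThrow(A, n, kValue)
    }
  }
  a.length = n;                                       // 11. Set(A, "length", n, true).
  return a;                                           // 12. Return A.
}

// genericSlice([10, 20, 30, 40], 1, -1);  // [20, 30]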
diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq
index 59b8294f74..69467bba27 100644
--- a/deps/v8/src/builtins/array-some.tq
+++ b/deps/v8/src/builtins/array-some.tq
@@ -3,145 +3,142 @@
// found in the LICENSE file.
namespace array {
- transitioning javascript builtin
- ArraySomeLoopEagerDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
- // All continuation points in the optimized some implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- //
- // Also, this great mass of casts is necessary because the signature
- // of Torque javascript builtins requires JSAny type for all parameters
- // other than {context}.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- const numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- return ArraySomeLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
+transitioning javascript builtin
+ArraySomeLoopEagerDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny): JSAny {
+ // All continuation points in the optimized some implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ //
+ // Also, this great mass of casts is necessary because the signature
+ // of Torque javascript builtins requires JSAny type for all parameters
+ // other than {context}.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ return ArraySomeLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
+
+transitioning javascript builtin
+ArraySomeLoopLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
+ result: JSAny): JSAny {
+ // All continuation points in the optimized some implementation are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
+ const callbackfn = Cast<Callable>(callback) otherwise unreachable;
+ let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberLength = Cast<Number>(length) otherwise unreachable;
+
+ // This custom lazy deopt point is right after the callback. some() needs
+ // to pick up at the next step: if the result is true, then return,
+ // otherwise, keep going through the array starting from k + 1.
+ if (ToBoolean(result)) {
+ return True;
}
- transitioning javascript builtin
- ArraySomeLoopLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- callback: JSAny, thisArg: JSAny, initialK: JSAny, length: JSAny,
- result: JSAny): JSAny {
- // All continuation points in the optimized some implementation are
- // after the ToObject(O) call that ensures we are dealing with a
- // JSReceiver.
- const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
- const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
- const numberLength = Cast<Number>(length) otherwise unreachable;
-
- // This custom lazy deopt point is right after the callback. some() needs
- // to pick up at the next step: if the result is true, then return,
- // otherwise, keep going through the array starting from k + 1.
- if (ToBoolean(result)) {
- return True;
- }
+ numberK = numberK + 1;
- numberK = numberK + 1;
+ return ArraySomeLoopContinuation(
+ jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
+ numberLength, Undefined);
+}
- return ArraySomeLoopContinuation(
- jsreceiver, callbackfn, thisArg, Undefined, jsreceiver, numberK,
- numberLength, Undefined);
- }
+transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny, _array: JSAny,
+ o: JSReceiver, initialK: Number, length: Number, _initialTo: JSAny): JSAny {
+ // 5. Let k be 0.
+ // 6. Repeat, while k < len
+ for (let k: Number = initialK; k < length; k++) {
+ // 6a. Let Pk be ! ToString(k).
+ // k is guaranteed to be a positive integer, hence ToString is
+ // side-effect free and HasProperty/GetProperty do the conversion inline.
- transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
- _receiver: JSReceiver, callbackfn: Callable, thisArg: JSAny,
- _array: JSAny, o: JSReceiver, initialK: Number, length: Number,
- _initialTo: JSAny): JSAny {
- // 5. Let k be 0.
- // 6. Repeat, while k < len
- for (let k: Number = initialK; k < length; k++) {
- // 6a. Let Pk be ! ToString(k).
- // k is guaranteed to be a positive integer, hence ToString is
- // side-effect free and HasProperty/GetProperty do the conversion inline.
-
- // 6b. Let kPresent be ? HasProperty(O, Pk).
- const kPresent: Boolean = HasProperty_Inline(o, k);
-
- // 6c. If kPresent is true, then
- if (kPresent == True) {
- // 6c. i. Let kValue be ? Get(O, Pk).
- const kValue: JSAny = GetProperty(o, k);
-
- // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
- const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
-
- // iii. If selected is true, then...
- if (ToBoolean(result)) {
- return True;
- }
- }
+ // 6b. Let kPresent be ? HasProperty(O, Pk).
+ const kPresent: Boolean = HasProperty_Inline(o, k);
- // 6d. Increase k by 1. (done by the loop).
- }
- return False;
- }
+ // 6c. If kPresent is true, then
+ if (kPresent == True) {
+ // 6c. i. Let kValue be ? Get(O, Pk).
+ const kValue: JSAny = GetProperty(o, k);
+
+ // 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
+ const result: JSAny = Call(context, callbackfn, thisArg, kValue, k, o);
- transitioning macro FastArraySome(implicit context: Context)(
- o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
- labels Bailout(Smi) {
- let k: Smi = 0;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
- let fastOW = NewFastJSArrayWitness(fastO);
-
- // Build a fast loop over the smi array.
- for (; k < smiLen; k++) {
- fastOW.Recheck() otherwise goto Bailout(k);
-
- // Ensure that we haven't walked beyond a possibly updated length.
- if (k >= fastOW.Get().length) goto Bailout(k);
- const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
- const result: JSAny =
- Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ // iii. If selected is true, then...
if (ToBoolean(result)) {
return True;
}
}
- return False;
- }
- // https://tc39.github.io/ecma262/#sec-array.prototype.some
- transitioning javascript builtin
- ArraySome(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- JSAny {
- try {
- RequireObjectCoercible(receiver, 'Array.prototype.some');
+ // 6d. Increase k by 1. (done by the loop).
+ }
+ return False;
+}
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject_Inline(context, receiver);
+transitioning macro FastArraySome(implicit context: Context)(
+ o: JSReceiver, len: Number, callbackfn: Callable, thisArg: JSAny): JSAny
+ labels Bailout(Smi) {
+ let k: Smi = 0;
+ const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ let fastOW = NewFastJSArrayWitness(fastO);
+
+ // Build a fast loop over the smi array.
+ for (; k < smiLen; k++) {
+ fastOW.Recheck() otherwise goto Bailout(k);
+
+ // Ensure that we haven't walked beyond a possibly updated length.
+ if (k >= fastOW.Get().length) goto Bailout(k);
+ const value: JSAny = fastOW.LoadElementNoHole(k) otherwise continue;
+ const result: JSAny =
+ Call(context, callbackfn, thisArg, value, k, fastOW.Get());
+ if (ToBoolean(result)) {
+ return True;
+ }
+ }
+ return False;
+}
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
+// https://tc39.github.io/ecma262/#sec-array.prototype.some
+transitioning javascript builtin
+ArraySome(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ RequireObjectCoercible(receiver, 'Array.prototype.some');
- // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
- if (arguments.length == 0) {
- goto TypeError;
- }
- const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject_Inline(context, receiver);
- // 4. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
- // Special cases.
- try {
- return FastArraySome(o, len, callbackfn, thisArg)
- otherwise Bailout;
- }
- label Bailout(kValue: Smi) deferred {
- return ArraySomeLoopContinuation(
- o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
- }
+ // 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ if (arguments.length == 0) {
+ goto TypeError;
}
- label TypeError deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise TypeError;
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
+
+ // Special cases.
+ try {
+ return FastArraySome(o, len, callbackfn, thisArg)
+ otherwise Bailout;
+ } label Bailout(kValue: Smi) deferred {
+ return ArraySomeLoopContinuation(
+ o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined);
}
+ } label TypeError deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
+}
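
Finally, a minimal TypeScript sketch of the Array.prototype.some loop that ArraySomeLoopContinuation implements; it is illustration only, not V8 code, and it leaves out the non-callable check, the deopt continuations, and the fast-path witness machinery.

function arraySome<T>(
    o: ArrayLike<T>,
    callbackfn: (value: T, index: number, obj: ArrayLike<T>) => unknown,
    thisArg?: unknown): boolean {
  const len = o.length >>> 0;              // ToLength(Get(O, "length"))
  for (let k = 0; k < len; k++) {          // 6. Repeat, while k < len
    if (k in (o as object)) {              // 6b. kPresent = HasProperty(O, Pk)
      const kValue = o[k];                 // 6c.i. Let kValue be ? Get(O, Pk).
      // 6c.ii-iii. If ToBoolean(Call(callbackfn, T, <kValue, k, O>)) is true, return true.
      if (callbackfn.call(thisArg, kValue, k, o)) {
        return true;
      }
    }
  }
  return false;                            // 7. Return false.
}

// arraySome([1, 3, 5, 8], (x) => x % 2 === 0);  // true (stops at 8)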
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index 9d7a223d97..92eace071c 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -3,416 +3,418 @@
// found in the LICENSE file.
namespace array {
- // Given {elements}, we want to create a non-zero length array of type
- // FixedArrayType. Most of this behavior is outsourced to ExtractFixedArray(),
- // but the special case of wanting to have a FixedDoubleArray when given a
- // zero-length input FixedArray is handled here.
- macro Extract(implicit context: Context)(
- elements: FixedArray, first: Smi, count: Smi, capacity: Smi): FixedArray {
- return ExtractFixedArray(
- elements, Convert<intptr>(first), Convert<intptr>(count),
- Convert<intptr>(capacity));
- }
+// Given {source}, we want to create a non-zero length array of type
+// FixedArrayType with the specified {result_capacity}. Starting from
+// {startIndex}, {count} number of elements are copied to the newly
+// created result array. Most of this behavior is outsourced to
+// ExtractFixedArray(). We handle the case where the {source} is
+// EmptyFixedArray but result is expected to be a FixedDoubleArray.
+macro Extract(implicit context: Context)(
+ source: FixedArray, startIndex: Smi, count: Smi,
+ resultCapacity: Smi): FixedArray {
+ return ExtractFixedArray(
+ source, Convert<intptr>(startIndex), Convert<intptr>(count),
+ Convert<intptr>(resultCapacity));
+}
- macro Extract(implicit context: Context)(
- elements: FixedDoubleArray|EmptyFixedArray, first: Smi, count: Smi,
- capacity: Smi): FixedDoubleArray|EmptyFixedArray {
- typeswitch (elements) {
- case (EmptyFixedArray): {
- return AllocateZeroedFixedDoubleArray(Convert<intptr>(capacity));
- }
- case (elements: FixedDoubleArray): {
- return ExtractFixedDoubleArray(
- elements, Convert<intptr>(first), Convert<intptr>(count),
- Convert<intptr>(capacity));
- }
+macro Extract(implicit context: Context)(
+ source: FixedDoubleArray|EmptyFixedArray, startIndex: Smi, count: Smi,
+ resultCapacity: Smi): FixedDoubleArray|EmptyFixedArray {
+ typeswitch (source) {
+ case (EmptyFixedArray): {
+ // ExtractFixedDoubleArray expects {source} to be a FixedDoubleArray.
+ // Handle the case where {source} is empty here.
+ return AllocateFixedDoubleArrayWithHoles(
+ Convert<intptr>(resultCapacity),
+ AllocationFlag::kAllowLargeObjectAllocation);
+ }
+ case (source: FixedDoubleArray): {
+ return ExtractFixedDoubleArray(
+ source, Convert<intptr>(startIndex), Convert<intptr>(count),
+ Convert<intptr>(resultCapacity));
}
}
+}
- macro DoMoveElements<FixedArrayType : type extends FixedArrayBase>(
- elements: FixedArrayType, dstIndex: Smi, srcIndex: Smi,
- count: Smi): void {
- TorqueMoveElements(
- elements, Convert<intptr>(dstIndex), Convert<intptr>(srcIndex),
- Convert<intptr>(count));
- }
+macro DoMoveElements<FixedArrayType : type extends FixedArrayBase>(
+ elements: FixedArrayType, dstIndex: Smi, srcIndex: Smi, count: Smi): void {
+ TorqueMoveElements(
+ elements, Convert<intptr>(dstIndex), Convert<intptr>(srcIndex),
+ Convert<intptr>(count));
+}
- macro StoreHoles<FixedArrayType : type extends FixedArrayBase>(
- elements: FixedArrayType, holeStartIndex: Smi, holeEndIndex: Smi): void {
- for (let i: Smi = holeStartIndex; i < holeEndIndex; i++) {
- array::StoreArrayHole(elements, i);
- }
+macro StoreHoles<FixedArrayType : type extends FixedArrayBase>(
+ elements: FixedArrayType, holeStartIndex: Smi, holeEndIndex: Smi): void {
+ for (let i: Smi = holeStartIndex; i < holeEndIndex; i++) {
+ array::StoreArrayHole(elements, i);
}
+}
- macro DoCopyElements<FixedArrayType : type extends FixedArrayBase>(
- dstElements: FixedArrayType, dstIndex: Smi, srcElements: FixedArrayType,
- srcIndex: Smi, count: Smi): void {
- TorqueCopyElements(
- dstElements, Convert<intptr>(dstIndex), srcElements,
- Convert<intptr>(srcIndex), Convert<intptr>(count));
- }
+macro DoCopyElements<FixedArrayType : type extends FixedArrayBase>(
+ dstElements: FixedArrayType, dstIndex: Smi, srcElements: FixedArrayType,
+ srcIndex: Smi, count: Smi): void {
+ TorqueCopyElements(
+ dstElements, Convert<intptr>(dstIndex), srcElements,
+ Convert<intptr>(srcIndex), Convert<intptr>(count));
+}
- macro
- FastSplice<FixedArrayType : type extends FixedArrayBase, ElementType: type>(
- implicit context: Context)(
- args: Arguments, a: JSArray, length: Smi, newLength: Smi,
- actualStart: Smi, insertCount: Smi, actualDeleteCount: Smi): void {
- // Make sure elements are writable.
- array::EnsureWriteableFastElements(a);
-
- if (insertCount != actualDeleteCount) {
- const elements =
- UnsafeCast<(FixedArrayType | EmptyFixedArray)>(a.elements);
- const dstIndex: Smi = actualStart + insertCount;
- const srcIndex: Smi = actualStart + actualDeleteCount;
- const count: Smi = length - actualDeleteCount - actualStart;
- if (insertCount < actualDeleteCount) {
- // Shrink.
+macro
+FastSplice<FixedArrayType : type extends FixedArrayBase, ElementType: type>(
+ implicit context: Context)(
+ args: Arguments, a: JSArray, length: Smi, newLength: Smi, actualStart: Smi,
+ insertCount: Smi, actualDeleteCount: Smi): void {
+ // Make sure elements are writable.
+ array::EnsureWriteableFastElements(a);
+
+ if (insertCount != actualDeleteCount) {
+ const elements = UnsafeCast<(FixedArrayType | EmptyFixedArray)>(a.elements);
+ const dstIndex: Smi = actualStart + insertCount;
+ const srcIndex: Smi = actualStart + actualDeleteCount;
+ const count: Smi = length - actualDeleteCount - actualStart;
+ if (insertCount < actualDeleteCount) {
+ // Shrink.
+ DoMoveElements(
+ UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
+ StoreHoles(UnsafeCast<FixedArrayType>(elements), newLength, length);
+ } else if (insertCount > actualDeleteCount) {
+ // If the backing store is big enough, then moving elements is enough.
+ if (newLength <= elements.length) {
DoMoveElements(
UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
- StoreHoles(UnsafeCast<FixedArrayType>(elements), newLength, length);
- } else if (insertCount > actualDeleteCount) {
- // If the backing store is big enough, then moving elements is enough.
- if (newLength <= elements.length) {
- DoMoveElements(
- UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
- } else {
- // Grow.
- const capacity: Smi = CalculateNewElementsCapacity(newLength);
- const newElements: FixedArrayType = UnsafeCast<FixedArrayType>(
- Extract(elements, 0, actualStart, capacity));
- a.elements = newElements;
- if (elements.length > 0) {
- DoCopyElements(
- newElements, dstIndex, UnsafeCast<FixedArrayType>(elements),
- srcIndex, count);
- }
+ } else {
+ // Grow.
+ const capacity: Smi = CalculateNewElementsCapacity(newLength);
+ const newElements: FixedArrayType = UnsafeCast<FixedArrayType>(
+ Extract(elements, 0, actualStart, capacity));
+ a.elements = newElements;
+ if (elements.length > 0) {
+ DoCopyElements(
+ newElements, dstIndex, UnsafeCast<FixedArrayType>(elements),
+ srcIndex, count);
}
}
}
-
- // Copy arguments.
- let k: Smi = actualStart;
- if (insertCount > 0) {
- const typedNewElements: FixedArrayType =
- UnsafeCast<FixedArrayType>(a.elements);
- for (let i: intptr = 2; i < args.length; ++i) {
- const e: JSAny = args[i];
- // The argument elements were already validated to be an appropriate
- // {ElementType} to store in {FixedArrayType}.
- typedNewElements[k++] = UnsafeCast<ElementType>(e);
- }
- }
-
- // Update the array's length after all the FixedArray shuffling is done.
- a.length = newLength;
}
- transitioning macro FastArraySplice(
- context: Context, args: Arguments, o: JSReceiver,
- originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
- actualDeleteCountNumber: Number): JSAny
- labels Bailout {
- const originalLength: Smi =
- Cast<Smi>(originalLengthNumber) otherwise Bailout;
- const actualStart: Smi = Cast<Smi>(actualStartNumber) otherwise Bailout;
- const actualDeleteCount: Smi =
- Cast<Smi>(actualDeleteCountNumber) otherwise Bailout;
- const lengthDelta: Smi = insertCount - actualDeleteCount;
- const newLength: Smi = originalLength + lengthDelta;
-
- const a: JSArray = Cast<JSArray>(o) otherwise Bailout;
-
- const map: Map = a.map;
- if (!IsPrototypeInitialArrayPrototype(map)) goto Bailout;
- if (IsNoElementsProtectorCellInvalid()) goto Bailout;
- if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
-
- // Fast path only works on fast elements kind and with writable length.
- let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout;
- if (!IsFastElementsKind(elementsKind)) goto Bailout;
-
- const oldElementsKind: ElementsKind = elementsKind;
+ // Copy arguments.
+ let k: Smi = actualStart;
+ if (insertCount > 0) {
+ const typedNewElements: FixedArrayType =
+ UnsafeCast<FixedArrayType>(a.elements);
for (let i: intptr = 2; i < args.length; ++i) {
const e: JSAny = args[i];
- if (IsFastSmiElementsKind(elementsKind)) {
- if (TaggedIsNotSmi(e)) {
- const heapObject: HeapObject = UnsafeCast<HeapObject>(e);
- elementsKind = IsHeapNumber(heapObject) ?
- AllowDoubleElements(elementsKind) :
- AllowNonNumberElements(elementsKind);
- }
- } else if (IsDoubleElementsKind(elementsKind)) {
- if (!IsNumber(e)) {
- elementsKind = AllowNonNumberElements(elementsKind);
- }
- }
+ // The argument elements were already validated to be an appropriate
+ // {ElementType} to store in {FixedArrayType}.
+ typedNewElements[k++] = UnsafeCast<ElementType>(e);
}
+ }
- if (elementsKind != oldElementsKind) {
- const smiElementsKind: Smi = Convert<Smi>(Convert<int32>(elementsKind));
- TransitionElementsKindWithKind(context, a, smiElementsKind);
- }
+ // Update the array's length after all the FixedArray shuffling is done.
+ a.length = newLength;
+}
- // Make sure that the length hasn't been changed by side-effect.
- const length: Smi = Cast<Smi>(a.length) otherwise Bailout;
- if (originalLength != length) goto Bailout;
+transitioning macro FastArraySplice(
+ context: Context, args: Arguments, o: JSReceiver,
+ originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
+ actualDeleteCountNumber: Number): JSAny
+ labels Bailout {
+ const originalLength: Smi = Cast<Smi>(originalLengthNumber) otherwise Bailout;
+ const actualStart: Smi = Cast<Smi>(actualStartNumber) otherwise Bailout;
+ const actualDeleteCount: Smi =
+ Cast<Smi>(actualDeleteCountNumber) otherwise Bailout;
+ const lengthDelta: Smi = insertCount - actualDeleteCount;
+ const newLength: Smi = originalLength + lengthDelta;
+
+ const a: JSArray = Cast<JSArray>(o) otherwise Bailout;
+
+ const map: Map = a.map;
+ if (!IsPrototypeInitialArrayPrototype(map)) goto Bailout;
+ if (IsNoElementsProtectorCellInvalid()) goto Bailout;
+ if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
+
+ // Fast path only works on fast elements kind and with writable length.
+ let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout;
+ if (!IsFastElementsKind(elementsKind)) goto Bailout;
+
+ const oldElementsKind: ElementsKind = elementsKind;
+ for (let i: intptr = 2; i < args.length; ++i) {
+ const e: JSAny = args[i];
+ if (IsFastSmiElementsKind(elementsKind)) {
+ if (TaggedIsNotSmi(e)) {
+ const heapObject: HeapObject = UnsafeCast<HeapObject>(e);
+ elementsKind = IsHeapNumber(heapObject) ?
+ AllowDoubleElements(elementsKind) :
+ AllowNonNumberElements(elementsKind);
+ }
+ } else if (IsDoubleElementsKind(elementsKind)) {
+ if (!IsNumber(e)) {
+ elementsKind = AllowNonNumberElements(elementsKind);
+ }
+ }
+ }
- const deletedResult: JSArray =
- ExtractFastJSArray(context, a, actualStart, actualDeleteCount);
+ if (elementsKind != oldElementsKind) {
+ const smiElementsKind: Smi = Convert<Smi>(Convert<int32>(elementsKind));
+ TransitionElementsKindWithKind(context, a, smiElementsKind);
+ }
- if (newLength == 0) {
- a.elements = kEmptyFixedArray;
- a.length = 0;
- return deletedResult;
- }
+ // Make sure that the length hasn't been changed by side-effect.
+ const length: Smi = Cast<Smi>(a.length) otherwise Bailout;
+ if (originalLength != length) goto Bailout;
- if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
- FastSplice<FixedArray, JSAny>(
- args, a, length, newLength, actualStart, insertCount,
- actualDeleteCount);
- } else {
- FastSplice<FixedDoubleArray, Number>(
- args, a, length, newLength, actualStart, insertCount,
- actualDeleteCount);
- }
+ const deletedResult: JSArray =
+ ExtractFastJSArray(context, a, actualStart, actualDeleteCount);
+ if (newLength == 0) {
+ a.elements = kEmptyFixedArray;
+ a.length = 0;
return deletedResult;
}
- transitioning macro FillDeletedElementsArray(
- context: Context, o: JSReceiver, actualStart: Number,
- actualDeleteCount: Number, a: JSReceiver): JSAny {
- // 10. Let k be 0.
- let k: Number = 0;
+ if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
+ FastSplice<FixedArray, JSAny>(
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
+ } else {
+ FastSplice<FixedDoubleArray, Number>(
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
+ }
+
+ return deletedResult;
+}
- // 11. Repeat, while k < actualDeleteCount
- while (k < actualDeleteCount) {
- // a. Let from be ! ToString(actualStart + k).
- const from: Number = actualStart + k;
+transitioning macro FillDeletedElementsArray(
+ context: Context, o: JSReceiver, actualStart: Number,
+ actualDeleteCount: Number, a: JSReceiver): JSAny {
+ // 10. Let k be 0.
+ let k: Number = 0;
- // b. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(o, from);
+ // 11. Repeat, while k < actualDeleteCount
+ while (k < actualDeleteCount) {
+ // a. Let from be ! ToString(actualStart + k).
+ const from: Number = actualStart + k;
- // c. If fromPresent is true, then
- if (fromPresent == True) {
- // i. Let fromValue be ? Get(O, from).
- const fromValue: JSAny = GetProperty(o, from);
+ // b. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(o, from);
- // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
- FastCreateDataProperty(a, k, fromValue);
- }
+ // c. If fromPresent is true, then
+ if (fromPresent == True) {
+ // i. Let fromValue be ? Get(O, from).
+ const fromValue: JSAny = GetProperty(o, from);
- // d. Increment k by 1.
- k++;
+ // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
+ FastCreateDataProperty(a, k, fromValue);
}
- // 12. Perform ? Set(A, "length", actualDeleteCount, true).
- SetProperty(a, kLengthString, actualDeleteCount);
- return a;
+
+ // d. Increment k by 1.
+ k++;
}
+ // 12. Perform ? Set(A, "length", actualDeleteCount, true).
+ SetProperty(a, kLengthString, actualDeleteCount);
+ return a;
+}
- // HandleForwardCase implements step 15. "If itemCount < actualDeleteCount,
- // then...""
- transitioning macro HandleForwardCase(
- context: Context, o: JSReceiver, len: Number, itemCount: Number,
- actualStart: Number, actualDeleteCount: Number): void {
- // 15. If itemCount < actualDeleteCount, then
- // a. Let k be actualStart.
- let k: Number = actualStart;
-
- // b. Repeat, while k < (len - actualDeleteCount)
- while (k < (len - actualDeleteCount)) {
- // i. Let from be ! ToString(k + actualDeleteCount).
- const from: Number = k + actualDeleteCount;
- // ii. Let to be ! ToString(k + itemCount).
- const to: Number = k + itemCount;
-
- // iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(o, from);
-
- // iv. If fromPresent is true, then
- if (fromPresent == True) {
- // 1. Let fromValue be ? Get(O, from).
- const fromValue: JSAny = GetProperty(o, from);
-
- // 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(o, to, fromValue);
-
- // v. Else fromPresent is false,
- } else {
- // 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(o, to, LanguageMode::kStrict);
- }
- // vi. Increase k by 1.
- k++;
+// HandleForwardCase implements step 15. "If itemCount < actualDeleteCount,
+// then...""
+transitioning macro HandleForwardCase(
+ context: Context, o: JSReceiver, len: Number, itemCount: Number,
+ actualStart: Number, actualDeleteCount: Number): void {
+ // 15. If itemCount < actualDeleteCount, then
+ // a. Let k be actualStart.
+ let k: Number = actualStart;
+
+ // b. Repeat, while k < (len - actualDeleteCount)
+ while (k < (len - actualDeleteCount)) {
+ // i. Let from be ! ToString(k + actualDeleteCount).
+ const from: Number = k + actualDeleteCount;
+ // ii. Let to be ! ToString(k + itemCount).
+ const to: Number = k + itemCount;
+
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(o, from);
+
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: JSAny = GetProperty(o, from);
+
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(o, to, fromValue);
+
+ // v. Else fromPresent is false,
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(o, to, LanguageMode::kStrict);
}
+ // vi. Increase k by 1.
+ k++;
+ }
- // c. Let k be len.
- k = len;
+ // c. Let k be len.
+ k = len;
- // d. Repeat, while k > (len - actualDeleteCount + itemCount)
- while (k > (len - actualDeleteCount + itemCount)) {
- // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
- DeleteProperty(o, k - 1, LanguageMode::kStrict);
- // ii. Decrease k by 1.
- k--;
- }
+ // d. Repeat, while k > (len - actualDeleteCount + itemCount)
+ while (k > (len - actualDeleteCount + itemCount)) {
+ // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
+ DeleteProperty(o, k - 1, LanguageMode::kStrict);
+ // ii. Decrease k by 1.
+ k--;
}
+}
- // HandleBackwardCase implements step 16. "Else if itemCount >
- // actualDeleteCount, then..."
- transitioning macro HandleBackwardCase(
- context: Context, o: JSReceiver, len: Number, itemCount: Number,
- actualStart: Number, actualDeleteCount: Number): void {
- // 16. Else if itemCount > actualDeleteCount, then
- // a. Let k be (len - actualDeleteCount).
- let k: Number = len - actualDeleteCount;
-
- // b. Repeat, while k > actualStart
- while (k > actualStart) {
- // i. Let from be ! ToString(k + actualDeleteCount - 1).
- const from: Number = k + actualDeleteCount - 1;
+// HandleBackwardCase implements step 16. "Else if itemCount >
+// actualDeleteCount, then..."
+transitioning macro HandleBackwardCase(
+ context: Context, o: JSReceiver, len: Number, itemCount: Number,
+ actualStart: Number, actualDeleteCount: Number): void {
+ // 16. Else if itemCount > actualDeleteCount, then
+ // a. Let k be (len - actualDeleteCount).
+ let k: Number = len - actualDeleteCount;
- // ii. Let to be ! ToString(k + itemCount - 1).
- const to: Number = k + itemCount - 1;
+ // b. Repeat, while k > actualStart
+ while (k > actualStart) {
+ // i. Let from be ! ToString(k + actualDeleteCount - 1).
+ const from: Number = k + actualDeleteCount - 1;
- // iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(o, from);
+ // ii. Let to be ! ToString(k + itemCount - 1).
+ const to: Number = k + itemCount - 1;
- // iv. If fromPresent is true, then
- if (fromPresent == True) {
- // 1. Let fromValue be ? Get(O, from).
- const fromValue: JSAny = GetProperty(o, from);
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(o, from);
- // 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(o, to, fromValue);
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: JSAny = GetProperty(o, from);
- // v. Else fromPresent is false,
- } else {
- // 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(o, to, LanguageMode::kStrict);
- }
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(o, to, fromValue);
- // vi. Decrease k by 1.
- k--;
+ // v. Else fromPresent is false,
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(o, to, LanguageMode::kStrict);
}
+
+ // vi. Decrease k by 1.
+ k--;
}
+}
- transitioning macro SlowSplice(
- context: Context, arguments: Arguments, o: JSReceiver, len: Number,
- actualStart: Number, insertCount: Smi, actualDeleteCount: Number): JSAny {
- // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
- const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
- const itemCount: Number = insertCount;
-
- // Steps 9 through 12: creating the array of deleted elements.
- FillDeletedElementsArray(context, o, actualStart, actualDeleteCount, a);
-
- // 13. Let items be a List whose elements are, in left-to-right order,
- // the portion of the actual argument list starting with the third
- // argument. The list is empty if fewer than three arguments were
- // passed.
- // 14. Let itemCount be the Number of elements in items.
- // (done above).
-
- // 15. If itemCount < actualDeleteCount, then
- if (itemCount < actualDeleteCount) {
- HandleForwardCase(
- context, o, len, itemCount, actualStart, actualDeleteCount);
- // 16. Else if itemCount > actualDeleteCount, then
- } else if (itemCount > actualDeleteCount) {
- HandleBackwardCase(
- context, o, len, itemCount, actualStart, actualDeleteCount);
- }
+transitioning macro SlowSplice(
+ context: Context, arguments: Arguments, o: JSReceiver, len: Number,
+ actualStart: Number, insertCount: Smi, actualDeleteCount: Number): JSAny {
+ // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
+ const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
+ const itemCount: Number = insertCount;
+
+ // Steps 9 through 12: creating the array of deleted elements.
+ FillDeletedElementsArray(context, o, actualStart, actualDeleteCount, a);
+
+ // 13. Let items be a List whose elements are, in left-to-right order,
+ // the portion of the actual argument list starting with the third
+ // argument. The list is empty if fewer than three arguments were
+ // passed.
+ // 14. Let itemCount be the Number of elements in items.
+ // (done above).
+
+ // 15. If itemCount < actualDeleteCount, then
+ if (itemCount < actualDeleteCount) {
+ HandleForwardCase(
+ context, o, len, itemCount, actualStart, actualDeleteCount);
+ // 16. Else if itemCount > actualDeleteCount, then
+ } else if (itemCount > actualDeleteCount) {
+ HandleBackwardCase(
+ context, o, len, itemCount, actualStart, actualDeleteCount);
+ }
- // 17. Let k be actualStart.
- let k: Number = actualStart;
+ // 17. Let k be actualStart.
+ let k: Number = actualStart;
- // 18. Repeat, while items is not empty
- // a. Remove the first element from items and let E be the value of that
- // element.
- if (arguments.length > 2) {
- for (let i: intptr = 2; i < arguments.length; ++i) {
- const e: JSAny = arguments[i];
- // b. Perform ? Set(O, ! ToString(k), E, true).
- SetProperty(o, k, e);
+ // 18. Repeat, while items is not empty
+ // a. Remove the first element from items and let E be the value of that
+ // element.
+ if (arguments.length > 2) {
+ for (let i: intptr = 2; i < arguments.length; ++i) {
+ const e: JSAny = arguments[i];
+ // b. Perform ? Set(O, ! ToString(k), E, true).
+ SetProperty(o, k, e);
- // c. Increase k by 1.
- k = k + 1;
- }
+ // c. Increase k by 1.
+ k = k + 1;
}
-
- // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
- // true).
- SetProperty(o, kLengthString, len - actualDeleteCount + itemCount);
-
- return a;
}
- // https://tc39.github.io/ecma262/#sec-array.prototype.splice
- transitioning javascript builtin
- ArrayPrototypeSplice(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // 1. Let O be ? ToObject(this value).
- const o: JSReceiver = ToObject(context, receiver);
-
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const len: Number = GetLengthProperty(o);
-
- // 3. Let relativeStart be ? ToInteger(start).
- const start: JSAny = arguments[0];
- const relativeStart: Number = ToInteger_Inline(start);
-
- // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
- // 0);
- // else let actualStart be min(relativeStart, len).
- const actualStart: Number = relativeStart < 0 ?
- Max((len + relativeStart), 0) :
- Min(relativeStart, len);
-
- let insertCount: Smi;
- let actualDeleteCount: Number;
- // 5. If the Number of actual arguments is 0, then
- if (arguments.length == 0) {
- // a. Let insertCount be 0.
- insertCount = 0;
- // b. Let actualDeleteCount be 0.
- actualDeleteCount = 0;
- // 6. Else if the Number of actual arguments is 1, then
- } else if (arguments.length == 1) {
- // a. Let insertCount be 0.
- insertCount = 0;
- // b. Let actualDeleteCount be len - actualStart.
- actualDeleteCount = len - actualStart;
- // 7. Else,
- } else {
- // a. Let insertCount be the Number of actual arguments minus 2.
- insertCount = Convert<Smi>(arguments.length) - 2;
- // b. Let dc be ? ToInteger(deleteCount).
- const deleteCount: JSAny = arguments[1];
- const dc: Number = ToInteger_Inline(deleteCount);
- // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
- actualDeleteCount = Min(Max(dc, 0), len - actualStart);
- }
+ // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
+ // true).
+ SetProperty(o, kLengthString, len - actualDeleteCount + itemCount);
- // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
- // Bailout exception.
- const newLength: Number = len + insertCount - actualDeleteCount;
- if (newLength > kMaxSafeInteger) {
- ThrowTypeError(MessageTemplate::kInvalidArrayLength, start);
- }
+ return a;
+}
- try {
- return FastArraySplice(
- context, arguments, o, len, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
- }
- label Bailout {}
+// https://tc39.github.io/ecma262/#sec-array.prototype.splice
+transitioning javascript builtin
+ArrayPrototypeSplice(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let O be ? ToObject(this value).
+ const o: JSReceiver = ToObject(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const len: Number = GetLengthProperty(o);
+
+ // 3. Let relativeStart be ? ToInteger(start).
+ const start: JSAny = arguments[0];
+ const relativeStart: Number = ToInteger_Inline(start);
+
+ // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
+ // 0);
+ // else let actualStart be min(relativeStart, len).
+ const actualStart: Number = relativeStart < 0 ?
+ Max((len + relativeStart), 0) :
+ Min(relativeStart, len);
+
+ let insertCount: Smi;
+ let actualDeleteCount: Number;
+ // 5. If the Number of actual arguments is 0, then
+ if (arguments.length == 0) {
+ // a. Let insertCount be 0.
+ insertCount = 0;
+ // b. Let actualDeleteCount be 0.
+ actualDeleteCount = 0;
+ // 6. Else if the Number of actual arguments is 1, then
+ } else if (arguments.length == 1) {
+ // a. Let insertCount be 0.
+ insertCount = 0;
+ // b. Let actualDeleteCount be len - actualStart.
+ actualDeleteCount = len - actualStart;
+ // 7. Else,
+ } else {
+ // a. Let insertCount be the Number of actual arguments minus 2.
+ insertCount = Convert<Smi>(arguments.length) - 2;
+ // b. Let dc be ? ToInteger(deleteCount).
+ const deleteCount: JSAny = arguments[1];
+ const dc: Number = ToInteger_Inline(deleteCount);
+ // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
+ actualDeleteCount = Min(Max(dc, 0), len - actualStart);
+ }
- // If the fast case fails, just continue with the slow, correct,
- // spec-compliant case.
- return SlowSplice(
- context, arguments, o, len, actualStart, insertCount,
- actualDeleteCount);
+ // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
+  // TypeError exception.
+ const newLength: Number = len + insertCount - actualDeleteCount;
+ if (newLength > kMaxSafeInteger) {
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength, start);
}
+
+ try {
+ return FastArraySplice(
+ context, arguments, o, len, actualStart, insertCount, actualDeleteCount)
+ otherwise Bailout;
+ } label Bailout {}
+
+ // If the fast case fails, just continue with the slow, correct,
+ // spec-compliant case.
+ return SlowSplice(
+ context, arguments, o, len, actualStart, insertCount, actualDeleteCount);
+}
}
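
FastArraySplice and SlowSplice together implement the observable contract of Array.prototype.splice: remove actualDeleteCount elements starting at actualStart, return them as a new array, and insert any extra arguments in their place. A minimal TypeScript sketch of that contract (illustrative only, not taken from this patch):

const items: number[] = [1, 2, 3, 4, 5];
// Delete two elements starting at index 1 and insert three new ones.
const removed = items.splice(1, 2, 9, 8, 7);
console.log(removed);  // [2, 3]             -- the extracted "deletedResult"
console.log(items);    // [1, 9, 8, 7, 4, 5] -- newLength = 5 + 3 - 2 = 6

Receivers that fail the fast-path checks above (non-initial Array prototype, invalidated protectors, non-fast elements, read-only length) fall through to SlowSplice, which performs the spec steps one property operation at a time.
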
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index 3b66015d3b..7afeeb0627 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -3,97 +3,95 @@
// found in the LICENSE file.
namespace array {
- extern builtin ArrayUnshift(Context, JSFunction, JSAny, int32): JSAny;
+extern builtin ArrayUnshift(Context, JSFunction, JSAny, int32): JSAny;
- transitioning macro GenericArrayUnshift(
- context: Context, receiver: JSAny, arguments: Arguments): Number {
- // 1. Let O be ? ToObject(this value).
- const object: JSReceiver = ToObject_Inline(context, receiver);
+transitioning macro GenericArrayUnshift(
+ context: Context, receiver: JSAny, arguments: Arguments): Number {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
- // 2. Let len be ? ToLength(? Get(O, "length")).
- const length: Number = GetLengthProperty(object);
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(object);
- // 3. Let argCount be the number of actual arguments.
- const argCount: Smi = Convert<Smi>(arguments.length);
+ // 3. Let argCount be the number of actual arguments.
+ const argCount: Smi = Convert<Smi>(arguments.length);
- // 4. If argCount > 0, then.
- if (argCount > 0) {
- // a. If len + argCount > 2**53 - 1, throw a TypeError exception.
- if (length + argCount > kMaxSafeInteger) {
- ThrowTypeError(MessageTemplate::kInvalidArrayLength);
- }
-
- // b. Let k be len.
- let k: Number = length;
+ // 4. If argCount > 0, then.
+ if (argCount > 0) {
+ // a. If len + argCount > 2**53 - 1, throw a TypeError exception.
+ if (length + argCount > kMaxSafeInteger) {
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ }
- // c. Repeat, while k > 0.
- while (k > 0) {
- // i. Let from be ! ToString(k - 1).
- const from: Number = k - 1;
+ // b. Let k be len.
+ let k: Number = length;
- // ii. Let to be ! ToString(k + argCount - 1).
- const to: Number = k + argCount - 1;
+ // c. Repeat, while k > 0.
+ while (k > 0) {
+ // i. Let from be ! ToString(k - 1).
+ const from: Number = k - 1;
- // iii. Let fromPresent be ? HasProperty(O, from).
- const fromPresent: Boolean = HasProperty(object, from);
+ // ii. Let to be ! ToString(k + argCount - 1).
+ const to: Number = k + argCount - 1;
- // iv. If fromPresent is true, then
- if (fromPresent == True) {
- // 1. Let fromValue be ? Get(O, from).
- const fromValue: JSAny = GetProperty(object, from);
+ // iii. Let fromPresent be ? HasProperty(O, from).
+ const fromPresent: Boolean = HasProperty(object, from);
- // 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(object, to, fromValue);
- } else {
- // 1. Perform ? DeletePropertyOrThrow(O, to).
- DeleteProperty(object, to, LanguageMode::kStrict);
- }
+ // iv. If fromPresent is true, then
+ if (fromPresent == True) {
+ // 1. Let fromValue be ? Get(O, from).
+ const fromValue: JSAny = GetProperty(object, from);
- // vi. Decrease k by 1.
- --k;
+ // 2. Perform ? Set(O, to, fromValue, true).
+ SetProperty(object, to, fromValue);
+ } else {
+ // 1. Perform ? DeletePropertyOrThrow(O, to).
+ DeleteProperty(object, to, LanguageMode::kStrict);
}
- // d. Let j be 0.
- let j: Smi = 0;
+ // vi. Decrease k by 1.
+ --k;
+ }
- // e. Let items be a List whose elements are, in left to right order,
- // the arguments that were passed to this function invocation.
- // f. Repeat, while items is not empty
- while (j < argCount) {
- // ii .Perform ? Set(O, ! ToString(j), E, true).
- SetProperty(object, j, arguments[Convert<intptr>(j)]);
+ // d. Let j be 0.
+ let j: Smi = 0;
- // iii. Increase j by 1.
- ++j;
- }
+ // e. Let items be a List whose elements are, in left to right order,
+ // the arguments that were passed to this function invocation.
+ // f. Repeat, while items is not empty
+ while (j < argCount) {
+ // ii .Perform ? Set(O, ! ToString(j), E, true).
+ SetProperty(object, j, arguments[Convert<intptr>(j)]);
+
+ // iii. Increase j by 1.
+ ++j;
}
+ }
- // 5. Perform ? Set(O, "length", len + argCount, true).
- const newLength: Number = length + argCount;
- SetProperty(object, kLengthString, newLength);
+ // 5. Perform ? Set(O, "length", len + argCount, true).
+ const newLength: Number = length + argCount;
+ SetProperty(object, kLengthString, newLength);
- // 6. Return length + argCount.
- return newLength;
- }
+ // 6. Return length + argCount.
+ return newLength;
+}
- // https://tc39.github.io/ecma262/#sec-array.prototype.unshift
- transitioning javascript builtin ArrayPrototypeUnshift(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- try {
- const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
- array::EnsureWriteableFastElements(array);
-
- const map: Map = array.map;
- if (!IsExtensibleMap(map)) goto Slow;
- EnsureArrayLengthWritable(map) otherwise Slow;
-
- tail ArrayUnshift(
- context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
- }
- label Slow {
- return GenericArrayUnshift(context, receiver, arguments);
- }
+// https://tc39.github.io/ecma262/#sec-array.prototype.unshift
+transitioning javascript builtin ArrayPrototypeUnshift(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ try {
+ const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ array::EnsureWriteableFastElements(array);
+
+ const map: Map = array.map;
+ if (!IsExtensibleMap(map)) goto Slow;
+ EnsureArrayLengthWritable(map) otherwise Slow;
+
+ tail ArrayUnshift(
+ context, LoadTargetFromFrame(), Undefined,
+ Convert<int32>(arguments.length));
+ } label Slow {
+ return GenericArrayUnshift(context, receiver, arguments);
}
}
+}
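
GenericArrayUnshift follows the spec steps directly: shift every existing element up by argCount, write the arguments at indices 0..argCount-1, and return the new length; ArrayPrototypeUnshift only tail-calls the fast ArrayUnshift builtin when the receiver is a fast, extensible JSArray with a writable length. A short TypeScript sketch of the observable behavior (illustrative only):

const list: number[] = [3, 4];
const newLen = list.unshift(1, 2);  // existing elements move up by 2, args land at 0 and 1
console.log(newLen, list);          // 4 [ 1, 2, 3, 4 ]
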
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index a5ffcfe40e..92b2c520e2 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -5,78 +5,85 @@
#include 'src/builtins/builtins-array-gen.h'
namespace array {
- // Naming convention from elements.cc. We have a similar intent but implement
- // fastpaths using generics instead of using a class hierarchy for elements
- // kinds specific implementations.
- type GenericElementsAccessor extends ElementsKind;
- type FastPackedSmiElements extends ElementsKind;
- type FastPackedObjectElements extends ElementsKind;
- type FastPackedDoubleElements extends ElementsKind;
- type FastSmiOrObjectElements extends ElementsKind;
- type FastDoubleElements extends ElementsKind;
- type DictionaryElements extends ElementsKind;
+// Naming convention from elements.cc. We have a similar intent but implement
+// fast paths using generics instead of a class hierarchy of
+// elements-kind-specific implementations.
+type GenericElementsAccessor extends ElementsKind;
+type FastPackedSmiElements extends ElementsKind;
+type FastPackedObjectElements extends ElementsKind;
+type FastPackedDoubleElements extends ElementsKind;
+type FastSmiOrObjectElements extends ElementsKind;
+type FastDoubleElements extends ElementsKind;
+type DictionaryElements extends ElementsKind;
- macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray) {
- assert(IsFastElementsKind(array.map.elements_kind));
+macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray) {
+ assert(IsFastElementsKind(array.map.elements_kind));
- const elements: FixedArrayBase = array.elements;
- if (elements.map != kCOWMap) return;
+ const elements: FixedArrayBase = array.elements;
+ if (elements.map != kCOWMap) return;
- // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
- // extract FixedArrays and don't have to worry about FixedDoubleArrays.
- assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
+ // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
+ // extract FixedArrays and don't have to worry about FixedDoubleArrays.
+ assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
- const length =
- Convert<intptr>(Cast<Smi>(array.length) otherwise unreachable);
- array.elements =
- ExtractFixedArray(UnsafeCast<FixedArray>(elements), 0, length, length);
- assert(array.elements.map != kCOWMap);
- }
+ const length = Convert<intptr>(Cast<Smi>(array.length) otherwise unreachable);
+ array.elements =
+ ExtractFixedArray(UnsafeCast<FixedArray>(elements), 0, length, length);
+ assert(array.elements.map != kCOWMap);
+}
- macro LoadElementOrUndefined(implicit context:
- Context)(a: FixedArray, i: Smi): JSAny {
- const e = UnsafeCast<(JSAny | TheHole)>(a.objects[i]);
- return ReplaceTheHoleWithUndefined(e);
- }
+macro LoadElementOrUndefined(implicit context: Context)(
+ a: FixedArray, i: Smi): JSAny {
+ const e = UnsafeCast<(JSAny | TheHole)>(a.objects[i]);
+ return ReplaceTheHoleWithUndefined(e);
+}
- macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
- const f: float64 = a.floats[i].Value() otherwise return Undefined;
- return AllocateHeapNumberWithValue(f);
- }
+macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
+ const f: float64 = a.floats[i].Value() otherwise return Undefined;
+ return AllocateHeapNumberWithValue(f);
+}
- macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
- elements.floats[k] = kDoubleHole;
- }
+macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
+ elements.floats[k] = kDoubleHole;
+}
- macro StoreArrayHole(elements: FixedArray, k: Smi): void {
- elements.objects[k] = TheHole;
- }
+macro StoreArrayHole(elements: FixedArray, k: Smi): void {
+ elements.objects[k] = TheHole;
+}
- extern macro SetPropertyLength(implicit context: Context)(JSAny, Number);
+extern macro SetPropertyLength(implicit context: Context)(JSAny, Number);
- const kLengthDescriptorIndex:
- constexpr int31 generates 'JSArray::kLengthDescriptorIndex';
- const kAttributesReadOnlyMask: constexpr int31
- generates 'PropertyDetails::kAttributesReadOnlyMask';
+const kLengthDescriptorIndex:
+ constexpr int31 generates 'JSArray::kLengthDescriptorIndex';
+const kAttributesReadOnlyMask: constexpr int31
+ generates 'PropertyDetails::kAttributesReadOnlyMask';
- @export
- macro EnsureArrayLengthWritable(implicit context: Context)(map: Map):
- void labels Bailout {
- // Don't support arrays in dictionary named property mode.
- if (IsDictionaryMap(map)) {
- goto Bailout;
- }
+@export
+macro EnsureArrayLengthWritable(implicit context: Context)(map: Map):
+ void labels Bailout {
+ // Don't support arrays in dictionary named property mode.
+ if (IsDictionaryMap(map)) {
+ goto Bailout;
+ }
- // Check whether the length property is writable. The length property is the
- // only default named property on arrays. It's nonconfigurable, hence is
- // guaranteed to stay the first property.
- const descriptors: DescriptorArray = map.instance_descriptors;
- const descriptor:&DescriptorEntry =
- & descriptors.descriptors[kLengthDescriptorIndex];
- assert(TaggedEqual(descriptor->key, LengthStringConstant()));
- const details: Smi = UnsafeCast<Smi>(descriptor->details);
- if ((details & kAttributesReadOnlyMask) != 0) {
- goto Bailout;
- }
+ // Check whether the length property is writable. The length property is the
+ // only default named property on arrays. It's nonconfigurable, hence is
+ // guaranteed to stay the first property.
+ const descriptors: DescriptorArray = map.instance_descriptors;
+ const descriptor:&DescriptorEntry =
+ & descriptors.descriptors[kLengthDescriptorIndex];
+ assert(TaggedEqual(descriptor->key, LengthStringConstant()));
+ const details: Smi = UnsafeCast<Smi>(descriptor->details);
+ if ((details & kAttributesReadOnlyMask) != 0) {
+ goto Bailout;
}
}
+
+macro CreateJSArrayWithElements(implicit context: Context)(array: FixedArray):
+ JSArray {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const map: Map =
+ LoadJSArrayElementsMap(ElementsKind::PACKED_ELEMENTS, nativeContext);
+ return AllocateJSArray(map, array, array.length);
+}
+}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 7d87a55e88..1d2c454646 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -52,6 +52,7 @@ extern macro MakeWeak(HeapObject): WeakHeapObject;
extern macro GetHeapObjectAssumeWeak(WeakHeapObject):
HeapObject labels ClearedWeakPointer;
extern macro IsWeakOrCleared(MaybeObject): bool;
+extern macro IsWeakReferenceToObject(MaybeObject, Object): bool;
macro StrongToWeak<T: type>(x: T): Weak<T> {
return %RawDownCast<Weak<T>>(MakeWeak(x));
@@ -147,6 +148,8 @@ type ObjectHashTable extends HashTable
extern class NumberDictionary extends HashTable;
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
+type ExternalPointer
+ generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
extern class Code extends HeapObject;
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
@@ -253,10 +256,12 @@ constexpr 'CodeStubAssembler::ExtractFixedArrayFlag' {
const kBigIntMaxLength: constexpr intptr generates 'BigInt::kMaxLength';
extern enum MessageTemplate {
+ kAllPromisesRejected,
kInvalidArrayBufferLength,
kInvalidArrayLength,
kInvalidIndex,
kNotConstructor,
+ kNotGeneric,
kCalledNonCallable,
kCalledOnNullOrUndefined,
kProtoObjectOrNull,
@@ -287,12 +292,11 @@ extern enum MessageTemplate {
kPromiseNonCallable,
kNotAPromise,
kResolverNotAFunction,
- kTooManyElementsInPromiseAll,
+ kTooManyElementsInPromiseCombinator,
kToRadixFormatRange,
kCalledOnNonObject,
kRegExpGlobalInvokedOnNonGlobal,
kProxyNonObject,
- kProxyHandlerOrTargetRevoked,
kProxyRevoked,
kProxyTrapReturnedFalsishFor,
kProxyPrivate,
@@ -303,6 +307,24 @@ extern enum MessageTemplate {
kProxyGetPrototypeOfNonExtensible,
kProxySetPrototypeOfNonExtensible,
kProxyDeletePropertyNonExtensible,
+ kWeakRefsCleanupMustBeCallable,
+ kWasmTrapUnreachable,
+ kWasmTrapMemOutOfBounds,
+ kWasmTrapUnalignedAccess,
+ kWasmTrapDivByZero,
+ kWasmTrapDivUnrepresentable,
+ kWasmTrapRemByZero,
+ kWasmTrapFloatUnrepresentable,
+ kWasmTrapFuncInvalid,
+ kWasmTrapFuncSigMismatch,
+ kWasmTrapDataSegmentDropped,
+ kWasmTrapElemSegmentDropped,
+ kWasmTrapTableOutOfBounds,
+ kWasmTrapBrOnExnNullRef,
+ kWasmTrapRethrowNullRef,
+ kWasmTrapNullDereference,
+ kWasmTrapIllegalCast,
+ kWasmTrapArrayOutOfBounds,
...
}
@@ -329,6 +351,8 @@ const kStringMaxLengthUintptr:
constexpr uintptr generates 'String::kMaxLength';
const kFixedArrayMaxLength:
constexpr int31 generates 'FixedArray::kMaxLength';
+const kFixedDoubleArrayMaxLength:
+ constexpr int31 generates 'FixedDoubleArray::kMaxLength';
const kObjectAlignmentMask: constexpr intptr
generates 'kObjectAlignmentMask';
const kMinAddedElementsCapacity:
@@ -363,19 +387,20 @@ type Boolean = True|False;
type NumberOrUndefined = Number|Undefined;
-extern macro TheHoleConstant(): TheHole;
-extern macro NullConstant(): Null;
-extern macro UndefinedConstant(): Undefined;
-extern macro TrueConstant(): True;
+extern macro EmptyStringConstant(): EmptyString;
extern macro FalseConstant(): False;
-extern macro Int32TrueConstant(): bool;
extern macro Int32FalseConstant(): bool;
-extern macro EmptyStringConstant(): EmptyString;
-extern macro LengthStringConstant(): String;
-extern macro NanConstant(): NaN;
+extern macro Int32TrueConstant(): bool;
extern macro IteratorSymbolConstant(): PublicSymbol;
+extern macro LengthStringConstant(): String;
extern macro MatchSymbolConstant(): Symbol;
+extern macro MessageStringConstant(): String;
+extern macro NanConstant(): NaN;
+extern macro NullConstant(): Null;
extern macro ReturnStringConstant(): String;
+extern macro TheHoleConstant(): TheHole;
+extern macro TrueConstant(): True;
+extern macro UndefinedConstant(): Undefined;
const TheHole: TheHole = TheHoleConstant();
const Null: Null = NullConstant();
@@ -384,6 +409,7 @@ const True: True = TrueConstant();
const False: False = FalseConstant();
const kEmptyString: EmptyString = EmptyStringConstant();
const kLengthString: String = LengthStringConstant();
+const kMessageString: String = MessageStringConstant();
const kReturnString: String = ReturnStringConstant();
const kNaN: NaN = NanConstant();
@@ -491,9 +517,9 @@ extern transitioning macro ToThisValue(implicit context: Context)(
extern transitioning macro GetProperty(implicit context: Context)(
JSAny, JSAny): JSAny;
extern transitioning builtin SetProperty(implicit context: Context)(
- JSAny, JSAny, JSAny);
+ JSAny, JSAny, JSAny): JSAny;
extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
- JSAny, JSAny, JSAny);
+ JSAny, JSAny, JSAny): JSAny;
extern transitioning builtin DeleteProperty(implicit context: Context)(
JSAny, JSAny | PrivateSymbol, LanguageModeSmi): Boolean;
extern transitioning builtin HasProperty(implicit context: Context)(
@@ -502,8 +528,6 @@ extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, JSAny): Boolean;
extern builtin LoadIC(
Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny;
-extern macro CollectCallFeedback(
- JSAny, Context, Undefined | FeedbackVector, uintptr);
extern macro ThrowRangeError(implicit context: Context)(
constexpr MessageTemplate): never;
@@ -640,6 +664,7 @@ extern macro IsFastAliasedArgumentsMap(implicit context: Context)(Map): bool;
extern macro IsSlowAliasedArgumentsMap(implicit context: Context)(Map): bool;
extern macro IsSloppyArgumentsMap(implicit context: Context)(Map): bool;
extern macro IsStrictArgumentsMap(implicit context: Context)(Map): bool;
+extern macro IsTuple2Map(Map): bool;
extern macro SmiAbove(Smi, Smi): bool;
@@ -701,7 +726,8 @@ macro Float64IsNaN(n: float64): bool {
}
// The type of all tagged values that can safely be compared with TaggedEqual.
-type TaggedWithIdentity = JSReceiver|FixedArrayBase|Oddball|Map|EmptyString;
+type TaggedWithIdentity =
+ JSReceiver|FixedArrayBase|Oddball|Map|WeakCell|Context|EmptyString;
extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool;
extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool;
@@ -877,10 +903,14 @@ extern macro TruncateIntPtrToInt32(intptr): int32;
extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiFromUint32(uint32): Smi;
+extern macro SmiFromIntPtr(intptr): Smi;
extern macro SmiUntag(Smi): intptr;
macro SmiUntag<T: type>(value: SmiTagged<T>): T {
return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
}
+macro SmiTag<T : type extends uint31>(value: T): SmiTagged<T> {
+ return %RawDownCast<SmiTagged<T>>(SmiFromUint32(value));
+}
extern macro SmiToInt32(Smi): int32;
extern macro TaggedIndexToIntPtr(TaggedIndex): intptr;
extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
@@ -889,9 +919,13 @@ extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
+extern macro ChangeTaggedNonSmiToInt32(implicit context: Context)(JSAnyNotSmi):
+ int32;
+extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64;
extern macro ChangeFloat64ToTagged(float64): Number;
extern macro ChangeFloat64ToUintPtr(float64): uintptr;
extern macro ChangeFloat64ToIntPtr(float64): intptr;
+extern macro ChangeInt32ToFloat64(int32): float64;
extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
@@ -1241,8 +1275,7 @@ macro ChangeUintPtrNumberToUintPtr(value: Number): uintptr {
try {
return TryNumberToUintPtr(value, kModeValueIsSafeIntegerUintPtr)
otherwise InvalidValue, InvalidValue, InvalidValue;
- }
- label InvalidValue {
+ } label InvalidValue {
unreachable;
}
}
@@ -1253,8 +1286,7 @@ macro ChangeSafeIntegerNumberToUintPtr(value: Number):
try {
return TryNumberToUintPtr(value, kModeValueIsSafeInteger)
otherwise InvalidValue, IfUIntPtrOverflow, InvalidValue;
- }
- label InvalidValue {
+ } label InvalidValue {
unreachable;
}
}
@@ -1300,8 +1332,7 @@ transitioning macro GetLengthProperty(implicit context: Context)(o: JSAny):
goto ToLength(GetProperty(o, kLengthString));
}
}
- }
- label ToLength(length: JSAny) deferred {
+ } label ToLength(length: JSAny) deferred {
return ToLength_Inline(length);
}
}
@@ -1321,8 +1352,7 @@ transitioning macro GetMethod(implicit context: Context)(
o: JSAny, name: String): Callable labels IfNullOrUndefined {
try {
return GetMethod(o, name) otherwise IfNullOrUndefined, IfMethodNotCallable;
- }
- label IfMethodNotCallable(value: JSAny) deferred {
+ } label IfMethodNotCallable(value: JSAny) deferred {
ThrowTypeError(MessageTemplate::kPropertyNotFunction, value, name, o);
}
}
@@ -1428,7 +1458,7 @@ macro ClampToIndexRange(indexNumber: Number, limit: uintptr): uintptr {
}
}
-extern builtin ObjectToString(Context, JSAny): JSAny;
+extern builtin ObjectToString(Context, JSAny): String;
extern builtin StringRepeat(Context, String, Number): String;
@export
@@ -1485,8 +1515,8 @@ extern transitioning runtime
CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny);
namespace runtime {
- extern runtime
- GetDerivedMap(Context, JSFunction, JSReceiver): Map;
+extern runtime
+GetDerivedMap(Context, JSFunction, JSReceiver): Map;
}
transitioning builtin FastCreateDataProperty(implicit context: Context)(
@@ -1537,8 +1567,7 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
elements[index] = value;
}
}
- }
- label Slow {
+ } label Slow {
CreateDataProperty(receiver, key, value);
}
return Undefined;
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq
index 02dca7543e..d52de7f84e 100644
--- a/deps/v8/src/builtins/bigint.tq
+++ b/deps/v8/src/builtins/bigint.tq
@@ -23,225 +23,218 @@ Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
namespace bigint {
- const kPositiveSign: uint32 = 0;
- const kNegativeSign: uint32 = 1;
+const kPositiveSign: uint32 = 0;
+const kNegativeSign: uint32 = 1;
+
+extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare(
+ BigIntBase, BigIntBase): int32;
+
+extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32;
+extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr;
+extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength(
+ MutableBigInt, uint32, intptr): void;
+
+extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt;
+extern macro CodeStubAssembler::StoreBigIntDigit(
+ MutableBigInt, intptr, uintptr): void;
+extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
+
+macro IsCanonicalized(bigint: BigIntBase): bool {
+ const length = ReadBigIntLength(bigint);
+
+ if (length == 0) {
+ return ReadBigIntSign(bigint) == kPositiveSign;
+ }
+
+ return LoadBigIntDigit(bigint, length - 1) != 0;
+}
+
+macro InvertSign(sign: uint32): uint32 {
+ return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
+}
+
+macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
+ if (length > kBigIntMaxLength) {
+ goto BigIntTooBig;
+ }
+ const result: MutableBigInt = AllocateBigInt(length);
+
+ WriteBigIntSignAndLength(result, sign, length);
+ return result;
+}
+
+macro AllocateEmptyBigInt(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt {
+ try {
+ return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
+ return CppAbsoluteCompare(x, y);
+}
+
+macro MutableBigIntAbsoluteSub(implicit context: Context)(
+ x: BigInt, y: BigInt, resultSign: uint32): BigInt {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+ const xsign = ReadBigIntSign(x);
+
+ assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ if (ylength == 0) {
+ return resultSign == xsign ? x : BigIntUnaryMinus(x);
+ }
+
+ const result = AllocateEmptyBigInt(resultSign, xlength);
+ CppAbsoluteSubAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+}
+
+macro MutableBigIntAbsoluteAdd(implicit context: Context)(
+ xBigint: BigInt, yBigint: BigInt,
+ resultSign: uint32): BigInt labels BigIntTooBig {
+ let xlength = ReadBigIntLength(xBigint);
+ let ylength = ReadBigIntLength(yBigint);
+
+ let x = xBigint;
+ let y = yBigint;
+ if (xlength < ylength) {
+ // Swap x and y so that x is longer.
+ x = yBigint;
+ y = xBigint;
+ const tempLength = xlength;
+ xlength = ylength;
+ ylength = tempLength;
+ }
+
+ // case: 0n + 0n
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ // case: x + 0n
+ if (ylength == 0) {
+ return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
+ }
- extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize(
- MutableBigInt, BigIntBase, BigIntBase): void;
- extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize(
- MutableBigInt, BigIntBase, BigIntBase): void;
- extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare(
- BigIntBase, BigIntBase): int32;
-
- extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32;
- extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr;
- extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength(
- MutableBigInt, uint32, intptr): void;
+ // case: x + y
+ const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
+ otherwise BigIntTooBig;
+ CppAbsoluteAddAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+}
+
+macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
+ labels BigIntTooBig {
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ if (xsign == ysign) {
+ // x + y == x + y
+ // -x + -y == -(x + y)
+ return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+ }
+
+ // x + -y == x - y == -(y - x)
+ // -x + y == y - x == -(x - y)
+ if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+ return MutableBigIntAbsoluteSub(x, y, xsign);
+ }
+ return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+}
+
+builtin BigIntAddNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ try {
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+    // Smi sentinel is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+}
+
+builtin BigIntAdd(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+macro BigIntSubtractImpl(implicit context: Context)(
+ x: BigInt, y: BigInt): BigInt labels BigIntTooBig {
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ if (xsign != ysign) {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+ }
- extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt;
- extern macro CodeStubAssembler::StoreBigIntDigit(
- MutableBigInt, intptr, uintptr): void;
- extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
-
- macro IsCanonicalized(bigint: BigIntBase): bool {
- const length = ReadBigIntLength(bigint);
-
- if (length == 0) {
- return ReadBigIntSign(bigint) == kPositiveSign;
- }
-
- return LoadBigIntDigit(bigint, length - 1) != 0;
- }
-
- macro InvertSign(sign: uint32): uint32 {
- return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
- }
-
- macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
- sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
- if (length > kBigIntMaxLength) {
- goto BigIntTooBig;
- }
- const result: MutableBigInt = AllocateBigInt(length);
-
- WriteBigIntSignAndLength(result, sign, length);
- return result;
- }
-
- macro AllocateEmptyBigInt(implicit context: Context)(
- sign: uint32, length: intptr): MutableBigInt {
- try {
- return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
- }
- label BigIntTooBig {
- ThrowRangeError(MessageTemplate::kBigIntTooBig);
- }
- }
-
- macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
- return CppAbsoluteCompare(x, y);
- }
-
- macro MutableBigIntAbsoluteSub(implicit context: Context)(
- x: BigInt, y: BigInt, resultSign: uint32): BigInt {
- const xlength = ReadBigIntLength(x);
- const ylength = ReadBigIntLength(y);
- const xsign = ReadBigIntSign(x);
-
- assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
- if (xlength == 0) {
- assert(ylength == 0);
- return x;
- }
-
- if (ylength == 0) {
- return resultSign == xsign ? x : BigIntUnaryMinus(x);
- }
-
- const result = AllocateEmptyBigInt(resultSign, xlength);
- CppAbsoluteSubAndCanonicalize(result, x, y);
- return Convert<BigInt>(result);
- }
-
- macro MutableBigIntAbsoluteAdd(implicit context: Context)(
- xBigint: BigInt, yBigint: BigInt,
- resultSign: uint32): BigInt labels BigIntTooBig {
- let xlength = ReadBigIntLength(xBigint);
- let ylength = ReadBigIntLength(yBigint);
-
- let x = xBigint;
- let y = yBigint;
- if (xlength < ylength) {
- // Swap x and y so that x is longer.
- x = yBigint;
- y = xBigint;
- const tempLength = xlength;
- xlength = ylength;
- ylength = tempLength;
- }
-
- // case: 0n + 0n
- if (xlength == 0) {
- assert(ylength == 0);
- return x;
- }
-
- // case: x + 0n
- if (ylength == 0) {
- return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
- }
-
- // case: x + y
- const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
- otherwise BigIntTooBig;
- CppAbsoluteAddAndCanonicalize(result, x, y);
- return Convert<BigInt>(result);
- }
-
- macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
- labels BigIntTooBig {
- const xsign = ReadBigIntSign(x);
- const ysign = ReadBigIntSign(y);
- if (xsign == ysign) {
- // x + y == x + y
- // -x + -y == -(x + y)
- return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
- }
-
- // x + -y == x - y == -(y - x)
- // -x + y == y - x == -(x - y)
- if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
- return MutableBigIntAbsoluteSub(x, y, xsign);
- }
- return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
- }
-
- builtin BigIntAddNoThrow(implicit context: Context)(x: BigInt, y: BigInt):
- Numeric {
- try {
- return BigIntAddImpl(x, y) otherwise BigIntTooBig;
- }
- label BigIntTooBig {
- // Smi sentinal is used to signal BigIntTooBig exception.
- return Convert<Smi>(0);
- }
- }
-
- builtin BigIntAdd(implicit context: Context)(xNum: Numeric, yNum: Numeric):
- BigInt {
- try {
- const x = Cast<BigInt>(xNum) otherwise MixedTypes;
- const y = Cast<BigInt>(yNum) otherwise MixedTypes;
-
- return BigIntAddImpl(x, y) otherwise BigIntTooBig;
- }
- label MixedTypes {
- ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
- }
- label BigIntTooBig {
- ThrowRangeError(MessageTemplate::kBigIntTooBig);
- }
- }
-
- macro BigIntSubtractImpl(implicit context: Context)(x: BigInt, y: BigInt):
- BigInt labels BigIntTooBig {
- const xsign = ReadBigIntSign(x);
- const ysign = ReadBigIntSign(y);
- if (xsign != ysign) {
- // x - (-y) == x + y
- // (-x) - y == -(x + y)
- return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
- }
-
- // x - y == -(y - x)
- // (-x) - (-y) == y - x == -(x - y)
- if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
- return MutableBigIntAbsoluteSub(x, y, xsign);
- }
- return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
- }
-
- builtin BigIntSubtractNoThrow(implicit context:
- Context)(x: BigInt, y: BigInt): Numeric {
- try {
- return BigIntSubtractImpl(x, y) otherwise BigIntTooBig;
- }
- label BigIntTooBig {
- // Smi sentinal is used to signal BigIntTooBig exception.
- return Convert<Smi>(0);
- }
- }
-
- builtin BigIntSubtract(implicit context:
- Context)(xNum: Numeric, yNum: Numeric): BigInt {
- try {
- const x = Cast<BigInt>(xNum) otherwise MixedTypes;
- const y = Cast<BigInt>(yNum) otherwise MixedTypes;
-
- return BigIntSubtractImpl(x, y) otherwise BigIntTooBig;
- }
- label MixedTypes {
- ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
- }
- label BigIntTooBig {
- ThrowRangeError(MessageTemplate::kBigIntTooBig);
- }
- }
-
- builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
- const length = ReadBigIntLength(bigint);
-
- // There is no -0n.
- if (length == 0) {
- return bigint;
- }
-
- const result =
- AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length);
- for (let i: intptr = 0; i < length; ++i) {
- StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i));
- }
- return Convert<BigInt>(result);
+ // x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+ return MutableBigIntAbsoluteSub(x, y, xsign);
}
+ return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+}
+
+builtin BigIntSubtractNoThrow(implicit context: Context)(
+ x: BigInt, y: BigInt): Numeric {
+ try {
+ return BigIntSubtractImpl(x, y) otherwise BigIntTooBig;
+ } label BigIntTooBig {
+    // Smi sentinel is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+}
+
+builtin BigIntSubtract(implicit context: Context)(
+ xNum: Numeric, yNum: Numeric): BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntSubtractImpl(x, y) otherwise BigIntTooBig;
+ } label MixedTypes {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ } label BigIntTooBig {
+ ThrowRangeError(MessageTemplate::kBigIntTooBig);
+ }
+}
+
+builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
+ const length = ReadBigIntLength(bigint);
+
+ // There is no -0n.
+ if (length == 0) {
+ return bigint;
+ }
+
+ const result =
+ AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length);
+ for (let i: intptr = 0; i < length; ++i) {
+ StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i));
+ }
+ return Convert<BigInt>(result);
+}
} // namespace bigint
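
BigIntAddImpl and BigIntSubtractImpl reduce signed arithmetic to unsigned magnitude operations plus a sign rule: same-sign operands add their magnitudes and keep that sign; mixed-sign operands subtract the smaller magnitude from the larger and take the sign of the larger one. A small TypeScript sketch of that reduction, using the native bigint type purely for illustration (helper names are invented; only the sign logic mirrors the macros above, assuming an ES2020 target):

function signOf(x: bigint): 0 | 1 { return x < 0n ? 1 : 0; }  // kPositiveSign = 0, kNegativeSign = 1
function mag(x: bigint): bigint { return x < 0n ? -x : x; }
function withSign(sign: 0 | 1, m: bigint): bigint { return sign === 1 ? -m : m; }

function add(x: bigint, y: bigint): bigint {
  if (signOf(x) === signOf(y)) {
    // x + y == x + y and -x + -y == -(x + y): add magnitudes, keep x's sign.
    return withSign(signOf(x), mag(x) + mag(y));
  }
  // Mixed signs: |larger| - |smaller|, with the sign of the larger-magnitude operand.
  if (mag(x) >= mag(y)) return withSign(signOf(x), mag(x) - mag(y));
  return withSign(signOf(x) === 1 ? 0 : 1, mag(y) - mag(x));  // InvertSign(xsign)
}

console.log(add(5n, -3n), add(-5n, 3n), add(-2n, -2n));  // 2n -2n -4n

Subtraction uses the same magnitude helpers with the sign test flipped, as in BigIntSubtractImpl above.
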
diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq
index c8f0d8134a..40a011d4e0 100644
--- a/deps/v8/src/builtins/boolean.tq
+++ b/deps/v8/src/builtins/boolean.tq
@@ -3,43 +3,43 @@
// found in the LICENSE file.
namespace boolean {
- transitioning macro ThisBooleanValue(implicit context: Context)(
- receiver: JSAny, method: constexpr string): Boolean {
- return UnsafeCast<Boolean>(
- ToThisValue(receiver, PrimitiveType::kBoolean, method));
- }
+transitioning macro ThisBooleanValue(implicit context: Context)(
+ receiver: JSAny, method: constexpr string): Boolean {
+ return UnsafeCast<Boolean>(
+ ToThisValue(receiver, PrimitiveType::kBoolean, method));
+}
- javascript builtin
- BooleanConstructor(
- js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
- target: JSFunction)(...arguments): JSAny {
- const value = SelectBooleanConstant(ToBoolean(arguments[0]));
+javascript builtin
+BooleanConstructor(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
+ target: JSFunction)(...arguments): JSAny {
+ const value = SelectBooleanConstant(ToBoolean(arguments[0]));
- if (newTarget == Undefined) {
- return value;
- }
+ if (newTarget == Undefined) {
+ return value;
+ }
- const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
+ const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
- const obj =
- UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
- obj.value = value;
- return obj;
- }
+ const obj =
+ UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
+ obj.value = value;
+ return obj;
+}
- // ES #sec-boolean.prototype.tostring
- transitioning javascript builtin BooleanPrototypeToString(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Let b be ? thisBooleanValue(this value).
- const b = ThisBooleanValue(receiver, 'Boolean.prototype.toString');
- // 2. If b is true, return "true"; else return "false".
- return b.to_string;
- }
+// ES #sec-boolean.prototype.tostring
+transitioning javascript builtin BooleanPrototypeToString(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Let b be ? thisBooleanValue(this value).
+ const b = ThisBooleanValue(receiver, 'Boolean.prototype.toString');
+ // 2. If b is true, return "true"; else return "false".
+ return b.to_string;
+}
- // ES #sec-boolean.prototype.valueof
- transitioning javascript builtin BooleanPrototypeValueOf(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Return ? thisBooleanValue(this value).
- return ThisBooleanValue(receiver, 'Boolean.prototype.valueOf');
- }
+// ES #sec-boolean.prototype.valueof
+transitioning javascript builtin BooleanPrototypeValueOf(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Return ? thisBooleanValue(this value).
+ return ThisBooleanValue(receiver, 'Boolean.prototype.valueOf');
+}
}
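
BooleanConstructor distinguishes a plain call from a construct call through newTarget: without new it returns the Boolean primitive, with new it allocates a JSPrimitiveWrapper whose map comes from GetDerivedMap. In observable terms (TypeScript sketch, illustrative only):

const prim = Boolean(0);       // newTarget is Undefined -> the primitive false
const boxed = new Boolean(0);  // wrapper object holding the primitive false
console.log(typeof prim, typeof boxed);        // "boolean" "object"
console.log(prim === false, boxed.valueOf());  // true false
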
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 40accae57a..3c2fe33c5b 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/logging.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
@@ -471,6 +472,15 @@ BUILTIN(ArrayPop) {
uint32_t new_length = len - 1;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, JSReceiver::GetElement(isolate, array, new_length));
+
+ // The length could have become read-only during the last GetElement() call,
+ // so check again.
+ if (JSArray::HasReadOnlyLength(array)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
+ isolate->factory()->length_string(),
+ Object::TypeOf(isolate, array), array));
+ }
JSArray::SetLength(array, new_length);
}
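
The new check covers the case where arbitrary user code runs inside GetElement(): an accessor on the element being popped can make "length" read-only before SetLength() executes, and the builtin must then throw rather than write a stale length. An illustrative TypeScript sketch of that situation (the exact TypeError message is engine-specific):

const arr: unknown[] = [1, 2];
Object.defineProperty(arr, 1, {
  configurable: true,
  get() {
    // Runs while Array.prototype.pop reads the last element; freezes "length".
    Object.defineProperty(arr, 'length', { writable: false });
    return 2;
  },
});
arr.pop();  // TypeError: the length write at the end of pop sees a read-only "length"
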
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index aec64b9ccd..e84442295c 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -271,10 +271,12 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Goto(&after_debug_hook);
BIND(&after_debug_hook);
- Await(context, async_function_object, value, outer_promise,
- Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
- is_predicted_as_caught);
+ TNode<SharedFunctionInfo> on_resolve_sfi =
+ AsyncFunctionAwaitResolveSharedFunConstant();
+ TNode<SharedFunctionInfo> on_reject_sfi =
+ AsyncFunctionAwaitRejectSharedFunConstant();
+ Await(context, async_function_object, value, outer_promise, on_resolve_sfi,
+ on_reject_sfi, is_predicted_as_caught);
// Return outer promise to avoid adding an load of the outer promise before
// suspending in BytecodeGenerator.
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 785408339e..383289fd0f 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -27,8 +27,8 @@ class ValueUnwrapContext {
TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TNode<Context> context, TNode<JSGeneratorObject> generator,
TNode<Object> value, TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -90,12 +90,12 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
+ on_resolve_sfi);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
+ on_reject_sfi);
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
@@ -122,8 +122,8 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TNode<Context> context, TNode<JSGeneratorObject> generator,
TNode<JSPromise> promise, TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -161,12 +161,12 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
// Initialize resolve handler
TNode<HeapObject> on_resolve = InnerAllocate(base, kResolveClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
+ on_resolve_sfi);
// Initialize reject handler
TNode<HeapObject> on_reject = InnerAllocate(base, kRejectClosureOffset);
InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
+ on_reject_sfi);
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
@@ -190,8 +190,8 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TNode<Object> AsyncBuiltinsAssembler::Await(
TNode<Context> context, TNode<JSGeneratorObject> generator,
TNode<Object> value, TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught) {
TVARIABLE(Object, result);
Label if_old(this), if_new(this), done(this),
@@ -230,15 +230,14 @@ TNode<Object> AsyncBuiltinsAssembler::Await(
}
BIND(&if_old);
- result = AwaitOld(context, generator, value, outer_promise,
- on_resolve_context_index, on_reject_context_index,
- is_predicted_as_caught);
+ result = AwaitOld(context, generator, value, outer_promise, on_resolve_sfi,
+ on_reject_sfi, is_predicted_as_caught);
Goto(&done);
BIND(&if_new);
- result = AwaitOptimized(context, generator, CAST(value), outer_promise,
- on_resolve_context_index, on_reject_context_index,
- is_predicted_as_caught);
+ result =
+ AwaitOptimized(context, generator, CAST(value), outer_promise,
+ on_resolve_sfi, on_reject_sfi, is_predicted_as_caught);
Goto(&done);
BIND(&done);
@@ -247,7 +246,7 @@ TNode<Object> AsyncBuiltinsAssembler::Await(
void AsyncBuiltinsAssembler::InitializeNativeClosure(
TNode<Context> context, TNode<NativeContext> native_context,
- TNode<HeapObject> function, TNode<IntPtrT> context_index) {
+ TNode<HeapObject> function, TNode<SharedFunctionInfo> shared_info) {
TNode<Map> function_map = CAST(LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
// Ensure that we don't have to initialize prototype_or_initial_map field of
@@ -265,8 +264,6 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset,
RootIndex::kManyClosuresCell);
- TNode<SharedFunctionInfo> shared_info =
- CAST(LoadContextElement(native_context, context_index));
StoreObjectFieldNoWriteBarrier(
function, JSFunction::kSharedFunctionInfoOffset, shared_info);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
@@ -286,8 +283,8 @@ TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
TNode<NativeContext> native_context, TNode<Oddball> done) {
const TNode<Map> map = CAST(LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- const TNode<SharedFunctionInfo> on_fulfilled_shared = CAST(LoadContextElement(
- native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN));
+ const TNode<SharedFunctionInfo> on_fulfilled_shared =
+ AsyncIteratorValueUnwrapSharedFunConstant();
const TNode<Context> closure_context =
AllocateAsyncIteratorValueUnwrapContext(native_context, done);
return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 7b9c944f4a..833e78d45d 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -17,34 +17,23 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
protected:
// Perform steps to resume generator after `value` is resolved.
- // `on_reject_context_index` is an index into the Native Context, which should
- // point to a SharedFunctioninfo instance used to create the closure. The
- // value following the reject index should be a similar value for the resolve
- // closure. Returns the Promise-wrapped `value`.
+  // `on_resolve_sfi` is the SharedFunctionInfo instance used to create the
+  // resolve closure. `on_reject_sfi` is the SharedFunctionInfo instance used to
+  // create the reject closure. Returns the Promise-wrapped `value`.
TNode<Object> Await(TNode<Context> context,
TNode<JSGeneratorObject> generator, TNode<Object> value,
TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
TNode<Object> Await(TNode<Context> context,
TNode<JSGeneratorObject> generator, TNode<Object> value,
TNode<JSPromise> outer_promise,
- int on_resolve_context_index, int on_reject_context_index,
- TNode<Oddball> is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise,
- IntPtrConstant(on_resolve_context_index),
- IntPtrConstant(on_reject_context_index),
- is_predicted_as_caught);
- }
- TNode<Object> Await(TNode<Context> context,
- TNode<JSGeneratorObject> generator, TNode<Object> value,
- TNode<JSPromise> outer_promise,
- int on_resolve_context_index, int on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
bool is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise,
- on_resolve_context_index, on_reject_context_index,
- BooleanConstant(is_predicted_as_caught));
+ return Await(context, generator, value, outer_promise, on_resolve_sfi,
+ on_reject_sfi, BooleanConstant(is_predicted_as_caught));
}
// Return a new built-in function object as defined in
@@ -56,22 +45,22 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
void InitializeNativeClosure(TNode<Context> context,
TNode<NativeContext> native_context,
TNode<HeapObject> function,
- TNode<IntPtrT> context_index);
+ TNode<SharedFunctionInfo> shared_info);
TNode<Context> AllocateAsyncIteratorValueUnwrapContext(
TNode<NativeContext> native_context, TNode<Oddball> done);
TNode<Object> AwaitOld(TNode<Context> context,
TNode<JSGeneratorObject> generator,
TNode<Object> value, TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
TNode<Object> AwaitOptimized(TNode<Context> context,
TNode<JSGeneratorObject> generator,
TNode<JSPromise> promise,
TNode<JSPromise> outer_promise,
- TNode<IntPtrT> on_resolve_context_index,
- TNode<IntPtrT> on_reject_context_index,
+ TNode<SharedFunctionInfo> on_resolve_sfi,
+ TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
};
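
The header change above reflects a pattern that runs through this whole patch: Await and its callers stop passing Context::*_SHARED_FUN slot indices and instead pass the SharedFunctionInfo objects themselves, obtained from *SharedFunConstant() CSA helpers (generated elsewhere in this patch, outside this excerpt). A minimal contrast sketch of the two forms, using only names that appear in the hunks above:

  // Before: the SharedFunctionInfo was loaded out of the native context by slot index.
  //   TNode<SharedFunctionInfo> on_resolve_sfi = CAST(LoadContextElement(
  //       native_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN));
  // After: it is materialized directly as a constant, so the context load disappears.
  TNode<SharedFunctionInfo> on_resolve_sfi =
      AsyncFunctionAwaitResolveSharedFunConstant();

The async generator and proxy hunks below apply the same substitution.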
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 592400415b..2b6d720880 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -242,12 +242,10 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
request, AsyncGeneratorRequest::kPromiseOffset);
- const int resolve_index = Context::ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN;
- const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
SetGeneratorAwaiting(async_generator_object);
- Await(context, async_generator_object, value, outer_promise, resolve_index,
- reject_index, is_catchable);
+ Await(context, async_generator_object, value, outer_promise,
+ AsyncGeneratorAwaitResolveSharedFunConstant(),
+ AsyncGeneratorAwaitRejectSharedFunConstant(), is_catchable);
Return(UndefinedConstant());
}
@@ -573,12 +571,10 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
const TNode<JSPromise> outer_promise =
LoadPromiseFromAsyncGeneratorRequest(request);
- const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
- const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise, on_resolve, on_reject,
- is_caught);
+ Await(context, generator, value, outer_promise,
+ AsyncGeneratorYieldResolveSharedFunConstant(),
+ AsyncGeneratorAwaitRejectSharedFunConstant(), is_caught);
Return(UndefinedConstant());
}
@@ -623,19 +619,17 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
Label perform_await(this);
- TVARIABLE(IntPtrT, var_on_resolve,
- IntPtrConstant(
- Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
- TVARIABLE(
- IntPtrT, var_on_reject,
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN));
+ TVARIABLE(SharedFunctionInfo, var_on_resolve,
+ AsyncGeneratorReturnClosedResolveSharedFunConstant());
+
+ TVARIABLE(SharedFunctionInfo, var_on_reject,
+ AsyncGeneratorReturnClosedRejectSharedFunConstant());
const TNode<Smi> state = LoadGeneratorState(generator);
GotoIf(IsGeneratorStateClosed(state), &perform_await);
- var_on_resolve =
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN);
- var_on_reject =
- IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN);
+ var_on_resolve = AsyncGeneratorReturnResolveSharedFunConstant();
+ var_on_reject = AsyncGeneratorAwaitRejectSharedFunConstant();
+
Goto(&perform_await);
BIND(&perform_await);
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 1b53e9ca8e..d457e03314 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -317,7 +317,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
BIND(&if_generic);
{
Label if_iterator_fn_not_callable(this, Label::kDeferred),
- if_iterator_is_null_or_undefined(this, Label::kDeferred);
+ if_iterator_is_null_or_undefined(this, Label::kDeferred),
+ throw_spread_error(this, Label::kDeferred);
+ TVARIABLE(Smi, message_id);
GotoIf(IsNullOrUndefined(spread), &if_iterator_is_null_or_undefined);
@@ -336,10 +338,18 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_smiorobject, &if_double);
BIND(&if_iterator_fn_not_callable);
- ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+      message_id = SmiConstant(
+          static_cast<int>(MessageTemplate::kIteratorSymbolNonCallable));
+      Goto(&throw_spread_error);
BIND(&if_iterator_is_null_or_undefined);
- CallRuntime(Runtime::kThrowSpreadArgIsNullOrUndefined, context, spread);
+ message_id = SmiConstant(
+ static_cast<int>(MessageTemplate::kNotIterableNoSymbolLoad));
+ Goto(&throw_spread_error);
+
+ BIND(&throw_spread_error);
+ CallRuntime(Runtime::kThrowSpreadArgError, context, message_id.value(),
+ spread);
Unreachable();
}
@@ -565,7 +575,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
TNode<RawPtrT> callback =
- LoadObjectField<RawPtrT>(foreign, Foreign::kForeignAddressOffset);
+ DecodeExternalPointer(LoadForeignForeignAddress(foreign));
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index d1082291ef..5b7807ed4a 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -76,7 +76,11 @@ BUILTIN(CallSitePrototypeGetFunction) {
GetFrameIndex(isolate, recv));
StackFrameBase* frame = it.Frame();
- if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
+ if (frame->IsStrict() ||
+ (frame->GetFunction()->IsJSFunction() &&
+ JSFunction::cast(*frame->GetFunction()).shared().is_toplevel())) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index df0ebce993..2f0e5a7560 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -334,8 +334,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
}
BIND(&if_exception);
{
- iterator_assembler.IteratorCloseOnException(context, iterator,
- var_exception.value());
+ IteratorCloseOnException(context, iterator);
+ CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ Unreachable();
}
BIND(&exit);
}
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 01f43c7fd9..1718ea97ad 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -102,6 +102,7 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
Handle<JSDataView>::cast(result)->set_data_pointer(
+ isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
// 14. Return O.
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 98c1343d2c..a320033035 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -50,11 +50,14 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(
BIND(&stamp_mismatch);
}
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
TNode<Smi> field_index_smi = SmiConstant(field_index);
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::get_date_field_function());
TNode<Object> result = CAST(CallCFunction(
function, MachineType::AnyTagged(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
std::make_pair(MachineType::AnyTagged(), date_receiver),
std::make_pair(MachineType::AnyTagged(), field_index_smi)));
Return(result);
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 7ed38062c8..84ddf55f6f 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -498,11 +498,6 @@ namespace internal {
CPP(ErrorConstructor) \
CPP(ErrorCaptureStackTrace) \
CPP(ErrorPrototypeToString) \
- CPP(MakeError) \
- CPP(MakeRangeError) \
- CPP(MakeSyntaxError) \
- CPP(MakeTypeError) \
- CPP(MakeURIError) \
\
/* Function */ \
CPP(FunctionConstructor) \
@@ -579,7 +574,9 @@ namespace internal {
/* IterableToList */ \
/* ES #sec-iterabletolist */ \
TFS(IterableToList, kIterable, kIteratorFn) \
+ TFS(IterableToFixedArray, kIterable, kIteratorFn) \
TFS(IterableToListWithSymbolLookup, kIterable) \
+ TFS(IterableToFixedArrayWithSymbolLookupSlow, kIterable) \
TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \
TFS(IterableToFixedArrayForWasm, kIterable, kExpectedLength) \
\
@@ -678,18 +675,12 @@ namespace internal {
TFJ(ObjectKeys, 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
- /* ES6 #sec-object.prototype.tostring */ \
- TFJ(ObjectPrototypeToString, 0, kReceiver) \
- /* ES6 #sec-object.prototype.valueof */ \
- TFJ(ObjectPrototypeValueOf, 0, kReceiver) \
/* ES6 #sec-object.prototype.hasownproperty */ \
TFJ(ObjectPrototypeHasOwnProperty, 1, kReceiver, kKey) \
TFJ(ObjectPrototypeIsPrototypeOf, 1, kReceiver, kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
- /* ES #sec-object.prototype.tolocalestring */ \
- TFJ(ObjectPrototypeToLocaleString, 0, kReceiver) \
CPP(ObjectSeal) \
TFS(ObjectToString, kReceiver) \
TFJ(ObjectValues, 1, kReceiver, kObject) \
@@ -702,9 +693,6 @@ namespace internal {
TFS(ForInEnumerate, kReceiver) \
TFS(ForInFilter, kKey, kObject) \
\
- /* Promise */ \
- CPP(IsPromise) \
- \
/* Reflect */ \
ASM(ReflectApply, JSTrampoline) \
ASM(ReflectConstruct, JSTrampoline) \
@@ -851,35 +839,17 @@ namespace internal {
/* Wasm */ \
ASM(WasmCompileLazy, Dummy) \
ASM(WasmDebugBreak, Dummy) \
+ TFC(WasmFloat32ToNumber, WasmFloat32ToNumber) \
+ TFC(WasmFloat64ToNumber, WasmFloat64ToNumber) \
+ TFS(WasmAllocateArray, kMapIndex, kLength, kElementSize) \
+ TFS(WasmAllocateStruct, kMapIndex) \
TFC(WasmAtomicNotify, WasmAtomicNotify) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
- TFC(WasmMemoryGrow, WasmMemoryGrow) \
TFC(WasmTableInit, WasmTableInit) \
TFC(WasmTableCopy, WasmTableCopy) \
- TFC(WasmTableGet, WasmTableGet) \
- TFC(WasmTableSet, WasmTableSet) \
- TFC(WasmStackGuard, NoContext) \
- TFC(WasmStackOverflow, NoContext) \
- TFC(WasmThrow, WasmThrow) \
- TFC(WasmRethrow, WasmThrow) \
- TFS(WasmTraceMemory, kMemoryTracingInfo) \
- TFS(ThrowWasmTrapUnreachable) \
- TFS(ThrowWasmTrapMemOutOfBounds) \
- TFS(ThrowWasmTrapUnalignedAccess) \
- TFS(ThrowWasmTrapDivByZero) \
- TFS(ThrowWasmTrapDivUnrepresentable) \
- TFS(ThrowWasmTrapRemByZero) \
- TFS(ThrowWasmTrapFloatUnrepresentable) \
- TFS(ThrowWasmTrapFuncInvalid) \
- TFS(ThrowWasmTrapFuncSigMismatch) \
- TFS(ThrowWasmTrapDataSegmentDropped) \
- TFS(ThrowWasmTrapElemSegmentDropped) \
- TFS(ThrowWasmTrapTableOutOfBounds) \
- TFS(ThrowWasmTrapBrOnExnNullRef) \
- TFS(ThrowWasmTrapRethrowNullRef) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
@@ -976,8 +946,6 @@ namespace internal {
CPP(Trace) \
\
/* Weak refs */ \
- CPP(FinalizationRegistryCleanupIteratorNext) \
- CPP(FinalizationRegistryCleanupSome) \
CPP(FinalizationRegistryConstructor) \
CPP(FinalizationRegistryRegister) \
CPP(FinalizationRegistryUnregister) \
@@ -1164,6 +1132,7 @@ namespace internal {
V(AsyncGeneratorAwaitCaught) \
V(AsyncGeneratorAwaitUncaught) \
V(PromiseAll) \
+ V(PromiseAny) \
V(PromiseConstructor) \
V(PromiseConstructorLazyDeoptContinuation) \
V(PromiseFulfillReactionJob) \
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 3634362205..840298eacb 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -18,24 +18,9 @@ namespace internal {
// ES6 section 19.5.1.1 Error ( message )
BUILTIN(ErrorConstructor) {
HandleScope scope(isolate);
-
- FrameSkipMode mode = SKIP_FIRST;
- Handle<Object> caller;
-
- // When we're passed a JSFunction as new target, we can skip frames until that
- // specific function is seen instead of unconditionally skipping the first
- // frame.
- if (args.new_target()->IsJSFunction()) {
- mode = SKIP_UNTIL_SEEN;
- caller = args.new_target();
- }
-
RETURN_RESULT_OR_FAILURE(
- isolate,
- ErrorUtils::Construct(isolate, args.target(),
- Handle<Object>::cast(args.new_target()),
- args.atOrUndefined(isolate, 1), mode, caller,
- ErrorUtils::StackTraceCollection::kDetailed));
+ isolate, ErrorUtils::Construct(isolate, args.target(), args.new_target(),
+ args.atOrUndefined(isolate, 1)));
}
// static
@@ -85,53 +70,5 @@ BUILTIN(ErrorPrototypeToString) {
ErrorUtils::ToString(isolate, args.receiver()));
}
-namespace {
-
-Object MakeGenericError(Isolate* isolate, BuiltinArguments args,
- Handle<JSFunction> constructor) {
- Handle<Object> template_index = args.atOrUndefined(isolate, 1);
- Handle<Object> arg0 = args.atOrUndefined(isolate, 2);
- Handle<Object> arg1 = args.atOrUndefined(isolate, 3);
- Handle<Object> arg2 = args.atOrUndefined(isolate, 4);
-
- DCHECK(template_index->IsSmi());
-
- return *ErrorUtils::MakeGenericError(
- isolate, constructor, MessageTemplateFromInt(Smi::ToInt(*template_index)),
- arg0, arg1, arg2, SKIP_NONE);
-}
-
-} // namespace
-
-BUILTIN(MakeError) {
- HandleScope scope(isolate);
- return MakeGenericError(isolate, args, isolate->error_function());
-}
-
-BUILTIN(MakeRangeError) {
- HandleScope scope(isolate);
- return MakeGenericError(isolate, args, isolate->range_error_function());
-}
-
-BUILTIN(MakeSyntaxError) {
- HandleScope scope(isolate);
- return MakeGenericError(isolate, args, isolate->syntax_error_function());
-}
-
-BUILTIN(MakeTypeError) {
- HandleScope scope(isolate);
- return MakeGenericError(isolate, args, isolate->type_error_function());
-}
-
-BUILTIN(MakeURIError) {
- HandleScope scope(isolate);
- Handle<JSFunction> constructor = isolate->uri_error_function();
- Handle<Object> undefined = isolate->factory()->undefined_value();
- MessageTemplate template_index = MessageTemplate::kURIMalformed;
- return *ErrorUtils::MakeGenericError(isolate, constructor, template_index,
- undefined, undefined, undefined,
- SKIP_NONE);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 48a137abc6..0325ddab7c 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -158,31 +158,26 @@ TNode<Object> HandlerBuiltinsAssembler::EmitKeyedSloppyArguments(
TNode<IntPtrT> backing_store_length =
LoadAndUntagFixedArrayBaseLength(backing_store);
- if (access_mode == ArgumentsAccessMode::kHas) {
- Label out_of_bounds(this);
- GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length),
- &out_of_bounds);
- TNode<Object> result = LoadFixedArrayElement(backing_store, key);
- var_result =
- SelectBooleanConstant(TaggedNotEqual(result, TheHoleConstant()));
- Goto(&end);
-
- BIND(&out_of_bounds);
- var_result = FalseConstant();
- Goto(&end);
+
+    // Out-of-bounds access may involve a prototype chain walk and is handled
+    // in the runtime.
+ GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
+
+ // The key falls into unmapped range.
+ if (access_mode == ArgumentsAccessMode::kStore) {
+ StoreFixedArrayElement(backing_store, key, *value);
} else {
- GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
+ TNode<Object> value = LoadFixedArrayElement(backing_store, key);
+ GotoIf(TaggedEqual(value, TheHoleConstant()), bailout);
- // The key falls into unmapped range.
- if (access_mode == ArgumentsAccessMode::kLoad) {
- TNode<Object> result = LoadFixedArrayElement(backing_store, key);
- GotoIf(TaggedEqual(result, TheHoleConstant()), bailout);
- var_result = result;
+ if (access_mode == ArgumentsAccessMode::kHas) {
+ var_result = TrueConstant();
} else {
- StoreFixedArrayElement(backing_store, key, *value);
+ DCHECK_EQ(access_mode, ArgumentsAccessMode::kLoad);
+ var_result = value;
}
- Goto(&end);
}
+ Goto(&end);
}
BIND(&end);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 6f4f54656d..61f03b3f99 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -7,7 +7,8 @@
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/macro-assembler.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/execution/frame-constants.h"
+#include "src/heap/memory-chunk.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/logging/counters.h"
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 3b624af91b..7c8cde70dd 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -604,23 +604,35 @@ BUILTIN(ListFormatSupportedLocalesOf) {
JSListFormat::GetAvailableLocales(), locales, options));
}
-namespace {
+// Intl.Locale implementation
+BUILTIN(LocaleConstructor) {
+ HandleScope scope(isolate);
+
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
+
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Intl.Locale")));
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<Object> tag = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
-MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
- Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target,
- Handle<Object> tag, Handle<Object> options) {
Handle<Map> map;
// 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget,
// %LocalePrototype%, internalSlotsList).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, map, JSFunction::GetDerivedMap(isolate, constructor, new_target),
- JSLocale);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
// 7. If Type(tag) is not String or Object, throw a TypeError exception.
if (!tag->IsString() && !tag->IsJSReceiver()) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kLocaleNotEmpty),
- JSLocale);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kLocaleNotEmpty));
}
Handle<String> locale_string;
@@ -631,8 +643,8 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
locale_string = JSLocale::ToString(isolate, Handle<JSLocale>::cast(tag));
} else { // 9. Else,
// a. Let tag be ? ToString(tag).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, locale_string,
- Object::ToString(isolate, tag), JSLocale);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_string,
+ Object::ToString(isolate, tag));
}
Handle<JSReceiver> options_object;
@@ -642,60 +654,24 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
options_object = isolate->factory()->NewJSObjectWithNullProto();
} else { // 11. Else
// a. Let options be ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options_object,
- Object::ToObject(isolate, options), JSLocale);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, options_object,
+ Object::ToObject(isolate, options));
}
- return JSLocale::New(isolate, map, locale_string, options_object);
-}
-
-} // namespace
-
-// Intl.Locale implementation
-BUILTIN(LocaleConstructor) {
- HandleScope scope(isolate);
-
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
-
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(
- "Intl.Locale")));
- }
- // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
-
- Handle<Object> tag = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
-
RETURN_RESULT_OR_FAILURE(
- isolate, CreateLocale(isolate, target, new_target, tag, options));
+ isolate, JSLocale::New(isolate, map, locale_string, options_object));
}
BUILTIN(LocalePrototypeMaximize) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.maximize");
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_locale_function(), isolate);
- Handle<String> locale_str = JSLocale::ToString(isolate, locale);
- RETURN_RESULT_OR_FAILURE(
- isolate, CreateLocale(isolate, constructor, constructor,
- JSLocale::Maximize(isolate, *locale_str),
- isolate->factory()->NewJSObjectWithNullProto()));
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Maximize(isolate, locale));
}
BUILTIN(LocalePrototypeMinimize) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.minimize");
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_locale_function(), isolate);
- Handle<String> locale_str = JSLocale::ToString(isolate, locale);
- RETURN_RESULT_OR_FAILURE(
- isolate, CreateLocale(isolate, constructor, constructor,
- JSLocale::Minimize(isolate, *locale_str),
- isolate->factory()->NewJSObjectWithNullProto()));
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Minimize(isolate, locale));
}
BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 94a79d2a32..9f3ec5c323 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -133,59 +133,34 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
return var_value.value();
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(
- TNode<Context> context, const IteratorRecord& iterator, Label* if_exception,
- TVariable<Object>* exception) {
- // Perform ES #sec-iteratorclose when an exception occurs. This simpler
- // algorithm does not include redundant steps which are never reachable from
- // the spec IteratorClose algorithm.
- DCHECK((if_exception != nullptr && exception != nullptr));
- CSA_ASSERT(this, IsNotTheHole(exception->value()));
- CSA_ASSERT(this, IsJSReceiver(iterator.object));
-
- // Let return be ? GetMethod(iterator, "return").
- TNode<Object> method;
- {
- compiler::ScopedExceptionHandler handler(this, if_exception, exception);
- method = GetProperty(context, iterator.object, factory()->return_string());
- }
-
- // If return is undefined, return Completion(completion).
- GotoIf(Word32Or(IsUndefined(method), IsNull(method)), if_exception);
-
- {
-    // Let innerResult be Call(return, iterator, « »).
- // If an exception occurs, the original exception remains bound.
- compiler::ScopedExceptionHandler handler(this, if_exception, nullptr);
- Call(context, method, iterator.object);
- }
-
- // (If completion.[[Type]] is throw) return Completion(completion).
- Goto(if_exception);
+TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
+ TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
+ GrowableFixedArray values(state());
+ FillFixedArrayFromIterable(context, iterable, iterator_fn, &values);
+ return values.ToJSArray(context);
}
-void IteratorBuiltinsAssembler::IteratorCloseOnException(
- TNode<Context> context, const IteratorRecord& iterator,
- TNode<Object> exception) {
- Label rethrow(this, Label::kDeferred);
- TVARIABLE(Object, exception_variable, exception);
- IteratorCloseOnException(context, iterator, &rethrow, &exception_variable);
-
- BIND(&rethrow);
- CallRuntime(Runtime::kReThrow, context, exception_variable.value());
- Unreachable();
+TNode<FixedArray> IteratorBuiltinsAssembler::IterableToFixedArray(
+ TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
+ GrowableFixedArray values(state());
+ FillFixedArrayFromIterable(context, iterable, iterator_fn, &values);
+ TNode<FixedArray> new_array = values.ToFixedArray();
+ return new_array;
}
-TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
- TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
+void IteratorBuiltinsAssembler::FillFixedArrayFromIterable(
+ TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn,
+ GrowableFixedArray* values) {
// 1. Let iteratorRecord be ? GetIterator(items, method).
IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn);
// 2. Let values be a new empty List.
- GrowableFixedArray values(state());
- Label loop_start(
- this, {values.var_array(), values.var_length(), values.var_capacity()}),
+ // The GrowableFixedArray has already been created. It's ok if we do this step
+ // out of order, since creating an empty List is not observable.
+
+ Label loop_start(this, {values->var_array(), values->var_length(),
+ values->var_capacity()}),
done(this);
Goto(&loop_start);
// 3. Let next be true.
@@ -198,12 +173,11 @@ TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
// i. Let nextValue be ? IteratorValue(next).
TNode<Object> next_value = IteratorValue(context, next);
// ii. Append nextValue to the end of the List values.
- values.Push(next_value);
+ values->Push(next_value);
Goto(&loop_start);
}
BIND(&done);
- return values.ToJSArray(context);
}
TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
@@ -214,31 +188,26 @@ TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
Return(IterableToList(context, iterable, iterator_fn));
}
+TF_BUILTIN(IterableToFixedArray, IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+
+ Return(IterableToFixedArray(context, iterable, iterator_fn));
+}
+
TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
TNode<Smi> expected_length = CAST(Parameter(Descriptor::kExpectedLength));
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
-
- IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn);
-
GrowableFixedArray values(state());
- Label loop_start(
- this, {values.var_array(), values.var_length(), values.var_capacity()}),
- compare_length(this), done(this);
- Goto(&loop_start);
- BIND(&loop_start);
- {
- TNode<JSReceiver> next =
- IteratorStep(context, iterator_record, &compare_length);
- TNode<Object> next_value = IteratorValue(context, next);
- values.Push(next_value);
- Goto(&loop_start);
- }
+ Label done(this);
+
+ FillFixedArrayFromIterable(context, iterable, iterator_fn, &values);
- BIND(&compare_length);
GotoIf(WordEqual(SmiUntag(expected_length), values.var_length()->value()),
&done);
Return(CallRuntime(
@@ -299,7 +268,9 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
// 2. Return ? IteratorClose(iteratorRecord, error).
BIND(&if_exception);
- IteratorCloseOnException(context, iterator_record, var_exception.value());
+ IteratorCloseOnException(context, iterator_record);
+ CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ Unreachable();
}
}
@@ -452,5 +423,17 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
Return(result);
}
+// This builtin creates a FixedArray based on an Iterable and doesn't have a
+// fast path for anything.
+TF_BUILTIN(IterableToFixedArrayWithSymbolLookupSlow,
+ IteratorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+
+ TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
+ TailCallBuiltin(Builtins::kIterableToFixedArray, context, iterable,
+ iterator_fn);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 4d496fa384..6cea2c77ff 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -12,6 +12,8 @@ namespace internal {
using compiler::Node;
+class GrowableFixedArray;
+
class IteratorBuiltinsAssembler : public CodeStubAssembler {
public:
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
@@ -50,21 +52,21 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
TNode<Context> context, TNode<JSReceiver> result,
base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt);
- // https://tc39.github.io/ecma262/#sec-iteratorclose
- void IteratorCloseOnException(TNode<Context> context,
- const IteratorRecord& iterator,
- Label* if_exception,
- TVariable<Object>* exception);
- void IteratorCloseOnException(TNode<Context> context,
- const IteratorRecord& iterator,
- TNode<Object> exception);
-
// #sec-iterabletolist
// Build a JSArray by iterating over {iterable} using {iterator_fn},
  // following the ECMAScript operation with the same name.
TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
TNode<Object> iterator_fn);
+ TNode<FixedArray> IterableToFixedArray(TNode<Context> context,
+ TNode<Object> iterable,
+ TNode<Object> iterator_fn);
+
+ void FillFixedArrayFromIterable(TNode<Context> context,
+ TNode<Object> iterable,
+ TNode<Object> iterator_fn,
+ GrowableFixedArray* values);
+
// Currently at https://tc39.github.io/proposal-intl-list-format/
// #sec-createstringlistfromiterable
TNode<JSArray> StringListFromIterable(TNode<Context> context,
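
For orientation, the new declarations above correspond to the refactoring of builtins-iterator-gen.cc earlier in this diff: the iteration loop now lives once in FillFixedArrayFromIterable, and IterableToList / IterableToFixedArray differ only in how the collected elements are materialized. A rough sketch of the shared call shape, using only names from the hunks above (not a complete builtin):

  GrowableFixedArray values(state());
  FillFixedArrayFromIterable(context, iterable, iterator_fn, &values);
  TNode<JSArray> as_js_array = values.ToJSArray(context);    // IterableToList
  TNode<FixedArray> as_fixed_array = values.ToFixedArray();  // IterableToFixedArray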
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index e6787b2da8..e613ae9c08 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -53,8 +53,8 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
- return LoadObjectField<RawPtrT>(native_context,
- NativeContext::kMicrotaskQueueOffset);
+ return DecodeExternalPointer(LoadObjectField<ExternalPointerT>(
+ native_context, NativeContext::kMicrotaskQueueOffset));
}
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer(
@@ -198,18 +198,11 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- CAST(promise_to_resolve));
-
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
promise_to_resolve, thenable, then);
}
-
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- CAST(promise_to_resolve));
-
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
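
The GetMicrotaskQueue change above is one instance of a pattern that recurs in this diff (see also CallFunctionTemplate in builtins-call-gen.cc and DirectStringData in builtins-string-gen.cc): fields holding raw off-heap pointers are now loaded as ExternalPointerT and passed through DecodeExternalPointer rather than being read directly as RawPtrT. A generic sketch, where obj and kFieldOffset are hypothetical placeholders rather than names from this patch:

  // Before: the raw pointer was read straight out of the heap object field.
  //   TNode<RawPtrT> ptr = LoadObjectField<RawPtrT>(obj, kFieldOffset);
  // After: the encoded external pointer is loaded and then decoded.
  TNode<RawPtrT> ptr = DecodeExternalPointer(
      LoadObjectField<ExternalPointerT>(obj, kFieldOffset));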
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 9af4affa68..0604549558 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -350,22 +350,6 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
return TNode<JSArray>::UncheckedCast(array);
}
-TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-
- Label if_null_or_undefined(this, Label::kDeferred);
- GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
-
- TNode<Object> method =
- GetProperty(context, receiver, factory()->toString_string());
- Return(Call(context, method, receiver));
-
- BIND(&if_null_or_undefined);
- ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
- "Object.prototype.toLocaleString");
-}
-
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
TNode<Object> object = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> key = CAST(Parameter(Descriptor::kKey));
@@ -724,20 +708,13 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Return(FalseConstant());
}
-// ES #sec-object.prototype.tostring
-TF_BUILTIN(ObjectPrototypeToString, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(CallBuiltin(Builtins::kObjectToString, context, receiver));
-}
-
TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
- Label checkstringtag(this), if_apiobject(this, Label::kDeferred),
- if_arguments(this), if_array(this), if_boolean(this), if_date(this),
- if_error(this), if_function(this), if_number(this, Label::kDeferred),
- if_object(this), if_primitive(this), if_proxy(this, Label::kDeferred),
- if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred),
- if_value(this), if_bigint(this, Label::kDeferred);
+ Label checkstringtag(this), if_arguments(this), if_array(this),
+ if_boolean(this), if_date(this), if_error(this), if_function(this),
+ if_number(this, Label::kDeferred), if_object(this), if_primitive(this),
+ if_proxy(this, Label::kDeferred), if_regexp(this), if_string(this),
+ if_symbol(this, Label::kDeferred), if_value(this),
+ if_bigint(this, Label::kDeferred);
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -763,8 +740,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
{JS_ARGUMENTS_OBJECT_TYPE, &if_arguments},
{JS_DATE_TYPE, &if_date},
{JS_BOUND_FUNCTION_TYPE, &if_function},
- {JS_API_OBJECT_TYPE, &if_apiobject},
- {JS_SPECIAL_API_OBJECT_TYPE, &if_apiobject},
+ {JS_API_OBJECT_TYPE, &if_object},
+ {JS_SPECIAL_API_OBJECT_TYPE, &if_object},
{JS_PROXY_TYPE, &if_proxy},
{JS_ERROR_TYPE, &if_error},
{JS_PRIMITIVE_WRAPPER_TYPE, &if_value}};
@@ -778,25 +755,6 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
Switch(receiver_instance_type, &if_object, case_values, case_labels,
arraysize(case_values));
- BIND(&if_apiobject);
- {
- // Lookup the @@toStringTag property on the {receiver}.
- TVARIABLE(Object, var_tag,
- GetProperty(context, receiver,
- isolate()->factory()->to_string_tag_symbol()));
- Label if_tagisnotstring(this), if_tagisstring(this);
- GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring);
- Branch(IsString(CAST(var_tag.value())), &if_tagisstring,
- &if_tagisnotstring);
- BIND(&if_tagisnotstring);
- {
- var_tag = CallRuntime(Runtime::kClassOf, context, receiver);
- Goto(&if_tagisstring);
- }
- BIND(&if_tagisstring);
- ReturnToStringFormat(context, CAST(var_tag.value()));
- }
-
BIND(&if_arguments);
{
var_default = ArgumentsToStringConstant();
@@ -1053,14 +1011,6 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
}
}
-// ES6 #sec-object.prototype.valueof
-TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Return(ToObject_Inline(context, receiver));
-}
-
// ES #sec-object.create
TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPrototypeArg = 0;
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
deleted file mode 100644
index 5eca1eb9c0..0000000000
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-promise.h"
-
-#include "src/builtins/builtins-utils-inl.h"
-#include "src/builtins/builtins.h"
-#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/logging/counters.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-BUILTIN(IsPromise) {
- SealHandleScope scope(isolate);
-
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(object->IsJSPromise());
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
index a97ab7ad1d..fd938ff841 100644
--- a/deps/v8/src/builtins/builtins-promise.h
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -40,6 +40,18 @@ class PromiseBuiltins {
kPromiseAllResolveElementLength
};
+ enum PromiseAnyRejectElementContextSlots {
+ // Remaining elements count
+ kPromiseAnyRejectElementRemainingSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // Promise capability from Promise.any
+ kPromiseAnyRejectElementCapabilitySlot,
+
+ // errors array from Promise.any
+ kPromiseAnyRejectElementErrorsArraySlot,
+ kPromiseAnyRejectElementLength
+ };
+
enum FunctionContextSlot {
kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index caafcf6506..f398a6c282 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -76,8 +76,7 @@ TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
CreateProxyRevokeFunctionContext(proxy, native_context);
const TNode<Map> revoke_map = CAST(LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- const TNode<SharedFunctionInfo> revoke_info = CAST(
- LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN));
+ const TNode<SharedFunctionInfo> revoke_info = ProxyRevokeSharedFunConstant();
return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
proxy_context);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index d06ced76d2..b9c1b8980e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -528,7 +528,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
data, JSRegExp::kIrregexpCaptureCountIndex));
// capture_count is the number of captures without the match itself.
// Required registers = (capture_count + 1) * 2.
- STATIC_ASSERT(Internals::IsValidSmi((JSRegExp::kMaxCaptures + 1) << 1));
+ STATIC_ASSERT(Internals::IsValidSmi((JSRegExp::kMaxCaptures + 1) * 2));
TNode<Smi> register_count =
SmiShl(SmiAdd(capture_count, SmiConstant(1)), 1);
@@ -729,13 +729,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
// This should only be needed for String.p.(split||matchAll), but we are
// conservative here.
- // Note: we are using the current native context here, which may or may not
- // match the object's native context. That's fine: in case of a mismatch, we
- // will bail in the next step when comparing the object's map against the
- // current native context's initial regexp map.
- TNode<NativeContext> native_context = LoadNativeContext(context);
- GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified);
+ GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified);
+ TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<JSFunction> regexp_fun =
CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
TNode<Map> initial_map = CAST(
@@ -1336,10 +1332,10 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
// 9. Set iterator.[[Done]] to false.
TNode<Int32T> global_flag =
Word32Shl(ReinterpretCast<Int32T>(global),
- Int32Constant(JSRegExpStringIterator::kGlobalBit));
+ Int32Constant(JSRegExpStringIterator::GlobalBit::kShift));
TNode<Int32T> unicode_flag =
Word32Shl(ReinterpretCast<Int32T>(full_unicode),
- Int32Constant(JSRegExpStringIterator::kUnicodeBit));
+ Int32Constant(JSRegExpStringIterator::UnicodeBit::kShift));
TNode<Int32T> iterator_flags = Word32Or(global_flag, unicode_flag);
StoreObjectFieldNoWriteBarrier(iterator, JSRegExpStringIterator::kFlagsOffset,
SmiFromInt32(iterator_flags));
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 3049b01d2f..010bf965cc 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -89,7 +89,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
BIND(&not_float_or_clamped);
*out_elements_kind = elements_kind;
- TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(array_buffer);
TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(array);
*out_backing_store = RawPtrAdd(backing_store, Signed(byte_offset));
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index e2d1635274..7ccb99792e 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -20,10 +20,10 @@ namespace internal {
using Node = compiler::Node;
-TNode<IntPtrT> StringBuiltinsAssembler::DirectStringData(
+TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
TNode<String> string, TNode<Word32T> string_instance_type) {
// Compute the effective offset of the first character.
- TVARIABLE(IntPtrT, var_data);
+ TVARIABLE(RawPtrT, var_data);
Label if_sequential(this), if_external(this), if_join(this);
Branch(Word32Equal(Word32And(string_instance_type,
Int32Constant(kStringRepresentationMask)),
@@ -32,9 +32,9 @@ TNode<IntPtrT> StringBuiltinsAssembler::DirectStringData(
BIND(&if_sequential);
{
- var_data = IntPtrAdd(
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- BitcastTaggedToWord(string));
+ var_data = RawPtrAdd(
+ ReinterpretCast<RawPtrT>(BitcastTaggedToWord(string)),
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
Goto(&if_join);
}
@@ -47,7 +47,7 @@ TNode<IntPtrT> StringBuiltinsAssembler::DirectStringData(
Int32Constant(kUncachedExternalStringMask)),
Int32Constant(kUncachedExternalStringTag)));
var_data =
- LoadObjectField<IntPtrT>(string, ExternalString::kResourceDataOffset);
+ DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
Goto(&if_join);
}
@@ -254,8 +254,8 @@ void StringBuiltinsAssembler::StringEqual_Loop(
CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Compute the effective offset of the first character.
- TNode<IntPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
- TNode<IntPtrT> rhs_data = DirectStringData(rhs, rhs_instance_type);
+ TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
+ TNode<RawPtrT> rhs_data = DirectStringData(rhs, rhs_instance_type);
// Loop over the {lhs} and {rhs} strings to see if they are equal.
TVARIABLE(IntPtrT, var_offset, IntPtrConstant(0));
@@ -1635,6 +1635,12 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
ToDirectStringAssembler to_direct(state(), subject_string);
to_direct.TryToDirect(&call_runtime);
+
+ // The extracted direct string may be two-byte even though the wrapping
+ // string is one-byte.
+ GotoIfNot(IsOneByteStringInstanceType(to_direct.instance_type()),
+ &call_runtime);
+
TNode<FixedArray> elements = CAST(AllocateFixedArray(
PACKED_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
// Don't allocate anything while {string_data} is live!
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 93b2086dd7..2b4dadbbb0 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -67,7 +67,7 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<Word32T> rhs_instance_type, MachineType rhs_type,
TNode<IntPtrT> length, Label* if_equal,
Label* if_not_equal);
- TNode<IntPtrT> DirectStringData(TNode<String> string,
+ TNode<RawPtrT> DirectStringData(TNode<String> string,
TNode<Word32T> string_instance_type);
void DispatchOnStringEncodings(const TNode<Word32T> lhs_instance_type,
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 61cd984e7f..a4edc94418 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -5,222 +5,215 @@
#include 'src/builtins/builtins-string-gen.h'
namespace string {
- extern macro StringBuiltinsAssembler::SubString(String, uintptr, uintptr):
- String;
-
- // ES6 #sec-string.prototype.tostring
- transitioning javascript builtin
- StringPrototypeToString(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return ToThisValue(
- receiver, PrimitiveType::kString, 'String.prototype.toString');
- }
-
- // ES6 #sec-string.prototype.valueof
- transitioning javascript builtin
- StringPrototypeValueOf(js-implicit context: NativeContext, receiver: JSAny)():
- JSAny {
- return ToThisValue(
- receiver, PrimitiveType::kString, 'String.prototype.valueOf');
- }
+extern macro StringBuiltinsAssembler::SubString(
+ String, uintptr, uintptr): String;
+
+// ES6 #sec-string.prototype.tostring
+transitioning javascript builtin
+StringPrototypeToString(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return ToThisValue(
+ receiver, PrimitiveType::kString, 'String.prototype.toString');
+}
- extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
- String, intptr, intptr, constexpr UnicodeEncoding): int32;
- extern macro StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint(
- int32): String;
-
- // This function assumes StringPrimitiveWithNoCustomIteration is true.
- transitioning builtin StringToList(implicit context: Context)(string: String):
- JSArray {
- const kind = ElementsKind::PACKED_ELEMENTS;
- const stringLength: intptr = string.length_intptr;
-
- const nativeContext = LoadNativeContext(context);
- const map: Map = LoadJSArrayElementsMap(kind, nativeContext);
- const array: JSArray = AllocateJSArray(
- kind, map, stringLength, SmiTag(stringLength),
- AllocationFlag::kAllowLargeObjectAllocation);
- const elements = UnsafeCast<FixedArray>(array.elements);
- const encoding = UnicodeEncoding::UTF16;
- let arrayLength: Smi = 0;
- let i: intptr = 0;
- while (i < stringLength) {
- const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding);
- const value: String = StringFromSingleUTF16EncodedCodePoint(ch);
- elements[arrayLength] = value;
- // Increment and continue the loop.
- i = i + value.length_intptr;
- arrayLength++;
- }
- assert(arrayLength >= 0);
- assert(SmiTag(stringLength) >= arrayLength);
- array.length = arrayLength;
+// ES6 #sec-string.prototype.valueof
+transitioning javascript builtin
+StringPrototypeValueOf(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return ToThisValue(
+ receiver, PrimitiveType::kString, 'String.prototype.valueOf');
+}
- return array;
+extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
+ String, intptr, intptr, constexpr UnicodeEncoding): int32;
+extern macro StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint(
+ int32): String;
+
+// This function assumes StringPrimitiveWithNoCustomIteration is true.
+transitioning builtin StringToList(implicit context: Context)(string: String):
+ JSArray {
+ const kind = ElementsKind::PACKED_ELEMENTS;
+ const stringLength: intptr = string.length_intptr;
+
+ const nativeContext = LoadNativeContext(context);
+ const map: Map = LoadJSArrayElementsMap(kind, nativeContext);
+ const array: JSArray = AllocateJSArray(
+ kind, map, stringLength, SmiTag(stringLength),
+ AllocationFlag::kAllowLargeObjectAllocation);
+ const elements = UnsafeCast<FixedArray>(array.elements);
+ const encoding = UnicodeEncoding::UTF16;
+ let arrayLength: Smi = 0;
+ let i: intptr = 0;
+ while (i < stringLength) {
+ const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding);
+ const value: String = StringFromSingleUTF16EncodedCodePoint(ch);
+ elements[arrayLength] = value;
+ // Increment and continue the loop.
+ i = i + value.length_intptr;
+ arrayLength++;
}
+ assert(arrayLength >= 0);
+ assert(SmiTag(stringLength) >= arrayLength);
+ array.length = arrayLength;
- transitioning macro GenerateStringAt(implicit context: Context)(
- receiver: JSAny, position: JSAny,
- methodName: constexpr string): never labels
- IfInBounds(String, uintptr, uintptr), IfOutOfBounds {
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const string: String = ToThisString(receiver, methodName);
-
- // 3. Let position be ? ToInteger(pos).
- const indexNumber: Number = ToInteger_Inline(position);
-
- // Convert the {position} to a uintptr and check that it's in bounds of
- // the {string}.
- typeswitch (indexNumber) {
- case (indexSmi: Smi): {
- const length: uintptr = string.length_uintptr;
- const index: uintptr = Unsigned(Convert<intptr>(indexSmi));
- // Max string length fits Smi range, so we can do an unsigned bounds
- // check.
- const kMaxStringLengthFitsSmi: constexpr bool =
- kStringMaxLengthUintptr < kSmiMaxValue;
- StaticAssert(kMaxStringLengthFitsSmi);
- if (index >= length) goto IfOutOfBounds;
- goto IfInBounds(string, index, length);
- }
- case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
- // Valid string indices fit into Smi range, so HeapNumber index is
- // definitely an out of bounds case.
- goto IfOutOfBounds;
- }
- }
- }
+ return array;
+}
- // ES6 #sec-string.prototype.charat
- transitioning javascript builtin StringPrototypeCharAt(
- js-implicit context: NativeContext,
- receiver: JSAny)(position: JSAny): JSAny {
- try {
- GenerateStringAt(receiver, position, 'String.prototype.charAt')
- otherwise IfInBounds, IfOutOfBounds;
- }
- label IfInBounds(string: String, index: uintptr, _length: uintptr) {
- const code: int32 = StringCharCodeAt(string, index);
- return StringFromSingleCharCode(code);
+transitioning macro GenerateStringAt(implicit context: Context)(
+ receiver: JSAny, position: JSAny,
+ methodName: constexpr string): never labels
+IfInBounds(String, uintptr, uintptr), IfOutOfBounds {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, methodName);
+
+ // 3. Let position be ? ToInteger(pos).
+ const indexNumber: Number = ToInteger_Inline(position);
+
+ // Convert the {position} to a uintptr and check that it's in bounds of
+ // the {string}.
+ typeswitch (indexNumber) {
+ case (indexSmi: Smi): {
+ const length: uintptr = string.length_uintptr;
+ const index: uintptr = Unsigned(Convert<intptr>(indexSmi));
+ // Max string length fits Smi range, so we can do an unsigned bounds
+ // check.
+ const kMaxStringLengthFitsSmi: constexpr bool =
+ kStringMaxLengthUintptr < kSmiMaxValue;
+ StaticAssert(kMaxStringLengthFitsSmi);
+ if (index >= length) goto IfOutOfBounds;
+ goto IfInBounds(string, index, length);
}
- label IfOutOfBounds {
- return kEmptyString;
+ case (indexHeapNumber: HeapNumber): {
+ assert(IsNumberNormalized(indexHeapNumber));
+ // Valid string indices fit into Smi range, so HeapNumber index is
+ // definitely an out of bounds case.
+ goto IfOutOfBounds;
}
}
+}
- // ES6 #sec-string.prototype.charcodeat
- transitioning javascript builtin StringPrototypeCharCodeAt(
- js-implicit context: NativeContext,
- receiver: JSAny)(position: JSAny): JSAny {
- try {
- GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
- otherwise IfInBounds, IfOutOfBounds;
- }
- label IfInBounds(string: String, index: uintptr, _length: uintptr) {
- const code: int32 = StringCharCodeAt(string, index);
- return Convert<Smi>(code);
- }
- label IfOutOfBounds {
- return kNaN;
- }
+// ES6 #sec-string.prototype.charat
+transitioning javascript builtin StringPrototypeCharAt(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(position: JSAny): JSAny {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.charAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ } label IfInBounds(string: String, index: uintptr, _length: uintptr) {
+ const code: int32 = StringCharCodeAt(string, index);
+ return StringFromSingleCharCode(code);
+ } label IfOutOfBounds {
+ return kEmptyString;
}
+}
- // ES6 #sec-string.prototype.codepointat
- transitioning javascript builtin StringPrototypeCodePointAt(
- js-implicit context: NativeContext,
- receiver: JSAny)(position: JSAny): JSAny {
- try {
- GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
- otherwise IfInBounds, IfOutOfBounds;
- }
- label IfInBounds(string: String, index: uintptr, length: uintptr) {
- // This is always a call to a builtin from Javascript, so we need to
- // produce UTF32.
- const code: int32 = LoadSurrogatePairAt(
- string, Signed(length), Signed(index), UnicodeEncoding::UTF32);
- return Convert<Smi>(code);
- }
- label IfOutOfBounds {
- return Undefined;
- }
+// ES6 #sec-string.prototype.charcodeat
+transitioning javascript builtin StringPrototypeCharCodeAt(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(position: JSAny): JSAny {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ } label IfInBounds(string: String, index: uintptr, _length: uintptr) {
+ const code: int32 = StringCharCodeAt(string, index);
+ return Convert<Smi>(code);
+ } label IfOutOfBounds {
+ return kNaN;
}
+}
- // ES6 String.prototype.concat(...args)
- // ES6 #sec-string.prototype.concat
- transitioning javascript builtin StringPrototypeConcat(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // Check that {receiver} is coercible to Object and convert it to a String.
- let string: String = ToThisString(receiver, 'String.prototype.concat');
-
- // Concatenate all the arguments passed to this builtin.
- const length: intptr = Convert<intptr>(arguments.length);
- for (let i: intptr = 0; i < length; i++) {
- const temp: String = ToString_Inline(arguments[i]);
- string = string + temp;
- }
- return string;
+// ES6 #sec-string.prototype.codepointat
+transitioning javascript builtin StringPrototypeCodePointAt(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(position: JSAny): JSAny {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ } label IfInBounds(string: String, index: uintptr, length: uintptr) {
+ // This is always a call to a builtin from JavaScript, so we need to
+ // produce UTF32.
+ const code: int32 = LoadSurrogatePairAt(
+ string, Signed(length), Signed(index), UnicodeEncoding::UTF32);
+ return Convert<Smi>(code);
+ } label IfOutOfBounds {
+ return Undefined;
+ }
+}
+
+// ES6 String.prototype.concat(...args)
+// ES6 #sec-string.prototype.concat
+transitioning javascript builtin StringPrototypeConcat(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ let string: String = ToThisString(receiver, 'String.prototype.concat');
+
+ // Concatenate all the arguments passed to this builtin.
+ const length: intptr = Convert<intptr>(arguments.length);
+ for (let i: intptr = 0; i < length; i++) {
+ const temp: String = ToString_Inline(arguments[i]);
+ string = string + temp;
}
+ return string;
+}
- extern transitioning runtime
- SymbolDescriptiveString(implicit context: Context)(Symbol): String;
-
- // ES #sec-string-constructor
- // https://tc39.github.io/ecma262/#sec-string-constructor
- transitioning javascript builtin StringConstructor(
- js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
- target: JSFunction)(...arguments): JSAny {
- const length: intptr = Convert<intptr>(arguments.length);
- let s: String;
- // 1. If no arguments were passed to this function invocation, let s be "".
- if (length == 0) {
- s = EmptyStringConstant();
- } else {
- // 2. Else,
- // 2. a. If NewTarget is undefined and Type(value) is Symbol, return
- // SymbolDescriptiveString(value).
- if (newTarget == Undefined) {
- typeswitch (arguments[0]) {
- case (value: Symbol): {
- return SymbolDescriptiveString(value);
- }
- case (JSAny): {
- }
+extern transitioning runtime
+SymbolDescriptiveString(implicit context: Context)(Symbol): String;
+
+// ES #sec-string-constructor
+// https://tc39.github.io/ecma262/#sec-string-constructor
+transitioning javascript builtin StringConstructor(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
+ target: JSFunction)(...arguments): JSAny {
+ const length: intptr = Convert<intptr>(arguments.length);
+ let s: String;
+ // 1. If no arguments were passed to this function invocation, let s be "".
+ if (length == 0) {
+ s = EmptyStringConstant();
+ } else {
+ // 2. Else,
+ // 2. a. If NewTarget is undefined and Type(value) is Symbol, return
+ // SymbolDescriptiveString(value).
+ if (newTarget == Undefined) {
+ typeswitch (arguments[0]) {
+ case (value: Symbol): {
+ return SymbolDescriptiveString(value);
+ }
+ case (JSAny): {
}
}
- // 2. b. Let s be ? ToString(value).
- s = ToString_Inline(arguments[0]);
- }
- // 3. If NewTarget is undefined, return s.
- if (newTarget == Undefined) {
- return s;
}
- // 4. Return ! StringCreate(s, ? GetPrototypeFromConstructor(NewTarget,
- // "%String.prototype%")).
- const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
- const obj =
- UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
- obj.value = s;
- return obj;
+ // 2. b. Let s be ? ToString(value).
+ s = ToString_Inline(arguments[0]);
}
-
- transitioning builtin StringAddConvertLeft(implicit context: Context)(
- left: JSAny, right: String): String {
- return ToStringImpl(context, ToPrimitiveDefault(left)) + right;
+ // 3. If NewTarget is undefined, return s.
+ if (newTarget == Undefined) {
+ return s;
}
+ // 4. Return ! StringCreate(s, ? GetPrototypeFromConstructor(NewTarget,
+ // "%String.prototype%")).
+ const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
+ const obj =
+ UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
+ obj.value = s;
+ return obj;
+}
- transitioning builtin StringAddConvertRight(implicit context: Context)(
- left: String, right: JSAny): String {
- return left + ToStringImpl(context, ToPrimitiveDefault(right));
- }
+transitioning builtin StringAddConvertLeft(implicit context: Context)(
+ left: JSAny, right: String): String {
+ return ToStringImpl(context, ToPrimitiveDefault(left)) + right;
+}
- builtin StringCharAt(implicit context: Context)(
- receiver: String, position: uintptr): String {
- // Load the character code at the {position} from the {receiver}.
- const code: int32 = StringCharCodeAt(receiver, position);
- // And return the single character string with only that {code}
- return StringFromSingleCharCode(code);
- }
+transitioning builtin StringAddConvertRight(implicit context: Context)(
+ left: String, right: JSAny): String {
+ return left + ToStringImpl(context, ToPrimitiveDefault(right));
+}
+
+builtin StringCharAt(implicit context: Context)(
+ receiver: String, position: uintptr): String {
+ // Load the character code at the {position} from the {receiver}.
+ const code: int32 = StringCharCodeAt(receiver, position);
+ // And return the single character string with only that {code}
+ return StringFromSingleCharCode(code);
+}
}
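
Note: the codePointAt path above uses LoadSurrogatePairAt with UnicodeEncoding::UTF32 to fold a lead/trail surrogate pair into one code point. As a standalone illustration only (plain C++, not V8's Torque or its helpers), the decoding it performs amounts to:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Decode the code point starting at `index` in a sequence of UTF-16 units.
uint32_t CodePointAt(const std::vector<uint16_t>& units, size_t index) {
  const uint16_t lead = units[index];
  // A lead surrogate followed by a trail surrogate encodes one supplementary
  // code point; a BMP unit or an unpaired surrogate is returned unchanged,
  // matching the behaviour of String.prototype.codePointAt.
  if (lead >= 0xD800 && lead <= 0xDBFF && index + 1 < units.size()) {
    const uint16_t trail = units[index + 1];
    if (trail >= 0xDC00 && trail <= 0xDFFF) {
      return 0x10000u + ((uint32_t{lead} - 0xD800u) << 10) + (trail - 0xDC00u);
    }
  }
  return lead;
}

int main() {
  const std::vector<uint16_t> s = {0xD83D, 0xDE00};  // U+1F600 as a pair
  std::printf("U+%X\n", CodePointAt(s, 0));          // prints U+1F600
}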
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index b067bb0249..e98b38d1a5 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -10,6 +10,10 @@
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
+#if defined(V8_USE_PERFETTO)
+#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -69,6 +73,7 @@ class MaybeUtf8 {
std::unique_ptr<uint8_t> allocated_;
};
+#if !defined(V8_USE_PERFETTO)
class JsonTraceValue : public ConvertableToTraceFormat {
public:
explicit JsonTraceValue(Isolate* isolate, Handle<String> object) {
@@ -91,6 +96,7 @@ const uint8_t* GetCategoryGroupEnabled(Isolate* isolate,
MaybeUtf8 category(isolate, string);
return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(*category);
}
+#endif // !defined(V8_USE_PERFETTO)
#undef MAX_STACK_LENGTH
@@ -104,8 +110,15 @@ BUILTIN(IsTraceCategoryEnabled) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kTraceEventCategoryError));
}
- return isolate->heap()->ToBoolean(
- *GetCategoryGroupEnabled(isolate, Handle<String>::cast(category)));
+ bool enabled;
+#if defined(V8_USE_PERFETTO)
+ MaybeUtf8 category_str(isolate, Handle<String>::cast(category));
+ perfetto::DynamicCategory dynamic_category{*category_str};
+ enabled = TRACE_EVENT_CATEGORY_ENABLED(dynamic_category);
+#else
+ enabled = *GetCategoryGroupEnabled(isolate, Handle<String>::cast(category));
+#endif
+ return isolate->heap()->ToBoolean(enabled);
}
// Builtins::kTrace(phase, category, name, id, data) : bool
@@ -118,18 +131,23 @@ BUILTIN(Trace) {
Handle<Object> id_arg = args.atOrUndefined(isolate, 4);
Handle<Object> data_arg = args.atOrUndefined(isolate, 5);
- const uint8_t* category_group_enabled =
- GetCategoryGroupEnabled(isolate, Handle<String>::cast(category));
-
// Exit early if the category group is not enabled.
- if (!*category_group_enabled) {
+#if defined(V8_USE_PERFETTO)
+ MaybeUtf8 category_str(isolate, Handle<String>::cast(category));
+ perfetto::DynamicCategory dynamic_category{*category_str};
+ if (!TRACE_EVENT_CATEGORY_ENABLED(dynamic_category))
return ReadOnlyRoots(isolate).false_value();
- }
+#else
+ const uint8_t* category_group_enabled =
+ GetCategoryGroupEnabled(isolate, Handle<String>::cast(category));
+ if (!*category_group_enabled) return ReadOnlyRoots(isolate).false_value();
+#endif
if (!phase_arg->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kTraceEventPhaseError));
}
+ char phase = static_cast<char>(DoubleToInt32(phase_arg->Number()));
if (!category->IsString()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kTraceEventCategoryError));
@@ -160,32 +178,67 @@ BUILTIN(Trace) {
// We support passing one additional trace event argument with the
// name "data". Any JSON serializable value may be passed.
static const char* arg_name = "data";
+ Handle<Object> arg_json;
int32_t num_args = 0;
- uint8_t arg_type;
- uint64_t arg_value;
-
if (!data_arg->IsUndefined(isolate)) {
// Serializes the data argument as a JSON string, which is then
// copied into an object. This eliminates duplicated code but
// could have perf costs. It is also subject to all the same
// limitations as JSON.stringify() as it relates to circular
// references and value limitations (e.g. BigInt is not supported).
- Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ isolate, arg_json,
JsonStringify(isolate, data_arg, isolate->factory()->undefined_value(),
isolate->factory()->undefined_value()));
- std::unique_ptr<JsonTraceValue> traced_value;
- traced_value.reset(
- new JsonTraceValue(isolate, Handle<String>::cast(result)));
- tracing::SetTraceValue(std::move(traced_value), &arg_type, &arg_value);
num_args++;
}
+#if defined(V8_USE_PERFETTO)
+ auto trace_args = [&](perfetto::EventContext ctx) {
+ // TODO(skyostil): Use interned names to reduce trace size.
+ if (phase != TRACE_EVENT_PHASE_END) {
+ ctx.event()->set_name(*name);
+ }
+ if (num_args) {
+ MaybeUtf8 arg_contents(isolate, Handle<String>::cast(arg_json));
+ auto annotation = ctx.event()->add_debug_annotations();
+ annotation->set_name(arg_name);
+ annotation->set_legacy_json_value(*arg_contents);
+ }
+ if (flags & TRACE_EVENT_FLAG_HAS_ID) {
+ auto legacy_event = ctx.event()->set_legacy_event();
+ legacy_event->set_global_id(id);
+ }
+ };
+
+ switch (phase) {
+ case TRACE_EVENT_PHASE_BEGIN:
+ TRACE_EVENT_BEGIN(dynamic_category, nullptr, trace_args);
+ break;
+ case TRACE_EVENT_PHASE_END:
+ TRACE_EVENT_END(dynamic_category, trace_args);
+ break;
+ case TRACE_EVENT_PHASE_INSTANT:
+ TRACE_EVENT_INSTANT(dynamic_category, nullptr, trace_args);
+ break;
+ default:
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kTraceEventPhaseError));
+ }
+
+#else // !defined(V8_USE_PERFETTO)
+ uint8_t arg_type;
+ uint64_t arg_value;
+ if (num_args) {
+ std::unique_ptr<JsonTraceValue> traced_value(
+ new JsonTraceValue(isolate, Handle<String>::cast(arg_json)));
+ tracing::SetTraceValue(std::move(traced_value), &arg_type, &arg_value);
+ }
+
TRACE_EVENT_API_ADD_TRACE_EVENT(
- static_cast<char>(DoubleToInt32(phase_arg->Number())),
- category_group_enabled, *name, tracing::kGlobalScope, id, tracing::kNoId,
- num_args, &arg_name, &arg_type, &arg_value, flags);
+ phase, category_group_enabled, *name, tracing::kGlobalScope, id,
+ tracing::kNoId, num_args, &arg_name, &arg_type, &arg_value, flags);
+#endif // !defined(V8_USE_PERFETTO)
return ReadOnlyRoots(isolate).true_value();
}
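
Note: the Trace builtin above now serializes the optional 'data' argument to JSON once and hands it to whichever tracing backend the build selects (Perfetto behind V8_USE_PERFETTO, the legacy trace-event API otherwise). Reduced to a standalone sketch with a hypothetical macro name and printf sinks, the compile-time split has this shape:

#include <cstdio>
#include <string>

// Define to mimic building with V8_USE_PERFETTO (hypothetical stand-in).
// #define USE_PERFETTO

void EmitTraceEvent(const std::string& name, const std::string& json_data) {
#if defined(USE_PERFETTO)
  // Perfetto-style path: the JSON payload travels as a debug annotation.
  std::printf("[perfetto] %s data=%s\n", name.c_str(), json_data.c_str());
#else
  // Legacy path: the payload goes through the old trace-event plumbing.
  std::printf("[legacy] %s data=%s\n", name.c_str(), json_data.c_str());
#endif
}

int main() {
  EmitTraceEvent("v8.trace", "{\"answer\":42}");
}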
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 021a0e9240..a6d3887ad3 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -65,8 +65,9 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
- IntPtrConstant(0));
+ StoreJSArrayBufferBackingStore(
+ buffer,
+ EncodeExternalPointer(ReinterpretCast<RawPtrT>(IntPtrConstant(0))));
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
@@ -239,7 +240,7 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), &call_runtime);
- TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(buffer);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(buffer);
GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
var_result = buffer;
Goto(&done);
@@ -397,8 +398,8 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
}
StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base);
- StoreObjectFieldNoWriteBarrier<UintPtrT>(
- holder, JSTypedArray::kExternalPointerOffset, offset);
+ StoreJSTypedArrayExternalPointer(
+ holder, EncodeExternalPointer(ReinterpretCast<RawPtrT>(offset)));
}
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
@@ -407,8 +408,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
SmiConstant(0));
base = RawPtrAdd(base, Signed(offset));
- StoreObjectFieldNoWriteBarrier<RawPtrT>(
- holder, JSTypedArray::kExternalPointerOffset, base);
+ StoreJSTypedArrayExternalPointer(holder, EncodeExternalPointer(base));
}
void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
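
Note: the typed-array changes above stop writing raw backing-store/external pointers into heap objects and instead route them through EncodeExternalPointer plus dedicated store helpers. As a purely hypothetical illustration of that store/decode pattern (the key handling and actual encoding are V8 internals and differ from this toy):

#include <cstdint>
#include <cstdio>

// Hypothetical encoding key; not V8's real scheme.
constexpr uint64_t kExternalPointerKey = 0xA5A5A5A5A5A5A5A5ull;

uint64_t EncodeExternalPointer(const void* ptr) {
  return reinterpret_cast<uint64_t>(ptr) ^ kExternalPointerKey;
}

void* DecodeExternalPointer(uint64_t encoded) {
  return reinterpret_cast<void*>(encoded ^ kExternalPointerKey);
}

int main() {
  int backing_store = 42;
  const uint64_t stored = EncodeExternalPointer(&backing_store);   // at store
  int* loaded = static_cast<int*>(DecodeExternalPointer(stored));  // at load
  std::printf("%d\n", *loaded);  // prints 42
}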
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 770f5da97b..28efa39c67 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-wasm-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/interface-descriptors.h"
@@ -12,71 +14,45 @@
namespace v8 {
namespace internal {
-class WasmBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit WasmBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- TNode<WasmInstanceObject> LoadInstanceFromFrame() {
- return CAST(
- LoadFromParentFrame(WasmCompiledFrameConstants::kWasmInstanceOffset));
- }
-
- TNode<Context> LoadContextFromInstance(TNode<WasmInstanceObject> instance) {
- return CAST(Load(MachineType::AnyTagged(), instance,
- IntPtrConstant(WasmInstanceObject::kNativeContextOffset -
- kHeapObjectTag)));
- }
-
- TNode<Smi> SmiFromUint32WithSaturation(TNode<Uint32T> value, uint32_t max) {
- DCHECK_LE(max, static_cast<uint32_t>(Smi::kMaxValue));
- TNode<Uint32T> capped_value = SelectConstant(
- Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max));
- return SmiFromUint32(capped_value);
- }
-};
-
-TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) {
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- TailCallRuntime(Runtime::kWasmStackGuard, context);
+TNode<WasmInstanceObject> WasmBuiltinsAssembler::LoadInstanceFromFrame() {
+ return CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceOffset));
}
-TF_BUILTIN(WasmStackOverflow, WasmBuiltinsAssembler) {
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- TailCallRuntime(Runtime::kThrowWasmStackOverflow, context);
+TNode<NativeContext> WasmBuiltinsAssembler::LoadContextFromInstance(
+ TNode<WasmInstanceObject> instance) {
+ return CAST(Load(MachineType::AnyTagged(), instance,
+ IntPtrConstant(WasmInstanceObject::kNativeContextOffset -
+ kHeapObjectTag)));
}
-TF_BUILTIN(WasmThrow, WasmBuiltinsAssembler) {
- TNode<Object> exception = CAST(Parameter(Descriptor::kException));
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- TailCallRuntime(Runtime::kThrow, context, exception);
+TNode<FixedArray> WasmBuiltinsAssembler::LoadTablesFromInstance(
+ TNode<WasmInstanceObject> instance) {
+ return LoadObjectField<FixedArray>(instance,
+ WasmInstanceObject::kTablesOffset);
}
-TF_BUILTIN(WasmRethrow, WasmBuiltinsAssembler) {
- TNode<Object> exception = CAST(Parameter(Descriptor::kException));
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
-
- Label nullref(this, Label::kDeferred);
- GotoIf(TaggedEqual(NullConstant(), exception), &nullref);
+TNode<FixedArray> WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
+ TNode<WasmInstanceObject> instance) {
+ return LoadObjectField<FixedArray>(
+ instance, WasmInstanceObject::kWasmExternalFunctionsOffset);
+}
- TailCallRuntime(Runtime::kReThrow, context, exception);
+TNode<Smi> WasmBuiltinsAssembler::SmiFromUint32WithSaturation(
+ TNode<Uint32T> value, uint32_t max) {
+ DCHECK_LE(max, static_cast<uint32_t>(Smi::kMaxValue));
+ TNode<Uint32T> capped_value = SelectConstant(
+ Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max));
+ return SmiFromUint32(capped_value);
+}
- BIND(&nullref);
- MessageTemplate message_id = MessageTemplate::kWasmTrapRethrowNullRef;
- TailCallRuntime(Runtime::kThrowWasmError, context,
- SmiConstant(static_cast<int>(message_id)));
+TF_BUILTIN(WasmFloat32ToNumber, WasmBuiltinsAssembler) {
+ TNode<Float32T> val = UncheckedCast<Float32T>(Parameter(Descriptor::kValue));
+ Return(ChangeFloat32ToTagged(val));
}
-TF_BUILTIN(WasmTraceMemory, WasmBuiltinsAssembler) {
- TNode<Smi> info = CAST(Parameter(Descriptor::kMemoryTracingInfo));
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- TailCallRuntime(Runtime::kWasmTraceMemory, context, info);
+TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
+ TNode<Float64T> val = UncheckedCast<Float64T>(Parameter(Descriptor::kValue));
+ Return(ChangeFloat64ToTagged(val));
}
TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
@@ -210,26 +186,6 @@ TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmMemoryGrow, WasmBuiltinsAssembler) {
- TNode<Int32T> num_pages =
- UncheckedCast<Int32T>(Parameter(Descriptor::kNumPages));
- Label num_pages_out_of_range(this, Label::kDeferred);
-
- TNode<BoolT> num_pages_fits_in_smi =
- IsValidPositiveSmi(ChangeInt32ToIntPtr(num_pages));
- GotoIfNot(num_pages_fits_in_smi, &num_pages_out_of_range);
-
- TNode<Smi> num_pages_smi = SmiFromInt32(num_pages);
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- TNode<Smi> ret_smi = CAST(
- CallRuntime(Runtime::kWasmMemoryGrow, context, instance, num_pages_smi));
- Return(SmiToInt32(ret_smi));
-
- BIND(&num_pages_out_of_range);
- Return(Int32Constant(-1));
-}
-
TF_BUILTIN(WasmTableInit, WasmBuiltinsAssembler) {
TNode<Uint32T> dst_raw =
UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
@@ -290,65 +246,42 @@ TF_BUILTIN(WasmTableCopy, WasmBuiltinsAssembler) {
src_table, dst, src, size);
}
-TF_BUILTIN(WasmTableGet, WasmBuiltinsAssembler) {
- TNode<Int32T> entry_index =
- UncheckedCast<Int32T>(Parameter(Descriptor::kEntryIndex));
+TF_BUILTIN(WasmAllocateArray, WasmBuiltinsAssembler) {
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- Label entry_index_out_of_range(this, Label::kDeferred);
-
- TNode<BoolT> entry_index_fits_in_smi =
- IsValidPositiveSmi(ChangeInt32ToIntPtr(entry_index));
- GotoIfNot(entry_index_fits_in_smi, &entry_index_out_of_range);
-
- TNode<Smi> entry_index_smi = SmiFromInt32(entry_index);
- TNode<Smi> table_index_smi = CAST(Parameter(Descriptor::kTableIndex));
-
- TailCallRuntime(Runtime::kWasmFunctionTableGet, context, instance,
- table_index_smi, entry_index_smi);
-
- BIND(&entry_index_out_of_range);
- MessageTemplate message_id =
- wasm::WasmOpcodes::TrapReasonToMessageId(wasm::kTrapTableOutOfBounds);
- TailCallRuntime(Runtime::kThrowWasmError, context,
- SmiConstant(static_cast<int>(message_id)));
+ TNode<Smi> map_index = CAST(Parameter(Descriptor::kMapIndex));
+ TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+ TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+ TNode<FixedArray> maps_list = LoadObjectField<FixedArray>(
+ instance, WasmInstanceObject::kManagedObjectMapsOffset);
+ TNode<Map> map = CAST(LoadFixedArrayElement(maps_list, map_index));
+ TNode<IntPtrT> untagged_length = SmiUntag(length);
+ // instance_size = WasmArray::kHeaderSize
+ // + RoundUp(element_size * length, kObjectAlignment)
+ TNode<IntPtrT> raw_size = IntPtrMul(SmiUntag(element_size), untagged_length);
+ TNode<IntPtrT> rounded_size =
+ WordAnd(IntPtrAdd(raw_size, IntPtrConstant(kObjectAlignmentMask)),
+ IntPtrConstant(~kObjectAlignmentMask));
+ TNode<IntPtrT> instance_size =
+ IntPtrAdd(IntPtrConstant(WasmArray::kHeaderSize), rounded_size);
+ TNode<WasmArray> result = UncheckedCast<WasmArray>(Allocate(instance_size));
+ StoreMap(result, map);
+ StoreObjectFieldNoWriteBarrier(result, WasmArray::kLengthOffset,
+ TruncateIntPtrToInt32(untagged_length));
+ Return(result);
}
-TF_BUILTIN(WasmTableSet, WasmBuiltinsAssembler) {
- TNode<Int32T> entry_index =
- UncheckedCast<Int32T>(Parameter(Descriptor::kEntryIndex));
+TF_BUILTIN(WasmAllocateStruct, WasmBuiltinsAssembler) {
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
- Label entry_index_out_of_range(this, Label::kDeferred);
-
- TNode<BoolT> entry_index_fits_in_smi =
- IsValidPositiveSmi(ChangeInt32ToIntPtr(entry_index));
- GotoIfNot(entry_index_fits_in_smi, &entry_index_out_of_range);
-
- TNode<Smi> entry_index_smi = SmiFromInt32(entry_index);
- TNode<Smi> table_index_smi = CAST(Parameter(Descriptor::kTableIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TailCallRuntime(Runtime::kWasmFunctionTableSet, context, instance,
- table_index_smi, entry_index_smi, value);
-
- BIND(&entry_index_out_of_range);
- MessageTemplate message_id =
- wasm::WasmOpcodes::TrapReasonToMessageId(wasm::kTrapTableOutOfBounds);
- TailCallRuntime(Runtime::kThrowWasmError, context,
- SmiConstant(static_cast<int>(message_id)));
+ TNode<Smi> map_index = CAST(Parameter(Descriptor::kMapIndex));
+ TNode<FixedArray> maps_list = LoadObjectField<FixedArray>(
+ instance, WasmInstanceObject::kManagedObjectMapsOffset);
+ TNode<Map> map = CAST(LoadFixedArrayElement(maps_list, map_index));
+ TNode<IntPtrT> instance_size =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(map));
+ TNode<WasmStruct> result = UncheckedCast<WasmStruct>(Allocate(instance_size));
+ StoreMap(result, map);
+ Return(result);
}
-#define DECLARE_THROW_RUNTIME_FN(name) \
- TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame(); \
- TNode<Context> context = LoadContextFromInstance(instance); \
- MessageTemplate message_id = \
- wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntime(Runtime::kThrowWasmError, context, \
- SmiConstant(static_cast<int>(message_id))); \
- }
-FOREACH_WASM_TRAPREASON(DECLARE_THROW_RUNTIME_FN)
-#undef DECLARE_THROW_RUNTIME_FN
-
} // namespace internal
} // namespace v8
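
Note: WasmAllocateArray above sizes the allocation per its own comment, instance_size = WasmArray::kHeaderSize + RoundUp(element_size * length, kObjectAlignment), using an add-then-mask for the rounding. A standalone C++ sketch of that computation (placeholder constants; the real header size and alignment are V8/architecture specific):

#include <cstddef>
#include <cstdio>

// Placeholder values standing in for WasmArray::kHeaderSize and the
// tagged-object alignment of the target architecture.
constexpr size_t kHeaderSize = 16;
constexpr size_t kObjectAlignment = 8;
constexpr size_t kObjectAlignmentMask = kObjectAlignment - 1;

size_t WasmArrayInstanceSize(size_t element_size, size_t length) {
  const size_t raw_size = element_size * length;
  // RoundUp(raw_size, kObjectAlignment) via add-then-mask, as in the builtin.
  const size_t rounded =
      (raw_size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
  return kHeaderSize + rounded;
}

int main() {
  // 3 elements of 4 bytes: 16 + RoundUp(12, 8) = 16 + 16 = 32.
  std::printf("%zu\n", WasmArrayInstanceSize(4, 3));
}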
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h
new file mode 100644
index 0000000000..3740560666
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-wasm-gen.h
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_WASM_GEN_H_
+#define V8_BUILTINS_BUILTINS_WASM_GEN_H_
+
+#include "src/codegen/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class WasmBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit WasmBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<WasmInstanceObject> LoadInstanceFromFrame();
+
+ TNode<NativeContext> LoadContextFromInstance(
+ TNode<WasmInstanceObject> instance);
+
+ TNode<FixedArray> LoadTablesFromInstance(TNode<WasmInstanceObject> instance);
+
+ TNode<FixedArray> LoadExternalFunctionsFromInstance(
+ TNode<WasmInstanceObject> instance);
+
+ protected:
+ TNode<Smi> SmiFromUint32WithSaturation(TNode<Uint32T> value, uint32_t max);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_WASM_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index e75c7fae9d..d5cceda454 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -36,7 +36,7 @@ BUILTIN(FinalizationRegistryConstructor) {
finalization_registry->set_native_context(*isolate->native_context());
finalization_registry->set_cleanup(*cleanup);
finalization_registry->set_flags(
- JSFinalizationRegistry::ScheduledForCleanupField::encode(false));
+ JSFinalizationRegistry::ScheduledForCleanupBit::encode(false));
DCHECK(finalization_registry->active_cells().IsUndefined(isolate));
DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
@@ -122,61 +122,6 @@ BUILTIN(FinalizationRegistryUnregister) {
return *isolate->factory()->ToBoolean(success);
}
-BUILTIN(FinalizationRegistryCleanupSome) {
- HandleScope scope(isolate);
- const char* method_name = "FinalizationRegistry.prototype.cleanupSome";
-
- // 1. Let finalizationGroup be the this value.
- //
- // 2. If Type(finalizationGroup) is not Object, throw a TypeError
- // exception.
- //
- // 3. If finalizationGroup does not have a [[Cells]] internal slot,
- // throw a TypeError exception.
- CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name);
-
- Handle<Object> callback(finalization_registry->cleanup(), isolate);
- Handle<Object> callback_obj = args.atOrUndefined(isolate, 1);
-
- // 4. If callback is not undefined and IsCallable(callback) is
- // false, throw a TypeError exception.
- if (!callback_obj->IsUndefined(isolate)) {
- if (!callback_obj->IsCallable()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable));
- }
- callback = callback_obj;
- }
-
- // Don't do set_scheduled_for_cleanup(false); we still have the task
- // scheduled.
- if (JSFinalizationRegistry::Cleanup(isolate, finalization_registry, callback)
- .IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
-BUILTIN(FinalizationRegistryCleanupIteratorNext) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSFinalizationRegistryCleanupIterator, iterator, "next");
-
- Handle<JSFinalizationRegistry> finalization_registry(
- iterator->finalization_registry(), isolate);
- if (!finalization_registry->NeedsCleanup()) {
- return *isolate->factory()->NewJSIteratorResult(
- handle(ReadOnlyRoots(isolate).undefined_value(), isolate), true);
- }
- Handle<Object> holdings =
- handle(JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate),
- isolate);
-
- return *isolate->factory()->NewJSIteratorResult(holdings, false);
-}
-
BUILTIN(WeakRefConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index cb7ff412de..dfac203578 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -2,44 +2,47 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern macro IsCallable(HeapObject): bool;
+extern macro IsAllocationSite(HeapObject): bool;
+extern macro IsBigInt(HeapObject): bool;
extern macro IsConstructor(HeapObject): bool;
+extern macro IsContext(HeapObject): bool;
+extern macro IsCustomElementsReceiverInstanceType(int32): bool;
+extern macro IsExtensibleMap(Map): bool;
+extern macro IsFeedbackCell(HeapObject): bool;
extern macro IsFeedbackVector(HeapObject): bool;
+extern macro IsFixedArray(HeapObject): bool;
+extern macro IsHeapNumber(HeapObject): bool;
+extern macro IsJSAggregateError(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
-extern macro IsJSProxy(HeapObject): bool;
-extern macro IsJSRegExp(HeapObject): bool;
-extern macro IsJSRegExpStringIterator(HeapObject): bool;
-extern macro IsMap(HeapObject): bool;
-extern macro IsJSFunction(HeapObject): bool;
+extern macro IsJSArrayMap(Map): bool;
extern macro IsJSBoundFunction(HeapObject): bool;
+extern macro IsJSFinalizationRegistry(HeapObject): bool;
+extern macro IsJSFunction(HeapObject): bool;
extern macro IsJSObject(HeapObject): bool;
+extern macro IsJSPrimitiveWrapper(HeapObject): bool;
extern macro IsJSPromise(HeapObject): bool;
-extern macro IsJSTypedArray(HeapObject): bool;
-extern macro IsNumberDictionary(HeapObject): bool;
-extern macro IsContext(HeapObject): bool;
-extern macro IsNativeContext(HeapObject): bool;
+extern macro IsJSProxy(HeapObject): bool;
extern macro IsJSReceiver(HeapObject): bool;
-extern macro TaggedIsCallable(Object): bool;
-extern macro IsHeapNumber(HeapObject): bool;
-extern macro IsBigInt(HeapObject): bool;
-extern macro IsFixedArray(HeapObject): bool;
+extern macro IsJSRegExp(HeapObject): bool;
+extern macro IsJSRegExpStringIterator(HeapObject): bool;
+extern macro IsJSTypedArray(HeapObject): bool;
+extern macro IsMap(HeapObject): bool;
extern macro IsName(HeapObject): bool;
-extern macro IsPrivateSymbol(HeapObject): bool;
-extern macro IsNumber(Object): bool;
+extern macro IsNativeContext(HeapObject): bool;
+extern macro IsNumberDictionary(HeapObject): bool;
extern macro IsNumberNormalized(Number): bool;
-extern macro IsSafeInteger(Object): bool;
+extern macro IsNumber(Object): bool;
extern macro IsOddball(HeapObject): bool;
-extern macro IsSymbol(HeapObject): bool;
-extern macro IsJSArrayMap(Map): bool;
-extern macro IsExtensibleMap(Map): bool;
-extern macro IsJSPrimitiveWrapper(HeapObject): bool;
+extern macro IsPrivateSymbol(HeapObject): bool;
extern macro IsPromiseCapability(HeapObject): bool;
+extern macro IsPromiseFulfillReactionJobTask(HeapObject): bool;
extern macro IsPromiseReaction(HeapObject): bool;
extern macro IsPromiseReactionJobTask(HeapObject): bool;
extern macro IsPromiseRejectReactionJobTask(HeapObject): bool;
-extern macro IsPromiseFulfillReactionJobTask(HeapObject): bool;
+extern macro IsSafeInteger(Object): bool;
extern macro IsSharedFunctionInfo(HeapObject): bool;
-extern macro IsCustomElementsReceiverInstanceType(int32): bool;
+extern macro IsSymbol(HeapObject): bool;
+extern macro IsTuple2(HeapObject): bool;
extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
@@ -57,6 +60,8 @@ extern macro TaggedToPositiveSmi(Object): PositiveSmi
labels CastError;
extern macro TaggedToDirectString(Object): DirectString
labels CastError;
+extern macro HeapObjectToJSAggregateError(HeapObject): JSAggregateError
+ labels CastError;
extern macro HeapObjectToJSArray(HeapObject): JSArray
labels CastError;
extern macro HeapObjectToCallable(HeapObject): Callable
@@ -278,6 +283,12 @@ Cast<Undefined>(o: HeapObject): Undefined
return Cast<Undefined>(o) otherwise CastError;
}
+Cast<AllocationSite>(o: HeapObject): AllocationSite
+ labels CastError {
+ if (IsAllocationSite(o)) return %RawDownCast<AllocationSite>(o);
+ goto CastError;
+}
+
Cast<FixedArray>(o: HeapObject): FixedArray
labels CastError {
return HeapObjectToFixedArray(o) otherwise CastError;
@@ -365,6 +376,11 @@ Cast<Undefined|Callable>(o: HeapObject): Undefined|Callable
return HeapObjectToCallable(o) otherwise CastError;
}
+Cast<JSAggregateError>(o: HeapObject): JSAggregateError
+ labels CastError {
+ return HeapObjectToJSAggregateError(o) otherwise CastError;
+}
+
Cast<JSArray>(o: HeapObject): JSArray
labels CastError {
return HeapObjectToJSArray(o) otherwise CastError;
@@ -481,6 +497,12 @@ Cast<Map>(implicit context: Context)(o: HeapObject): Map
goto CastError;
}
+Cast<FeedbackCell>(implicit context: Context)(o: HeapObject): FeedbackCell
+ labels CastError {
+ if (IsFeedbackCell(o)) return %RawDownCast<FeedbackCell>(o);
+ goto CastError;
+}
+
Cast<FeedbackVector>(implicit context: Context)(o: HeapObject): FeedbackVector
labels CastError {
if (IsFeedbackVector(o)) return %RawDownCast<FeedbackVector>(o);
@@ -735,8 +757,21 @@ Cast<JSPromise>(o: HeapObject): JSPromise labels CastError {
goto CastError;
}
+Cast<JSFinalizationRegistry>(o: HeapObject):
+ JSFinalizationRegistry labels CastError {
+ if (IsJSFinalizationRegistry(o)) {
+ return %RawDownCast<JSFinalizationRegistry>(o);
+ }
+ goto CastError;
+}
+
UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
RegExpMatchInfo {
assert(Is<FixedArray>(o));
return %RawDownCast<RegExpMatchInfo>(o);
}
+
+macro CastOrDefault<T: type, Arg: type, Default: type>(
+ implicit context: Context)(x: Arg, default: Default): T|Default {
+ return Cast<T>(x) otherwise return default;
+}
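
Note: the new CastOrDefault macro above packages the common "attempt a checked cast, otherwise fall back to a caller-supplied default" idiom. A loose standalone analogue in C++ (a sketch of the shape only, not of Torque's type system):

#include <cstdio>
#include <optional>
#include <string>

// A checked conversion that can fail, standing in for Torque's Cast<T>.
std::optional<int> TryParseInt(const std::string& s) {
  try {
    return std::stoi(s);
  } catch (...) {
    return std::nullopt;
  }
}

// "Cast or default": use the converted value when the cast succeeds,
// otherwise hand back the fallback.
int ParseIntOrDefault(const std::string& s, int fallback) {
  return TryParseInt(s).value_or(fallback);
}

int main() {
  std::printf("%d %d\n", ParseIntOrDefault("42", -1), ParseIntOrDefault("x", -1));
  // prints: 42 -1
}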
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index 60136af633..c0d311a825 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -5,54 +5,53 @@
#include 'src/builtins/builtins-collections-gen.h'
namespace collections {
- @export
- macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny):
- KeyValuePair labels MayHaveSideEffects {
- typeswitch (o) {
- case (a: FastJSArray): {
- const length: Smi = a.length;
- typeswitch (a.elements) {
- case (elements: FixedArray): {
- return KeyValuePair{
- key: length > 0 ? array::LoadElementOrUndefined(elements, 0) :
- Undefined,
- value: length > 1 ? array::LoadElementOrUndefined(elements, 1) :
- Undefined
- };
- }
- case (elements: FixedDoubleArray): {
- return KeyValuePair{
- key: length > 0 ? array::LoadElementOrUndefined(elements, 0) :
- Undefined,
- value: length > 1 ? array::LoadElementOrUndefined(elements, 1) :
- Undefined
- };
- }
- case (FixedArrayBase): deferred {
- unreachable;
- }
+@export
+macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny):
+ KeyValuePair labels MayHaveSideEffects {
+ typeswitch (o) {
+ case (a: FastJSArray): {
+ const length: Smi = a.length;
+ typeswitch (a.elements) {
+ case (elements: FixedArray): {
+ return KeyValuePair{
+ key: length > 0 ? array::LoadElementOrUndefined(elements, 0) :
+ Undefined,
+ value: length > 1 ? array::LoadElementOrUndefined(elements, 1) :
+ Undefined
+ };
+ }
+ case (elements: FixedDoubleArray): {
+ return KeyValuePair{
+ key: length > 0 ? array::LoadElementOrUndefined(elements, 0) :
+ Undefined,
+ value: length > 1 ? array::LoadElementOrUndefined(elements, 1) :
+ Undefined
+ };
+ }
+ case (FixedArrayBase): deferred {
+ unreachable;
}
- }
- case (JSReceiver): {
- goto MayHaveSideEffects;
- }
- case (o: JSAny): deferred {
- ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o);
}
}
- }
-
- @export
- transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny):
- KeyValuePair {
- try {
- return LoadKeyValuePairNoSideEffects(o) otherwise Generic;
+ case (JSReceiver): {
+ goto MayHaveSideEffects;
}
- label Generic {
- return KeyValuePair{
- key: GetProperty(o, Convert<Smi>(0)),
- value: GetProperty(o, Convert<Smi>(1))
- };
+ case (o: JSAny): deferred {
+ ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o);
}
}
}
+
+@export
+transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny):
+ KeyValuePair {
+ try {
+ return LoadKeyValuePairNoSideEffects(o) otherwise Generic;
+ } label Generic {
+ return KeyValuePair{
+ key: GetProperty(o, Convert<Smi>(0)),
+ value: GetProperty(o, Convert<Smi>(1))
+ };
+ }
+}
+}
diff --git a/deps/v8/src/builtins/console.tq b/deps/v8/src/builtins/console.tq
index 48d5d08abc..c0daa19b6d 100644
--- a/deps/v8/src/builtins/console.tq
+++ b/deps/v8/src/builtins/console.tq
@@ -3,16 +3,16 @@
// found in the LICENSE file.
namespace console {
- extern builtin ConsoleAssert(implicit context:
- Context)(JSFunction, JSAny, int32): JSAny;
+extern builtin ConsoleAssert(implicit context: Context)(
+ JSFunction, JSAny, int32): JSAny;
- javascript builtin FastConsoleAssert(
- js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
- target: JSFunction)(...arguments): JSAny {
- if (ToBoolean(arguments[0])) {
- return Undefined;
- } else {
- tail ConsoleAssert(target, newTarget, Convert<int32>(arguments.length));
- }
+javascript builtin FastConsoleAssert(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
+ target: JSFunction)(...arguments): JSAny {
+ if (ToBoolean(arguments[0])) {
+ return Undefined;
+ } else {
+ tail ConsoleAssert(target, newTarget, Convert<int32>(arguments.length));
}
}
+}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index ee9be1d411..e2c1112038 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -90,6 +90,10 @@ FromConstexpr<LanguageModeSmi, constexpr LanguageMode>(
c: constexpr LanguageMode): LanguageModeSmi {
return %RawDownCast<LanguageModeSmi>(SmiConstant(c));
}
+FromConstexpr<PromiseState, constexpr PromiseState>(c: constexpr PromiseState):
+ PromiseState {
+ return %RawDownCast<PromiseState>(Int32Constant(c));
+}
macro Convert<To: type, From: type>(i: From): To {
return i;
@@ -209,6 +213,9 @@ Convert<float64, Number>(n: Number): float64 {
Convert<uintptr, Number>(n: Number): uintptr {
return ChangeUintPtrNumberToUintPtr(n);
}
+Convert<float64, int32>(f: int32): float64 {
+ return ChangeInt32ToFloat64(f);
+}
Convert<float64, float32>(f: float32): float64 {
return ChangeFloat32ToFloat64(f);
}
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index c5808dfd9d..5f61a19472 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -6,877 +6,830 @@
namespace data_view {
- macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
- if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
- return 'DataView.prototype.getUint8';
- } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
- return 'DataView.prototype.getInt8';
- } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
- return 'DataView.prototype.getUint16';
- } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
- return 'DataView.prototype.getInt16';
- } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
- return 'DataView.prototype.getUint32';
- } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
- return 'DataView.prototype.getInt32';
- } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
- return 'DataView.prototype.getFloat32';
- } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
- return 'DataView.prototype.getFloat64';
- } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
- return 'DataView.prototype.getBigInt64';
- } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
- return 'DataView.prototype.getBigUint64';
- } else {
- unreachable;
- }
+macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
+ if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
+ return 'DataView.prototype.getUint8';
+ } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
+ return 'DataView.prototype.getInt8';
+ } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
+ return 'DataView.prototype.getUint16';
+ } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
+ return 'DataView.prototype.getInt16';
+ } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
+ return 'DataView.prototype.getUint32';
+ } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
+ return 'DataView.prototype.getInt32';
+ } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ return 'DataView.prototype.getFloat32';
+ } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
+ return 'DataView.prototype.getFloat64';
+ } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
+ return 'DataView.prototype.getBigInt64';
+ } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
+ return 'DataView.prototype.getBigUint64';
+ } else {
+ unreachable;
}
+}
- macro MakeDataViewSetterNameString(kind: constexpr ElementsKind): String {
- if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
- return 'DataView.prototype.setUint8';
- } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
- return 'DataView.prototype.setInt8';
- } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
- return 'DataView.prototype.setUint16';
- } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
- return 'DataView.prototype.setInt16';
- } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
- return 'DataView.prototype.setUint32';
- } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
- return 'DataView.prototype.setInt32';
- } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
- return 'DataView.prototype.setFloat32';
- } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
- return 'DataView.prototype.setFloat64';
- } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
- return 'DataView.prototype.setBigInt64';
- } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
- return 'DataView.prototype.setBigUint64';
- } else {
- unreachable;
- }
+macro MakeDataViewSetterNameString(kind: constexpr ElementsKind): String {
+ if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
+ return 'DataView.prototype.setUint8';
+ } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
+ return 'DataView.prototype.setInt8';
+ } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
+ return 'DataView.prototype.setUint16';
+ } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
+ return 'DataView.prototype.setInt16';
+ } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
+ return 'DataView.prototype.setUint32';
+ } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
+ return 'DataView.prototype.setInt32';
+ } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ return 'DataView.prototype.setFloat32';
+ } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
+ return 'DataView.prototype.setFloat64';
+ } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
+ return 'DataView.prototype.setBigInt64';
+ } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
+ return 'DataView.prototype.setBigUint64';
+ } else {
+ unreachable;
}
+}
- macro WasDetached(view: JSArrayBufferView): bool {
- return IsDetachedBuffer(view.buffer);
- }
+macro WasDetached(view: JSArrayBufferView): bool {
+ return IsDetachedBuffer(view.buffer);
+}
- macro ValidateDataView(context: Context, o: JSAny, method: String):
- JSDataView {
- try {
- return Cast<JSDataView>(o) otherwise CastError;
- }
- label CastError {
- ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method);
- }
+macro ValidateDataView(context: Context, o: JSAny, method: String): JSDataView {
+ try {
+ return Cast<JSDataView>(o) otherwise CastError;
+ } label CastError {
+ ThrowTypeError(MessageTemplate::kIncompatibleMethodReceiver, method);
}
+}
- // ES6 section 24.2.4.1 get DataView.prototype.buffer
- javascript builtin DataViewPrototypeGetBuffer(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSArrayBuffer {
- const dataView: JSDataView =
- ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
- return dataView.buffer;
- }
+// ES6 section 24.2.4.1 get DataView.prototype.buffer
+javascript builtin DataViewPrototypeGetBuffer(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): JSArrayBuffer {
+ const dataView: JSDataView =
+ ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
+ return dataView.buffer;
+}
- // ES6 section 24.2.4.2 get DataView.prototype.byteLength
- javascript builtin DataViewPrototypeGetByteLength(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): Number {
- const dataView: JSDataView = ValidateDataView(
- context, receiver, 'get DataView.prototype.byte_length');
- if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
- }
- return Convert<Number>(dataView.byte_length);
- }
+// ES6 section 24.2.4.2 get DataView.prototype.byteLength
+javascript builtin DataViewPrototypeGetByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
+ const dataView: JSDataView =
+ ValidateDataView(context, receiver, 'get DataView.prototype.byte_length');
+ if (WasDetached(dataView)) {
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {dataView} was detached.
+ return 0;
+ }
+ return Convert<Number>(dataView.byte_length);
+}
- // ES6 section 24.2.4.3 get DataView.prototype.byteOffset
- javascript builtin DataViewPrototypeGetByteOffset(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): Number {
- const dataView: JSDataView = ValidateDataView(
- context, receiver, 'get DataView.prototype.byte_offset');
- if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
- }
- return Convert<Number>(dataView.byte_offset);
- }
+// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
+javascript builtin DataViewPrototypeGetByteOffset(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
+ const dataView: JSDataView =
+ ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
+ if (WasDetached(dataView)) {
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {dataView} was detached.
+ return 0;
+ }
+ return Convert<Number>(dataView.byte_offset);
+}
- extern macro BitcastInt32ToFloat32(uint32): float32;
- extern macro BitcastFloat32ToInt32(float32): uint32;
- extern macro Float64ExtractLowWord32(float64): uint32;
- extern macro Float64ExtractHighWord32(float64): uint32;
- extern macro Float64InsertLowWord32(float64, uint32): float64;
- extern macro Float64InsertHighWord32(float64, uint32): float64;
+extern macro BitcastInt32ToFloat32(uint32): float32;
+extern macro BitcastFloat32ToInt32(float32): uint32;
+extern macro Float64ExtractLowWord32(float64): uint32;
+extern macro Float64ExtractHighWord32(float64): uint32;
+extern macro Float64InsertLowWord32(float64, uint32): float64;
+extern macro Float64InsertHighWord32(float64, uint32): float64;
- extern macro DataViewBuiltinsAssembler::LoadUint8(RawPtr, uintptr): uint32;
- extern macro DataViewBuiltinsAssembler::LoadInt8(RawPtr, uintptr): int32;
+extern macro DataViewBuiltinsAssembler::LoadUint8(RawPtr, uintptr): uint32;
+extern macro DataViewBuiltinsAssembler::LoadInt8(RawPtr, uintptr): int32;
- macro LoadDataView8(
- buffer: JSArrayBuffer, offset: uintptr, signed: constexpr bool): Smi {
- if constexpr (signed) {
- return Convert<Smi>(LoadInt8(buffer.backing_store, offset));
- } else {
- return Convert<Smi>(LoadUint8(buffer.backing_store, offset));
- }
+macro LoadDataView8(
+ buffer: JSArrayBuffer, offset: uintptr, signed: constexpr bool): Smi {
+ if constexpr (signed) {
+ return Convert<Smi>(LoadInt8(buffer.backing_store_ptr, offset));
+ } else {
+ return Convert<Smi>(LoadUint8(buffer.backing_store_ptr, offset));
}
+}
- macro LoadDataView16(
- buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
- signed: constexpr bool): Number {
- const dataPointer: RawPtr = buffer.backing_store;
+macro LoadDataView16(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ signed: constexpr bool): Number {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
+
+ let b0: int32;
+ let b1: int32;
+ let result: int32;
+
+ // Sign-extend the most significant byte by loading it as an Int8.
+ if (requestedLittleEndian) {
+ b0 = Signed(LoadUint8(dataPointer, offset));
+ b1 = LoadInt8(dataPointer, offset + 1);
+ result = (b1 << 8) + b0;
+ } else {
+ b0 = LoadInt8(dataPointer, offset);
+ b1 = Signed(LoadUint8(dataPointer, offset + 1));
+ result = (b0 << 8) + b1;
+ }
+ if constexpr (signed) {
+ return Convert<Smi>(result);
+ } else {
+ // Bit-mask the higher bits to prevent sign extension if we're unsigned.
+ return Convert<Smi>(result & 0xFFFF);
+ }
+}
- let b0: int32;
- let b1: int32;
- let result: int32;
+macro LoadDataView32(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ kind: constexpr ElementsKind): Number {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
- // Sign-extend the most significant byte by loading it as an Int8.
- if (requestedLittleEndian) {
- b0 = Signed(LoadUint8(dataPointer, offset));
- b1 = LoadInt8(dataPointer, offset + 1);
- result = (b1 << 8) + b0;
- } else {
- b0 = LoadInt8(dataPointer, offset);
- b1 = Signed(LoadUint8(dataPointer, offset + 1));
- result = (b0 << 8) + b1;
- }
- if constexpr (signed) {
- return Convert<Smi>(result);
- } else {
- // Bit-mask the higher bits to prevent sign extension if we're unsigned.
- return Convert<Smi>(result & 0xFFFF);
- }
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ let result: uint32;
+
+ if (requestedLittleEndian) {
+ result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ } else {
+ result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}
- macro LoadDataView32(
- buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
- kind: constexpr ElementsKind): Number {
- const dataPointer: RawPtr = buffer.backing_store;
+ if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
+ return Convert<Number>(Signed(result));
+ } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
+ return Convert<Number>(result);
+ } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ const floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
+ return Convert<Number>(floatRes);
+ } else {
+ unreachable;
+ }
+}
- const b0: uint32 = LoadUint8(dataPointer, offset);
- const b1: uint32 = LoadUint8(dataPointer, offset + 1);
- const b2: uint32 = LoadUint8(dataPointer, offset + 2);
- const b3: uint32 = LoadUint8(dataPointer, offset + 3);
- let result: uint32;
+macro LoadDataViewFloat64(
+ buffer: JSArrayBuffer, offset: uintptr,
+ requestedLittleEndian: bool): Number {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ let lowWord: uint32;
+ let highWord: uint32;
+
+ if (requestedLittleEndian) {
+ lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ } else {
+ highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ }
+
+ let result: float64 = 0;
+ result = Float64InsertLowWord32(result, lowWord);
+ result = Float64InsertHighWord32(result, highWord);
+
+ return Convert<Number>(result);
+}
- if (requestedLittleEndian) {
- result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- } else {
- result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- }
+const kZeroDigitBigInt: constexpr int31 = 0;
+const kOneDigitBigInt: constexpr int31 = 1;
+const kTwoDigitBigInt: constexpr int31 = 2;
- if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
- return Convert<Number>(Signed(result));
- } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
- return Convert<Number>(result);
- } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
- const floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
- return Convert<Number>(floatRes);
- } else {
- unreachable;
- }
+// Create a BigInt on a 64-bit architecture from two 32-bit values.
+macro MakeBigIntOn64Bit(implicit context: Context)(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
+ // 0n is represented by a zero-length BigInt.
+ if (lowWord == 0 && highWord == 0) {
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
- macro LoadDataViewFloat64(
- buffer: JSArrayBuffer, offset: uintptr,
- requestedLittleEndian: bool): Number {
- const dataPointer: RawPtr = buffer.backing_store;
-
- const b0: uint32 = LoadUint8(dataPointer, offset);
- const b1: uint32 = LoadUint8(dataPointer, offset + 1);
- const b2: uint32 = LoadUint8(dataPointer, offset + 2);
- const b3: uint32 = LoadUint8(dataPointer, offset + 3);
- const b4: uint32 = LoadUint8(dataPointer, offset + 4);
- const b5: uint32 = LoadUint8(dataPointer, offset + 5);
- const b6: uint32 = LoadUint8(dataPointer, offset + 6);
- const b7: uint32 = LoadUint8(dataPointer, offset + 7);
- let lowWord: uint32;
- let highWord: uint32;
-
- if (requestedLittleEndian) {
- lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
- } else {
- highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ let sign: uint32 = bigint::kPositiveSign;
+ const highPart: intptr = Signed(Convert<uintptr>(highWord));
+ const lowPart: intptr = Signed(Convert<uintptr>(lowWord));
+ let rawValue: intptr = (highPart << 32) + lowPart;
+
+ if constexpr (signed) {
+ if (rawValue < 0) {
+ sign = bigint::kNegativeSign;
+ // We have to store the absolute value of rawValue in the digit.
+ rawValue = 0 - rawValue;
}
+ }
- let result: float64 = 0;
- result = Float64InsertLowWord32(result, lowWord);
- result = Float64InsertHighWord32(result, highWord);
+ // Allocate the BigInt and store the absolute value.
+ const result: MutableBigInt =
+ bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
+ bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue));
+ return Convert<BigInt>(result);
+}
- return Convert<Number>(result);
+// Create a BigInt on a 32-bit architecture from two 32-bit values.
+macro MakeBigIntOn32Bit(implicit context: Context)(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
+ // 0n is represented by a zero-length BigInt.
+ if (lowWord == 0 && highWord == 0) {
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
- const kZeroDigitBigInt: constexpr int31 = 0;
- const kOneDigitBigInt: constexpr int31 = 1;
- const kTwoDigitBigInt: constexpr int31 = 2;
-
- // Create a BigInt on a 64-bit architecture from two 32-bit values.
- macro MakeBigIntOn64Bit(implicit context: Context)(
- lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
- // 0n is represented by a zero-length BigInt.
- if (lowWord == 0 && highWord == 0) {
- return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
- }
+ // On a 32-bit platform, we might need 1 or 2 digits to store the number.
+ let needTwoDigits: bool = false;
+ let sign: uint32 = bigint::kPositiveSign;
- let sign: uint32 = bigint::kPositiveSign;
- const highPart: intptr = Signed(Convert<uintptr>(highWord));
- const lowPart: intptr = Signed(Convert<uintptr>(lowWord));
- let rawValue: intptr = (highPart << 32) + lowPart;
+ // We need to do some math on lowWord and highWord,
+ // so Convert them to int32.
+ let lowPart: int32 = Signed(lowWord);
+ let highPart: int32 = Signed(highWord);
+ // If highWord == 0, the number is positive, and we only need 1 digit,
+ // so we don't have anything to do.
+ // Otherwise, all cases are possible.
+ if (highWord != 0) {
if constexpr (signed) {
- if (rawValue < 0) {
+ // If highPart < 0, the number is always negative.
+ if (highPart < 0) {
sign = bigint::kNegativeSign;
- // We have to store the absolute value of rawValue in the digit.
- rawValue = 0 - rawValue;
- }
- }
- // Allocate the BigInt and store the absolute value.
- const result: MutableBigInt =
- bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
- bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue));
- return Convert<BigInt>(result);
- }
-
- // Create a BigInt on a 32-bit architecture from two 32-bit values.
- macro MakeBigIntOn32Bit(implicit context: Context)(
- lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
- // 0n is represented by a zero-length BigInt.
- if (lowWord == 0 && highWord == 0) {
- return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
- }
+ // We have to compute the absolute value by hand.
+ // There will be a negative carry from the low word
+ // to the high word iff low != 0.
+ highPart = 0 - highPart;
+ if (lowPart != 0) {
+ highPart = highPart - 1;
+ }
+ lowPart = 0 - lowPart;
- // On a 32-bit platform, we might need 1 or 2 digits to store the number.
- let needTwoDigits: bool = false;
- let sign: uint32 = bigint::kPositiveSign;
-
- // We need to do some math on lowWord and highWord,
- // so Convert them to int32.
- let lowPart: int32 = Signed(lowWord);
- let highPart: int32 = Signed(highWord);
-
- // If highWord == 0, the number is positive, and we only need 1 digit,
- // so we don't have anything to do.
- // Otherwise, all cases are possible.
- if (highWord != 0) {
- if constexpr (signed) {
- // If highPart < 0, the number is always negative.
- if (highPart < 0) {
- sign = bigint::kNegativeSign;
-
- // We have to compute the absolute value by hand.
- // There will be a negative carry from the low word
- // to the high word iff low != 0.
- highPart = 0 - highPart;
- if (lowPart != 0) {
- highPart = highPart - 1;
- }
- lowPart = 0 - lowPart;
-
- // Here, highPart could be 0 again so we might have 1 or 2 digits.
- if (highPart != 0) {
- needTwoDigits = true;
- }
-
- } else {
- // In this case, the number is positive, and we need 2 digits.
+ // Here, highPart could be 0 again so we might have 1 or 2 digits.
+ if (highPart != 0) {
needTwoDigits = true;
}
} else {
- // In this case, the number is positive (unsigned),
- // and we need 2 digits.
+ // In this case, the number is positive, and we need 2 digits.
needTwoDigits = true;
}
- }
- // Allocate the BigInt with the right sign and length.
- let result: MutableBigInt;
- if (needTwoDigits) {
- result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt);
} else {
- result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
- }
-
- // Finally, write the digit(s) to the BigInt.
- bigint::StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
- if (needTwoDigits) {
- bigint::StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
+ // In this case, the number is positive (unsigned),
+ // and we need 2 digits.
+ needTwoDigits = true;
}
- return Convert<BigInt>(result);
}
- macro MakeBigInt(implicit context: Context)(
- lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
- // A BigInt digit has the platform word size, so we only need one digit
- // on 64-bit platforms but may need two on 32-bit.
- if constexpr (Is64()) {
- return MakeBigIntOn64Bit(lowWord, highWord, signed);
- } else {
- return MakeBigIntOn32Bit(lowWord, highWord, signed);
- }
+ // Allocate the BigInt with the right sign and length.
+ let result: MutableBigInt;
+ if (needTwoDigits) {
+ result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt);
+ } else {
+ result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
}
- macro LoadDataViewBigInt(implicit context: Context)(
- buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
- signed: constexpr bool): BigInt {
- const dataPointer: RawPtr = buffer.backing_store;
-
- const b0: uint32 = LoadUint8(dataPointer, offset);
- const b1: uint32 = LoadUint8(dataPointer, offset + 1);
- const b2: uint32 = LoadUint8(dataPointer, offset + 2);
- const b3: uint32 = LoadUint8(dataPointer, offset + 3);
- const b4: uint32 = LoadUint8(dataPointer, offset + 4);
- const b5: uint32 = LoadUint8(dataPointer, offset + 5);
- const b6: uint32 = LoadUint8(dataPointer, offset + 6);
- const b7: uint32 = LoadUint8(dataPointer, offset + 7);
- let lowWord: uint32;
- let highWord: uint32;
-
- if (requestedLittleEndian) {
- lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
- } else {
- highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
- }
+ // Finally, write the digit(s) to the BigInt.
+ bigint::StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
+ if (needTwoDigits) {
+ bigint::StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
+ }
+ return Convert<BigInt>(result);
+}
- return MakeBigInt(lowWord, highWord, signed);
+macro MakeBigInt(implicit context: Context)(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
+ // A BigInt digit has the platform word size, so we only need one digit
+ // on 64-bit platforms but may need two on 32-bit.
+ if constexpr (Is64()) {
+ return MakeBigIntOn64Bit(lowWord, highWord, signed);
+ } else {
+ return MakeBigIntOn32Bit(lowWord, highWord, signed);
}
+}
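
MakeBigIntOn64Bit above packs the two words into a single platform-word digit, recording the sign separately and storing the absolute value; zero becomes a zero-length BigInt. A rough C++ sketch of that 64-bit path follows; the FakeBigInt struct and all names are illustrative stand-ins, not V8's actual representation, and the negation uses unsigned arithmetic so it stays well defined even for the minimum value:

#include <cstdint>
#include <cstdio>

struct FakeBigInt {       // illustrative stand-in for a one-digit BigInt
  bool negative = false;
  int digits = 0;         // 0 or 1 on a 64-bit platform
  uint64_t digit = 0;     // absolute value
};

FakeBigInt MakeBigInt64(uint32_t low, uint32_t high, bool is_signed) {
  FakeBigInt r;
  if (low == 0 && high == 0) return r;          // 0n: zero-length BigInt
  uint64_t raw_bits = (uint64_t{high} << 32) | low;
  int64_t raw = static_cast<int64_t>(raw_bits); // two's-complement reinterpretation
  if (is_signed && raw < 0) {
    r.negative = true;
    r.digit = ~raw_bits + 1;                    // absolute value as an unsigned digit
  } else {
    r.digit = raw_bits;
  }
  r.digits = 1;
  return r;
}

int main() {
  FakeBigInt b = MakeBigInt64(0xFFFFFFFFu, 0xFFFFFFFFu, true);  // -1 as a signed 64-bit value
  std::printf("negative=%d digit=%llu\n", b.negative,
              static_cast<unsigned long long>(b.digit));         // negative=1 digit=1
}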
- extern macro DataViewBuiltinsAssembler::DataViewElementSize(
- constexpr ElementsKind): constexpr int31;
-
- // GetViewValue ( view, requestIndex, isLittleEndian, type )
- // https://tc39.es/ecma262/#sec-getviewvalue
- transitioning macro DataViewGet(
- context: Context, receiver: JSAny, requestIndex: JSAny,
- requestedLittleEndian: JSAny, kind: constexpr ElementsKind): Numeric {
- // 1. Perform ? RequireInternalSlot(view, [[DataView]]).
- // 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
- const dataView: JSDataView =
- ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
-
- try {
- // 3. Let getIndex be ? ToIndex(requestIndex).
- const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError;
-
- // 4. Set isLittleEndian to ! ToBoolean(isLittleEndian).
- const littleEndian: bool = ToBoolean(requestedLittleEndian);
-
- // 5. Let buffer be view.[[ViewedArrayBuffer]].
- const buffer: JSArrayBuffer = dataView.buffer;
-
- // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- MessageTemplate::kDetachedOperation,
- MakeDataViewGetterNameString(kind));
- }
+macro LoadDataViewBigInt(implicit context: Context)(
+ buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
+ signed: constexpr bool): BigInt {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ let lowWord: uint32;
+ let highWord: uint32;
+
+ if (requestedLittleEndian) {
+ lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ } else {
+ highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ }
+
+ return MakeBigInt(lowWord, highWord, signed);
+}
- // 7. Let viewOffset be view.[[ByteOffset]].
- const viewOffset: uintptr = dataView.byte_offset;
-
- // 8. Let viewSize be view.[[ByteLength]].
- const viewSize: uintptr = dataView.byte_length;
-
- // 9. Let elementSize be the Element Size value specified in Table 62
- // for Element Type type.
- const elementSize: uintptr = DataViewElementSize(kind);
-
- // 10. If getIndex + elementSize > viewSize, throw a RangeError exception.
- CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
- otherwise RangeError;
-
- // 11. Let bufferIndex be getIndex + viewOffset.
- const bufferIndex: uintptr = getIndex + viewOffset;
-
- if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
- return LoadDataView8(buffer, bufferIndex, false);
- } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
- return LoadDataView8(buffer, bufferIndex, true);
- } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
- return LoadDataView16(buffer, bufferIndex, littleEndian, false);
- } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
- return LoadDataView16(buffer, bufferIndex, littleEndian, true);
- } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
- return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
- } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
- return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
- } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
- return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
- } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
- return LoadDataViewFloat64(buffer, bufferIndex, littleEndian);
- } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
- return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, false);
- } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
- return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, true);
- } else {
- unreachable;
- }
+extern macro DataViewBuiltinsAssembler::DataViewElementSize(
+ constexpr ElementsKind): constexpr int31;
+
+// GetViewValue ( view, requestIndex, isLittleEndian, type )
+// https://tc39.es/ecma262/#sec-getviewvalue
+transitioning macro DataViewGet(
+ context: Context, receiver: JSAny, requestIndex: JSAny,
+ requestedLittleEndian: JSAny, kind: constexpr ElementsKind): Numeric {
+ // 1. Perform ? RequireInternalSlot(view, [[DataView]]).
+ // 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
+ const dataView: JSDataView =
+ ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
+
+ try {
+ // 3. Let getIndex be ? ToIndex(requestIndex).
+ const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError;
+
+ // 4. Set isLittleEndian to ! ToBoolean(isLittleEndian).
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+
+ // 5. Let buffer be view.[[ViewedArrayBuffer]].
+ const buffer: JSArrayBuffer = dataView.buffer;
+
+ // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation,
+ MakeDataViewGetterNameString(kind));
}
- label RangeError {
- ThrowRangeError(MessageTemplate::kInvalidDataViewAccessorOffset);
+
+ // 7. Let viewOffset be view.[[ByteOffset]].
+ const viewOffset: uintptr = dataView.byte_offset;
+
+ // 8. Let viewSize be view.[[ByteLength]].
+ const viewSize: uintptr = dataView.byte_length;
+
+ // 9. Let elementSize be the Element Size value specified in Table 62
+ // for Element Type type.
+ const elementSize: uintptr = DataViewElementSize(kind);
+
+ // 10. If getIndex + elementSize > viewSize, throw a RangeError exception.
+ CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
+ otherwise RangeError;
+
+ // 11. Let bufferIndex be getIndex + viewOffset.
+ const bufferIndex: uintptr = getIndex + viewOffset;
+
+ if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
+ return LoadDataView8(buffer, bufferIndex, false);
+ } else if constexpr (kind == ElementsKind::INT8_ELEMENTS) {
+ return LoadDataView8(buffer, bufferIndex, true);
+ } else if constexpr (kind == ElementsKind::UINT16_ELEMENTS) {
+ return LoadDataView16(buffer, bufferIndex, littleEndian, false);
+ } else if constexpr (kind == ElementsKind::INT16_ELEMENTS) {
+ return LoadDataView16(buffer, bufferIndex, littleEndian, true);
+ } else if constexpr (kind == ElementsKind::UINT32_ELEMENTS) {
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
+ } else if constexpr (kind == ElementsKind::INT32_ELEMENTS) {
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
+ } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
+ } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
+ return LoadDataViewFloat64(buffer, bufferIndex, littleEndian);
+ } else if constexpr (kind == ElementsKind::BIGUINT64_ELEMENTS) {
+ return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, false);
+ } else if constexpr (kind == ElementsKind::BIGINT64_ELEMENTS) {
+ return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, true);
+ } else {
+ unreachable;
}
+ } label RangeError {
+ ThrowRangeError(MessageTemplate::kInvalidDataViewAccessorOffset);
}
+}
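
Step 10 above has to reject reads that run past the view without letting the addition getIndex + elementSize wrap around. One overflow-safe way to express that check in plain C++ is sketched below; this is an assumption about the intent of CheckIntegerIndexAdditionOverflow, not its actual implementation:

#include <cstdint>
#include <cstdio>
#include <stdexcept>

// Illustrative bounds check: "get_index + element_size > view_size" without
// risking wrap-around in the addition.
void CheckInBounds(uintptr_t get_index, uintptr_t element_size, uintptr_t view_size) {
  if (element_size > view_size || get_index > view_size - element_size) {
    throw std::range_error("Offset is outside the bounds of the DataView");
  }
}

int main() {
  CheckInBounds(0, 8, 16);     // fine: an 8-byte read at offset 0 of a 16-byte view
  try {
    CheckInBounds(12, 8, 16);  // 12 + 8 > 16: out of bounds
  } catch (const std::range_error& e) {
    std::puts(e.what());
  }
}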
- transitioning javascript builtin DataViewPrototypeGetUint8(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- return DataViewGet(
- context, receiver, offset, Undefined, ElementsKind::UINT8_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetUint8(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ return DataViewGet(
+ context, receiver, offset, Undefined, ElementsKind::UINT8_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetInt8(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- return DataViewGet(
- context, receiver, offset, Undefined, ElementsKind::INT8_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetInt8(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ return DataViewGet(
+ context, receiver, offset, Undefined, ElementsKind::INT8_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetUint16(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::UINT16_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetUint16(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, ElementsKind::UINT16_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetInt16(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::INT16_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetInt16(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, ElementsKind::INT16_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetUint32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::UINT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetUint32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, ElementsKind::UINT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetInt32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::INT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetInt32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian, ElementsKind::INT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetFloat32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::FLOAT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetFloat32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian,
+ ElementsKind::FLOAT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetFloat64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::FLOAT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetFloat64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian,
+ ElementsKind::FLOAT64_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetBigUint64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::BIGUINT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetBigUint64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian,
+ ElementsKind::BIGUINT64_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeGetBigInt64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewGet(
- context, receiver, offset, isLittleEndian,
- ElementsKind::BIGINT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeGetBigInt64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const isLittleEndian: JSAny = arguments[1];
+ return DataViewGet(
+ context, receiver, offset, isLittleEndian,
+ ElementsKind::BIGINT64_ELEMENTS);
+}
- extern macro ToNumber(Context, JSAny): Number;
- extern macro ToBigInt(Context, JSAny): BigInt;
- extern macro TruncateFloat64ToWord32(float64): uint32;
+extern macro ToNumber(Context, JSAny): Number;
+extern macro ToBigInt(Context, JSAny): BigInt;
+extern macro TruncateFloat64ToWord32(float64): uint32;
- extern macro DataViewBuiltinsAssembler::StoreWord8(RawPtr, uintptr, uint32):
- void;
+extern macro DataViewBuiltinsAssembler::StoreWord8(
+ RawPtr, uintptr, uint32): void;
- macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
- StoreWord8(buffer.backing_store, offset, value & 0xFF);
- }
+macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
+ StoreWord8(buffer.backing_store_ptr, offset, value & 0xFF);
+}
- macro StoreDataView16(
- buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
- const dataPointer: RawPtr = buffer.backing_store;
+macro StoreDataView16(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32,
+ requestedLittleEndian: bool) {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
- const b0: uint32 = value & 0xFF;
- const b1: uint32 = (value >>> 8) & 0xFF;
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
- if (requestedLittleEndian) {
- StoreWord8(dataPointer, offset, b0);
- StoreWord8(dataPointer, offset + 1, b1);
- } else {
- StoreWord8(dataPointer, offset, b1);
- StoreWord8(dataPointer, offset + 1, b0);
- }
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
+ } else {
+ StoreWord8(dataPointer, offset, b1);
+ StoreWord8(dataPointer, offset + 1, b0);
}
+}
- macro StoreDataView32(
- buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
- const dataPointer: RawPtr = buffer.backing_store;
-
- const b0: uint32 = value & 0xFF;
- const b1: uint32 = (value >>> 8) & 0xFF;
- const b2: uint32 = (value >>> 16) & 0xFF;
- const b3: uint32 = value >>> 24; // We don't need to mask here.
-
- if (requestedLittleEndian) {
- StoreWord8(dataPointer, offset, b0);
- StoreWord8(dataPointer, offset + 1, b1);
- StoreWord8(dataPointer, offset + 2, b2);
- StoreWord8(dataPointer, offset + 3, b3);
- } else {
- StoreWord8(dataPointer, offset, b3);
- StoreWord8(dataPointer, offset + 1, b2);
- StoreWord8(dataPointer, offset + 2, b1);
- StoreWord8(dataPointer, offset + 3, b0);
- }
+macro StoreDataView32(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32,
+ requestedLittleEndian: bool) {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
+
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
+ const b2: uint32 = (value >>> 16) & 0xFF;
+ const b3: uint32 = value >>> 24; // We don't need to mask here.
+
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
+ StoreWord8(dataPointer, offset + 2, b2);
+ StoreWord8(dataPointer, offset + 3, b3);
+ } else {
+ StoreWord8(dataPointer, offset, b3);
+ StoreWord8(dataPointer, offset + 1, b2);
+ StoreWord8(dataPointer, offset + 2, b1);
+ StoreWord8(dataPointer, offset + 3, b0);
}
+}
- macro StoreDataView64(
- buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
- requestedLittleEndian: bool) {
- const dataPointer: RawPtr = buffer.backing_store;
-
- const b0: uint32 = lowWord & 0xFF;
- const b1: uint32 = (lowWord >>> 8) & 0xFF;
- const b2: uint32 = (lowWord >>> 16) & 0xFF;
- const b3: uint32 = lowWord >>> 24;
-
- const b4: uint32 = highWord & 0xFF;
- const b5: uint32 = (highWord >>> 8) & 0xFF;
- const b6: uint32 = (highWord >>> 16) & 0xFF;
- const b7: uint32 = highWord >>> 24;
-
- if (requestedLittleEndian) {
- StoreWord8(dataPointer, offset, b0);
- StoreWord8(dataPointer, offset + 1, b1);
- StoreWord8(dataPointer, offset + 2, b2);
- StoreWord8(dataPointer, offset + 3, b3);
- StoreWord8(dataPointer, offset + 4, b4);
- StoreWord8(dataPointer, offset + 5, b5);
- StoreWord8(dataPointer, offset + 6, b6);
- StoreWord8(dataPointer, offset + 7, b7);
- } else {
- StoreWord8(dataPointer, offset, b7);
- StoreWord8(dataPointer, offset + 1, b6);
- StoreWord8(dataPointer, offset + 2, b5);
- StoreWord8(dataPointer, offset + 3, b4);
- StoreWord8(dataPointer, offset + 4, b3);
- StoreWord8(dataPointer, offset + 5, b2);
- StoreWord8(dataPointer, offset + 6, b1);
- StoreWord8(dataPointer, offset + 7, b0);
- }
+macro StoreDataView64(
+ buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
+ requestedLittleEndian: bool) {
+ const dataPointer: RawPtr = buffer.backing_store_ptr;
+
+ const b0: uint32 = lowWord & 0xFF;
+ const b1: uint32 = (lowWord >>> 8) & 0xFF;
+ const b2: uint32 = (lowWord >>> 16) & 0xFF;
+ const b3: uint32 = lowWord >>> 24;
+
+ const b4: uint32 = highWord & 0xFF;
+ const b5: uint32 = (highWord >>> 8) & 0xFF;
+ const b6: uint32 = (highWord >>> 16) & 0xFF;
+ const b7: uint32 = highWord >>> 24;
+
+ if (requestedLittleEndian) {
+ StoreWord8(dataPointer, offset, b0);
+ StoreWord8(dataPointer, offset + 1, b1);
+ StoreWord8(dataPointer, offset + 2, b2);
+ StoreWord8(dataPointer, offset + 3, b3);
+ StoreWord8(dataPointer, offset + 4, b4);
+ StoreWord8(dataPointer, offset + 5, b5);
+ StoreWord8(dataPointer, offset + 6, b6);
+ StoreWord8(dataPointer, offset + 7, b7);
+ } else {
+ StoreWord8(dataPointer, offset, b7);
+ StoreWord8(dataPointer, offset + 1, b6);
+ StoreWord8(dataPointer, offset + 2, b5);
+ StoreWord8(dataPointer, offset + 3, b4);
+ StoreWord8(dataPointer, offset + 4, b3);
+ StoreWord8(dataPointer, offset + 5, b2);
+ StoreWord8(dataPointer, offset + 6, b1);
+ StoreWord8(dataPointer, offset + 7, b0);
}
+}
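
StoreDataView64 is the mirror image of the 64-bit loads: split each 32-bit word into bytes and write them in the requested order. A compact C++ sketch of the same idea (illustrative names, not V8 code):

#include <cstdint>
#include <cstdio>

// Illustrative only: write two 32-bit words as 8 bytes in the requested order.
void StoreWords64(uint8_t* p, uint32_t low, uint32_t high, bool little_endian) {
  uint8_t bytes[8] = {
      static_cast<uint8_t>(low),        static_cast<uint8_t>(low >> 8),
      static_cast<uint8_t>(low >> 16),  static_cast<uint8_t>(low >> 24),
      static_cast<uint8_t>(high),       static_cast<uint8_t>(high >> 8),
      static_cast<uint8_t>(high >> 16), static_cast<uint8_t>(high >> 24)};
  for (int i = 0; i < 8; i++) {
    p[i] = little_endian ? bytes[i] : bytes[7 - i];  // reverse the order for big-endian
  }
}

int main() {
  uint8_t buf[8];
  StoreWords64(buf, 0, 0x3FF00000u, false);    // big-endian encoding of the double 1.0
  std::printf("%02x %02x\n", buf[0], buf[7]);  // prints "3f 00"
}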
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(
- BigIntBase): uint32;
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
- uint32;
-
- // We might get here a BigInt that is bigger than 64 bits, but we're only
- // interested in the 64 lowest ones. This means the lowest BigInt digit
- // on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
- macro StoreDataViewBigInt(
- buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
- requestedLittleEndian: bool) {
- const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
- const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
-
- // The 32-bit words that will hold the BigInt's value in
- // two's complement representation.
- let lowWord: uint32 = 0;
- let highWord: uint32 = 0;
-
- // The length is nonzero if and only if the BigInt's value is nonzero.
- if (length != 0) {
- if constexpr (Is64()) {
- // There is always exactly 1 BigInt digit to load in this case.
- const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0);
- lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
- highWord = Convert<uint32>(value >>> 32);
- } else { // There might be either 1 or 2 BigInt digits we need to load.
- lowWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 0));
- if (length >= 2) { // Only load the second digit if there is one.
- highWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 1));
- }
+extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigIntBase):
+ uint32;
+extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
+ uint32;
+
+// We might get a BigInt here that is bigger than 64 bits, but we're only
+// interested in the lowest 64 bits. This means the lowest BigInt digit
+// on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
+macro StoreDataViewBigInt(
+ buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
+ requestedLittleEndian: bool) {
+ const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
+ const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
+
+ // The 32-bit words that will hold the BigInt's value in
+ // two's complement representation.
+ let lowWord: uint32 = 0;
+ let highWord: uint32 = 0;
+
+ // The length is nonzero if and only if the BigInt's value is nonzero.
+ if (length != 0) {
+ if constexpr (Is64()) {
+ // There is always exactly 1 BigInt digit to load in this case.
+ const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0);
+ lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
+ highWord = Convert<uint32>(value >>> 32);
+ } else { // There might be either 1 or 2 BigInt digits we need to load.
+ lowWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 0));
+ if (length >= 2) { // Only load the second digit if there is one.
+ highWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 1));
}
}
+ }
- if (sign != 0) { // The number is negative, Convert it.
- highWord = Unsigned(0 - Signed(highWord));
- if (lowWord != 0) {
- highWord = Unsigned(Signed(highWord) - 1);
- }
- lowWord = Unsigned(0 - Signed(lowWord));
+ if (sign != 0) { // The number is negative, Convert it.
+ highWord = Unsigned(0 - Signed(highWord));
+ if (lowWord != 0) {
+ highWord = Unsigned(Signed(highWord) - 1);
}
-
- StoreDataView64(buffer, offset, lowWord, highWord, requestedLittleEndian);
+ lowWord = Unsigned(0 - Signed(lowWord));
}
- // SetViewValue ( view, requestIndex, isLittleEndian, type, value )
- // https://tc39.es/ecma262/#sec-setviewvalue
- transitioning macro DataViewSet(
- context: Context, receiver: JSAny, requestIndex: JSAny, value: JSAny,
- requestedLittleEndian: JSAny, kind: constexpr ElementsKind): JSAny {
- // 1. Perform ? RequireInternalSlot(view, [[DataView]]).
- // 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
- const dataView: JSDataView =
- ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
-
- try {
- // 3. Let getIndex be ? ToIndex(requestIndex).
- const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError;
+ StoreDataView64(buffer, offset, lowWord, highWord, requestedLittleEndian);
+}
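
The sign handling above negates a 64-bit magnitude held as two 32-bit words: negate both words, borrowing one from the high word exactly when the low word is non-zero. A small C++ sketch of that step (ours, not V8 code):

#include <cstdint>
#include <cstdio>

// Two's-complement negation of a 64-bit value split into two 32-bit words.
void NegateWords(uint32_t& low, uint32_t& high) {
  high = 0u - high;
  if (low != 0) high -= 1;  // borrow from the high word
  low = 0u - low;
}

int main() {
  uint32_t low = 1, high = 0;               // magnitude 1
  NegateWords(low, high);                   // now the two's-complement encoding of -1
  std::printf("%08x %08x\n", high, low);    // prints "ffffffff ffffffff"
}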
- const littleEndian: bool = ToBoolean(requestedLittleEndian);
- const buffer: JSArrayBuffer = dataView.buffer;
+// SetViewValue ( view, requestIndex, isLittleEndian, type, value )
+// https://tc39.es/ecma262/#sec-setviewvalue
+transitioning macro DataViewSet(
+ context: Context, receiver: JSAny, requestIndex: JSAny, value: JSAny,
+ requestedLittleEndian: JSAny, kind: constexpr ElementsKind): JSAny {
+ // 1. Perform ? RequireInternalSlot(view, [[DataView]]).
+ // 2. Assert: view has a [[ViewedArrayBuffer]] internal slot.
+ const dataView: JSDataView =
+ ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
+
+ try {
+ // 3. Let getIndex be ? ToIndex(requestIndex).
+ const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError;
+
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+ const buffer: JSArrayBuffer = dataView.buffer;
+
+ let numberValue: Numeric;
+ if constexpr (
+ kind == ElementsKind::BIGUINT64_ELEMENTS ||
+ kind == ElementsKind::BIGINT64_ELEMENTS) {
+ // 4. If ! IsBigIntElementType(type) is true, let numberValue be
+ // ? ToBigInt(value).
+ numberValue = ToBigInt(context, value);
+ } else {
+ // 5. Otherwise, let numberValue be ? ToNumber(value).
+ numberValue = ToNumber(context, value);
+ }
- let numberValue: Numeric;
- if constexpr (
- kind == ElementsKind::BIGUINT64_ELEMENTS ||
- kind == ElementsKind::BIGINT64_ELEMENTS) {
- // 4. If ! IsBigIntElementType(type) is true, let numberValue be
- // ? ToBigInt(value).
- numberValue = ToBigInt(context, value);
- } else {
- // 5. Otherwise, let numberValue be ? ToNumber(value).
- numberValue = ToNumber(context, value);
- }
+ // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation,
+ MakeDataViewSetterNameString(kind));
+ }
- // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(
- MessageTemplate::kDetachedOperation,
- MakeDataViewSetterNameString(kind));
- }
+ // 9. Let viewOffset be view.[[ByteOffset]].
+ const viewOffset: uintptr = dataView.byte_offset;
- // 9. Let viewOffset be view.[[ByteOffset]].
- const viewOffset: uintptr = dataView.byte_offset;
+ // 10. Let viewSize be view.[[ByteLength]].
+ const viewSize: uintptr = dataView.byte_length;
- // 10. Let viewSize be view.[[ByteLength]].
- const viewSize: uintptr = dataView.byte_length;
+ // 11. Let elementSize be the Element Size value specified in Table 62
+ // for Element Type type.
+ const elementSize: uintptr = DataViewElementSize(kind);
- // 11. Let elementSize be the Element Size value specified in Table 62
- // for Element Type type.
- const elementSize: uintptr = DataViewElementSize(kind);
+ // 12. If getIndex + elementSize > viewSize, throw a RangeError exception.
+ CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
+ otherwise RangeError;
- // 12. If getIndex + elementSize > viewSize, throw a RangeError exception.
- CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
- otherwise RangeError;
+ // 13. Let bufferIndex be getIndex + viewOffset.
+ const bufferIndex: uintptr = getIndex + viewOffset;
- // 13. Let bufferIndex be getIndex + viewOffset.
- const bufferIndex: uintptr = getIndex + viewOffset;
+ if constexpr (
+ kind == ElementsKind::BIGUINT64_ELEMENTS ||
+ kind == ElementsKind::BIGINT64_ELEMENTS) {
+ // For these elements kinds numberValue is BigInt.
+ const bigIntValue: BigInt = %RawDownCast<BigInt>(numberValue);
+ StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
+ } else {
+ // For these elements kinds numberValue is Number.
+ const numValue: Number = %RawDownCast<Number>(numberValue);
+ const doubleValue: float64 = ChangeNumberToFloat64(numValue);
if constexpr (
- kind == ElementsKind::BIGUINT64_ELEMENTS ||
- kind == ElementsKind::BIGINT64_ELEMENTS) {
- // For these elements kinds numberValue is BigInt.
- const bigIntValue: BigInt = %RawDownCast<BigInt>(numberValue);
- StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
- } else {
- // For these elements kinds numberValue is Number.
- const numValue: Number = %RawDownCast<Number>(numberValue);
- const doubleValue: float64 = ChangeNumberToFloat64(numValue);
-
- if constexpr (
- kind == ElementsKind::UINT8_ELEMENTS ||
- kind == ElementsKind::INT8_ELEMENTS) {
- StoreDataView8(
- buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue));
- } else if constexpr (
- kind == ElementsKind::UINT16_ELEMENTS ||
- kind == ElementsKind::INT16_ELEMENTS) {
- StoreDataView16(
- buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
- littleEndian);
- } else if constexpr (
- kind == ElementsKind::UINT32_ELEMENTS ||
- kind == ElementsKind::INT32_ELEMENTS) {
- StoreDataView32(
- buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
- littleEndian);
- } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
- const floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
- StoreDataView32(
- buffer, bufferIndex, BitcastFloat32ToInt32(floatValue),
- littleEndian);
- } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
- const lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
- const highWord: uint32 = Float64ExtractHighWord32(doubleValue);
- StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian);
- }
+ kind == ElementsKind::UINT8_ELEMENTS ||
+ kind == ElementsKind::INT8_ELEMENTS) {
+ StoreDataView8(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue));
+ } else if constexpr (
+ kind == ElementsKind::UINT16_ELEMENTS ||
+ kind == ElementsKind::INT16_ELEMENTS) {
+ StoreDataView16(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
+ littleEndian);
+ } else if constexpr (
+ kind == ElementsKind::UINT32_ELEMENTS ||
+ kind == ElementsKind::INT32_ELEMENTS) {
+ StoreDataView32(
+ buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
+ littleEndian);
+ } else if constexpr (kind == ElementsKind::FLOAT32_ELEMENTS) {
+ const floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
+ StoreDataView32(
+ buffer, bufferIndex, BitcastFloat32ToInt32(floatValue),
+ littleEndian);
+ } else if constexpr (kind == ElementsKind::FLOAT64_ELEMENTS) {
+ const lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
+ const highWord: uint32 = Float64ExtractHighWord32(doubleValue);
+ StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian);
}
- return Undefined;
- }
- label RangeError {
- ThrowRangeError(MessageTemplate::kInvalidDataViewAccessorOffset);
}
+ return Undefined;
+ } label RangeError {
+ ThrowRangeError(MessageTemplate::kInvalidDataViewAccessorOffset);
}
+}
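
For the non-BigInt kinds, the stores above first narrow the JS number with TruncateFloat64ToWord32. A hedged C++ sketch of the ECMAScript-style truncation this corresponds to — truncate toward zero, reduce modulo 2^32, map NaN and infinities to 0 — is shown below; V8's macro is the optimized equivalent, and the helper name here is ours:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Spec-style truncation of a double to a 32-bit word (illustrative, not V8's code path).
uint32_t ToWord32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);
  double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
  if (modulo < 0) modulo += 4294967296.0;
  return static_cast<uint32_t>(modulo);
}

int main() {
  std::printf("%u\n", ToWord32(-1.0));   // 4294967295
  std::printf("%u\n", ToWord32(1e10));   // 1410065408, i.e. 1e10 mod 2^32
}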
- transitioning javascript builtin DataViewPrototypeSetUint8(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, Undefined,
- ElementsKind::UINT8_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetUint8(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ return DataViewSet(
+ context, receiver, offset, value, Undefined,
+ ElementsKind::UINT8_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetInt8(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, Undefined,
- ElementsKind::INT8_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetInt8(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ return DataViewSet(
+ context, receiver, offset, value, Undefined, ElementsKind::INT8_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetUint16(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::UINT16_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetUint16(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::UINT16_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetInt16(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::INT16_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetInt16(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::INT16_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetUint32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::UINT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetUint32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::UINT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetInt32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::INT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetInt32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::INT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetFloat32(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::FLOAT32_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetFloat32(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::FLOAT32_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetFloat64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::FLOAT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetFloat64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::FLOAT64_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetBigUint64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::BIGUINT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetBigUint64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::BIGUINT64_ELEMENTS);
+}
- transitioning javascript builtin DataViewPrototypeSetBigInt64(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const offset: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- const value: JSAny = arguments.length > 1 ? arguments[1] : Undefined;
- const isLittleEndian: JSAny =
- arguments.length > 2 ? arguments[2] : Undefined;
- return DataViewSet(
- context, receiver, offset, value, isLittleEndian,
- ElementsKind::BIGINT64_ELEMENTS);
- }
+transitioning javascript builtin DataViewPrototypeSetBigInt64(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const offset: JSAny = arguments[0];
+ const value: JSAny = arguments[1];
+ const isLittleEndian: JSAny = arguments[2];
+ return DataViewSet(
+ context, receiver, offset, value, isLittleEndian,
+ ElementsKind::BIGINT64_ELEMENTS);
+}
}
diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq
new file mode 100644
index 0000000000..143486c737
--- /dev/null
+++ b/deps/v8/src/builtins/finalization-registry.tq
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace runtime {
+extern runtime
+ShrinkFinalizationRegistryUnregisterTokenMap(
+ Context, JSFinalizationRegistry): void;
+}
+
+namespace weakref {
+extern transitioning macro
+RemoveFinalizationRegistryCellFromUnregisterTokenMap(
+ JSFinalizationRegistry, WeakCell): void;
+
+macro SplitOffTail(weakCell: WeakCell): WeakCell|Undefined {
+ const weakCellTail = weakCell.next;
+ weakCell.next = Undefined;
+ typeswitch (weakCellTail) {
+ case (Undefined): {
+ }
+ case (tailIsNowAHead: WeakCell): {
+ assert(tailIsNowAHead.prev == weakCell);
+ tailIsNowAHead.prev = Undefined;
+ }
+ }
+ return weakCellTail;
+}
+
+transitioning macro
+PopClearedCell(finalizationRegistry: JSFinalizationRegistry): WeakCell|
+ Undefined {
+ typeswitch (finalizationRegistry.cleared_cells) {
+ case (Undefined): {
+ return Undefined;
+ }
+ case (weakCell: WeakCell): {
+ assert(weakCell.prev == Undefined);
+ finalizationRegistry.cleared_cells = SplitOffTail(weakCell);
+
+ // If the WeakCell has an unregister token, remove the cell from the
+ // unregister token linked lists and the unregister token from
+ // key_map. This doesn't shrink key_map, which is done manually after
+ // the cleanup loop to avoid a runtime call.
+ if (weakCell.unregister_token != Undefined) {
+ RemoveFinalizationRegistryCellFromUnregisterTokenMap(
+ finalizationRegistry, weakCell);
+ }
+
+ return weakCell;
+ }
+ }
+}
+
+transitioning macro
+FinalizationRegistryCleanupLoop(implicit context: Context)(
+ finalizationRegistry: JSFinalizationRegistry, callback: Callable) {
+ while (true) {
+ const weakCellHead = PopClearedCell(finalizationRegistry);
+ typeswitch (weakCellHead) {
+ case (Undefined): {
+ break;
+ }
+ case (weakCell: WeakCell): {
+ try {
+ Call(context, callback, Undefined, weakCell.holdings);
+ } catch (e) {
+ runtime::ShrinkFinalizationRegistryUnregisterTokenMap(
+ context, finalizationRegistry);
+ ReThrow(context, e);
+ }
+ }
+ }
+ }
+
+ runtime::ShrinkFinalizationRegistryUnregisterTokenMap(
+ context, finalizationRegistry);
+}
+
+transitioning javascript builtin
+FinalizationRegistryPrototypeCleanupSome(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. Let finalizationRegistry be the this value.
+ //
+ // 2. Perform ? RequireInternalSlot(finalizationRegistry, [[Cells]]).
+ const methodName: constexpr string =
+ 'FinalizationRegistry.prototype.cleanupSome';
+ const finalizationRegistry =
+ Cast<JSFinalizationRegistry>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);
+
+ let callback: Callable;
+ if (arguments[0] != Undefined) {
+ // 4. If callback is not undefined and IsCallable(callback) is
+ // false, throw a TypeError exception.
+ callback = Cast<Callable>(arguments[0]) otherwise ThrowTypeError(
+ MessageTemplate::kWeakRefsCleanupMustBeCallable, arguments[0]);
+ } else {
+ callback = finalizationRegistry.cleanup;
+ }
+
+ FinalizationRegistryCleanupLoop(finalizationRegistry, callback);
+ return Undefined;
+}
+}
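
PopClearedCell and SplitOffTail above implement a plain pop-from-head on a doubly linked list of WeakCells. A minimal C++ model of that operation (the Cell struct and names are illustrative, not V8's object layout):

#include <cassert>
#include <cstdio>

struct Cell {
  Cell* prev = nullptr;
  Cell* next = nullptr;
  int holdings = 0;
};

// Detach the head of the list and promote its successor to head.
Cell* PopClearedCell(Cell*& head) {
  Cell* cell = head;
  if (cell == nullptr) return nullptr;
  assert(cell->prev == nullptr);              // the head never has a predecessor
  head = cell->next;                          // split off the tail...
  cell->next = nullptr;
  if (head != nullptr) head->prev = nullptr;  // ...which becomes the new head
  return cell;
}

int main() {
  Cell a{nullptr, nullptr, 1}, b{&a, nullptr, 2};
  a.next = &b;
  Cell* head = &a;
  while (Cell* c = PopClearedCell(head)) std::printf("%d\n", c->holdings);  // 1 then 2
}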
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index 487d01c060..042207bff6 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -33,6 +33,10 @@ void GrowableFixedArray::Push(const TNode<Object> value) {
}
}
+TNode<FixedArray> GrowableFixedArray::ToFixedArray() {
+ return ResizeFixedArray(length(), length());
+}
+
TNode<JSArray> GrowableFixedArray::ToJSArray(const TNode<Context> context) {
const ElementsKind kind = PACKED_ELEMENTS;
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
index 6e5d2ac768..e61fce37e8 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.h
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -32,6 +32,7 @@ class GrowableFixedArray : public CodeStubAssembler {
void Push(const TNode<Object> value);
+ TNode<FixedArray> ToFixedArray();
TNode<JSArray> ToJSArray(const TNode<Context> context);
private:
diff --git a/deps/v8/src/builtins/growable-fixed-array.tq b/deps/v8/src/builtins/growable-fixed-array.tq
index 0666c39fd7..094e051a65 100644
--- a/deps/v8/src/builtins/growable-fixed-array.tq
+++ b/deps/v8/src/builtins/growable-fixed-array.tq
@@ -3,44 +3,48 @@
// found in the LICENSE file.
namespace growable_fixed_array {
- // TODO(pwong): Support FixedTypedArrays.
- struct GrowableFixedArray {
- macro Push(obj: Object) {
- this.EnsureCapacity();
- this.array.objects[this.length++] = obj;
- }
- macro ResizeFixedArray(newCapacity: intptr): FixedArray {
- assert(this.length >= 0);
- assert(newCapacity >= 0);
- assert(newCapacity >= this.length);
- const first: intptr = 0;
- return ExtractFixedArray(this.array, first, this.length, newCapacity);
- }
- macro EnsureCapacity() {
- assert(this.length <= this.capacity);
- if (this.capacity == this.length) {
- // Growth rate is analog to JSObject::NewElementsCapacity:
- // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
- this.capacity = this.capacity + (this.capacity >> 1) + 16;
- this.array = this.ResizeFixedArray(this.capacity);
- }
- }
- macro ToJSArray(implicit context: Context)(): JSArray {
- const nativeContext: NativeContext = LoadNativeContext(context);
- const map: Map =
- LoadJSArrayElementsMap(ElementsKind::PACKED_ELEMENTS, nativeContext);
- const fixedArray: FixedArray = this.ResizeFixedArray(this.length);
- const lengthSmi = Convert<Smi>(this.length);
- return AllocateJSArray(map, fixedArray, lengthSmi);
+// TODO(pwong): Support FixedTypedArrays.
+struct GrowableFixedArray {
+ macro Push(obj: Object) {
+ this.EnsureCapacity();
+ this.array.objects[this.length++] = obj;
+ }
+ macro ResizeFixedArray(newCapacity: intptr): FixedArray {
+ assert(this.length >= 0);
+ assert(newCapacity >= 0);
+ assert(newCapacity >= this.length);
+ const first: intptr = 0;
+ return ExtractFixedArray(this.array, first, this.length, newCapacity);
+ }
+ macro EnsureCapacity() {
+ assert(this.length <= this.capacity);
+ if (this.capacity == this.length) {
+ // Growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
+ this.capacity = this.capacity + (this.capacity >> 1) + 16;
+ this.array = this.ResizeFixedArray(this.capacity);
}
-
- array: FixedArray;
- // TODO(v8:4153): make capacity and length uintptr
- capacity: intptr;
- length: intptr;
+ }
+ macro ToFixedArray(): FixedArray {
+ return this.ResizeFixedArray(this.length);
}
- macro NewGrowableFixedArray(): GrowableFixedArray {
- return GrowableFixedArray{array: kEmptyFixedArray, capacity: 0, length: 0};
+ macro ToJSArray(implicit context: Context)(): JSArray {
+ const nativeContext: NativeContext = LoadNativeContext(context);
+ const map: Map =
+ LoadJSArrayElementsMap(ElementsKind::PACKED_ELEMENTS, nativeContext);
+ const fixedArray: FixedArray = this.ResizeFixedArray(this.length);
+ const lengthSmi = Convert<Smi>(this.length);
+ return AllocateJSArray(map, fixedArray, lengthSmi);
}
+
+ array: FixedArray;
+ // TODO(v8:4153): make capacity and length uintptr
+ capacity: intptr;
+ length: intptr;
+}
+
+macro NewGrowableFixedArray(): GrowableFixedArray {
+ return GrowableFixedArray{array: kEmptyFixedArray, capacity: 0, length: 0};
+}
}
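
The EnsureCapacity comment above quotes the growth rule new_capacity = current_capacity + (current_capacity >> 1) + 16. A quick C++ check of the capacities this produces starting from an empty array:

#include <cstdio>

int main() {
  int capacity = 0;
  for (int i = 0; i < 5; i++) {
    capacity = capacity + (capacity >> 1) + 16;
    std::printf("%d ", capacity);  // prints 16 40 76 130 211
  }
  std::printf("\n");
}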
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 5bea93214c..04a1fa9e0d 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -130,31 +130,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ push(eax);
__ SmiUntag(eax);
+#ifdef V8_REVERSE_JSARGS
+ // Set up pointer to first argument (skip receiver).
+ __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+ // Copy arguments to the expression stack.
+ __ PushArray(esi, eax, ecx);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+#else
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-
// Set up pointer to last argument. We are using esi as scratch register.
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- // ----------- S t a t e -------------
- // -- eax: number of arguments (untagged)
- // -- edi: constructor function
- // -- edx: new target
- // -- esi: pointer to last argument
- // -- ecx: counter
- // -- sp[0*kSystemPointerSize]: the hole (receiver)
- // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[2*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(esi, ecx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments to the expression stack.
+ __ PushArray(esi, eax, ecx);
+#endif
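
The behavioural difference in this hunk is where the receiver hole lands relative to the copied arguments: before them in the old layout, after them (so on top of the stack) under V8_REVERSE_JSARGS. A toy C++ model of just that placement, ignoring argument-ordering details (not V8 code):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  for (bool reverse_jsargs : {false, true}) {
    std::vector<std::string> stack;  // back of the vector models the top of the stack
    if (!reverse_jsargs) stack.push_back("hole(receiver)");
    stack.push_back("arguments...");
    if (reverse_jsargs) stack.push_back("hole(receiver)");
    std::printf("reverse=%d:", reverse_jsargs);
    for (const auto& s : stack) std::printf(" %s", s.c_str());
    std::printf("\n");
  }
}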
// Call the function.
// eax: number of arguments (untagged)
@@ -254,29 +245,34 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(edx);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
+ // Push the allocated receiver to the stack.
__ Push(eax);
+
+#ifdef V8_REVERSE_JSARGS
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments; we save it in
+ // xmm0 since eax needs to hold the number of arguments until the function
+ // is invoked.
+ __ movd(xmm0, eax);
+
+ // Set up pointer to first argument (skip receiver).
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+#else
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver.
__ Push(eax);
- // ----------- S t a t e -------------
- // -- edx: new target
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: padding
- // -- sp[3*kSystemPointerSize]: constructor function
- // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[5*kSystemPointerSize]: context
- // -----------------------------------
+ // Set up pointer to last argument.
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+#endif
// Restore argument count.
__ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(eax);
- // Set up pointer to last argument.
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
// Check if we have enough stack space to push all arguments.
// Argument count in eax. Clobbers ecx.
Label enough_stack_space, stack_overflow;
@@ -292,27 +288,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- // ----------- S t a t e -------------
- // -- eax: number of arguments (untagged)
- // -- edx: new target
- // -- edi: pointer to last argument
- // -- ecx: counter (tagged)
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: padding
- // -- sp[3*kSystemPointerSize]: constructor function
- // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[5*kSystemPointerSize]: context
- // -----------------------------------
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ Push(Operand(edi, ecx, times_system_pointer_size, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments to the expression stack.
+ __ PushArray(edi, eax, ecx);
+
+#ifdef V8_REVERSE_JSARGS
+ // Push implicit receiver.
+ __ movd(ecx, xmm0);
+ __ Push(ecx);
+#endif
// Restore and call the constructor function.
__ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
@@ -536,9 +519,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the previous frame pointer (edx) to access C arguments
__ mov(scratch1, Operand(ebp, 0));
- // Push the function and the receiver onto the stack.
+ // Push the function.
__ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset));
+
+#ifndef V8_REVERSE_JSARGS
+ // And the receiver onto the stack.
__ push(Operand(scratch1, EntryFrameConstants::kReceiverArgOffset));
+#endif
// Load the number of arguments and setup pointer to the arguments.
__ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset));
@@ -558,6 +545,18 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
// Copy arguments to the stack in a loop.
+#ifdef V8_REVERSE_JSARGS
+ Label loop, entry;
+ __ Move(ecx, eax);
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ // Push the parameter from argv.
+ __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
+ __ push(Operand(scratch2, 0)); // dereference handle
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+#else
Label loop, entry;
__ Move(ecx, Immediate(0));
__ jmp(&entry, Label::kNear);
@@ -569,10 +568,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ cmp(ecx, eax);
__ j(not_equal, &loop);
+#endif
- // Load the previous frame pointer (ebx) to access C arguments
+ // Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
+#ifdef V8_REVERSE_JSARGS
+ // Push the receiver onto the stack.
+ __ push(Operand(scratch2, EntryFrameConstants::kReceiverArgOffset));
+#endif
+
// Get the new.target and function from the frame.
__ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset));
__ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset));
@@ -662,15 +667,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Pop return address.
__ PopReturnAddressTo(eax);
+#ifndef V8_REVERSE_JSARGS
// Push receiver.
__ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
+#endif
// ----------- S t a t e -------------
// -- eax : return address
// -- edx : the JSGeneratorObject to resume
// -- edi : generator function
// -- esi : generator context
- // -- esp[0] : generator receiver
+ // -- esp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
{
@@ -682,6 +689,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
+#ifdef V8_REVERSE_JSARGS
+ {
+ Label done_loop, loop;
+ __ mov(edi, ecx);
+
+ __ bind(&loop);
+ __ dec(edi);
+ __ j(less, &done_loop);
+ __ Push(
+ FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
+ __ jmp(&loop);
+
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
+#else
{
Label done_loop, loop;
__ Set(edi, 0);
@@ -689,13 +714,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(edi, ecx);
__ j(greater_equal, &done_loop);
- __ Push(FieldOperand(ebx, edi, times_system_pointer_size,
- FixedArray::kHeaderSize));
+ __ Push(
+ FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
__ add(edi, Immediate(1));
__ jmp(&loop);
__ bind(&done_loop);
}
+#endif
// Restore registers.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -1211,11 +1237,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop_header, loop_check;
__ jmp(&loop_check);
__ bind(&loop_header);
+#ifdef V8_REVERSE_JSARGS
+ __ Push(Operand(array_limit, 0));
+ __ bind(&loop_check);
+ __ add(array_limit, Immediate(kSystemPointerSize));
+ __ cmp(array_limit, start_address);
+ __ j(below_equal, &loop_header, Label::kNear);
+#else
__ Push(Operand(start_address, 0));
__ sub(start_address, Immediate(kSystemPointerSize));
__ bind(&loop_check);
__ cmp(start_address, array_limit);
__ j(above, &loop_header, Label::kNear);
+#endif
}
// static
@@ -1235,6 +1269,14 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
const Register argv = ecx;
Label stack_overflow;
+
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ dec(eax);
+ }
+#endif
+
// Add a stack check before pushing the arguments.
Generate_StackOverflowCheck(masm, eax, scratch, &stack_overflow, true);
@@ -1242,11 +1284,37 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Compute the expected number of arguments.
__ mov(scratch, eax);
- __ add(scratch, Immediate(1)); // Add one for receiver.
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
+#ifdef V8_REVERSE_JSARGS
+ if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
+ __ add(scratch, Immediate(1)); // Add one for receiver.
+ }
+
+ // Find the address of the last argument.
+ __ shl(scratch, kSystemPointerSizeLog2);
+ __ neg(scratch);
+ __ add(scratch, argv);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ movd(xmm1, scratch);
+ Generate_InterpreterPushArgs(masm, scratch, argv);
+ // Pass the spread in the register ecx.
+ __ movd(ecx, xmm1);
+ __ mov(ecx, Operand(ecx, 0));
+ } else {
+ Generate_InterpreterPushArgs(masm, scratch, argv);
+ }
+
+ // Push "undefined" as the receiver arg if we need to.
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+#else
+ __ add(scratch, Immediate(1)); // Add one for receiver.
+
// Push "undefined" as the receiver arg if we need to.
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1259,18 +1327,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ add(scratch, argv);
Generate_InterpreterPushArgs(masm, scratch, argv);
- // Call the target.
-
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Pop(ecx); // Pass the spread in a register
- __ PushReturnAddressFrom(eax);
- __ movd(eax, xmm0); // Restore number of arguments.
+ }
+#endif
+
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Restore number of arguments.
+
+ // Call the target.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+#ifndef V8_REVERSE_JSARGS
__ sub(eax, Immediate(1)); // Subtract one for spread
+#endif
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
- __ PushReturnAddressFrom(eax);
- __ movd(eax, xmm0); // Restore number of arguments.
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1328,6 +1400,25 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 3 copy arguments to correct locations.
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
+#ifdef V8_REVERSE_JSARGS
+ __ mov(Operand(esp, (num_slots_to_move + 1) * kSystemPointerSize),
+ Immediate(0));
+ __ mov(scratch1, Immediate(0));
+
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ mov(scratch2, Operand(start_addr, 0));
+ __ mov(Operand(esp, scratch1, times_system_pointer_size,
+ (num_slots_to_move + 1) * kSystemPointerSize),
+ scratch2);
+ __ sub(start_addr, Immediate(kSystemPointerSize));
+ __ bind(&loop_check);
+ __ inc(scratch1);
+ __ cmp(scratch1, eax);
+ __ j(less_equal, &loop_header, Label::kNear);
+
+#else
__ mov(Operand(esp, num_args, times_system_pointer_size,
(num_slots_to_move + 1) * kSystemPointerSize),
Immediate(0));
@@ -1345,6 +1436,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_check);
__ cmp(scratch1, Immediate(0));
__ j(greater, &loop_header, Label::kNear);
+#endif
}
} // end anonymous namespace
@@ -1362,9 +1454,15 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -- esp[8] : the new target
// -- esp[12] : the constructor
// -----------------------------------
-
Label stack_overflow;
+#ifdef V8_REVERSE_JSARGS
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ dec(eax);
+ }
+#endif
+
// Push arguments and move return address and stack spill slots to the top of
// stack. The eax register is readonly. The ecx register will be modified. edx
// and edi are used as scratch registers.
@@ -1399,11 +1497,17 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Drop(1); // The allocation site is unused.
__ Pop(kJavaScriptCallNewTargetRegister);
__ Pop(kJavaScriptCallTargetRegister);
+#ifdef V8_REVERSE_JSARGS
+ // Pass the spread in the register ecx, overwriting ecx.
+ __ mov(ecx, Operand(ecx, 0));
+ __ PushReturnAddressFrom(eax);
+ __ movd(eax, xmm0); // Reload number of arguments.
+#else
__ Pop(ecx); // Pop the spread (i.e. the first argument), overwriting ecx.
__ PushReturnAddressFrom(eax);
__ movd(eax, xmm0); // Reload number of arguments.
__ sub(eax, Immediate(1)); // The actual argc thus decrements by one.
-
+#endif
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1705,9 +1809,35 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// esp[8 * n] : Argument 1
// esp[8 * (n + 1)] : Receiver (callable to call)
- //
+ // NOTE: The order of args is reversed if V8_REVERSE_JSARGS
// eax contains the number of arguments, n, not counting the receiver.
- //
+
+#ifdef V8_REVERSE_JSARGS
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ {
+ StackArgumentsAccessor args(eax);
+ __ mov(edi, args.GetReceiverOperand());
+ }
+
+ // 2. Save the return address and drop the callable.
+ __ PopReturnAddressTo(edx);
+ __ Pop(ecx);
+
+ // 3. Make sure we have at least one argument.
+ {
+ Label done;
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ inc(eax);
+ __ bind(&done);
+ }
+
+ // 4. Push back the return address one slot down on the stack (overwriting the
+ // original callable), making the original first argument the new receiver.
+ __ PushReturnAddressFrom(edx);
+ __ dec(eax); // One fewer argument (first argument is new receiver).
+#else
// 1. Make sure we have at least one argument.
{
Label done;
@@ -1741,6 +1871,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ pop(edx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
}
+#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1956,6 +2087,56 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, kArgumentsLength, edx, &stack_overflow);
+#ifdef V8_REVERSE_JSARGS
+ __ movd(xmm4, kArgumentsList); // Spill the arguments list.
+
+ // Move the arguments already in the stack,
+ // including the receiver and the return address.
+ {
+ Label copy, check;
+ Register src = edx, current = edi, tmp = esi;
+ // Update stack pointer.
+ __ mov(src, esp);
+ __ lea(tmp, Operand(kArgumentsLength, times_system_pointer_size, 0));
+ __ AllocateStackSpace(tmp);
+ // Include return address and receiver.
+ __ add(eax, Immediate(2));
+ __ mov(current, Immediate(0));
+ __ jmp(&check);
+ // Loop.
+ __ bind(&copy);
+ __ mov(tmp, Operand(src, current, times_system_pointer_size, 0));
+ __ mov(Operand(esp, current, times_system_pointer_size, 0), tmp);
+ __ inc(current);
+ __ bind(&check);
+ __ cmp(current, eax);
+ __ j(less, &copy);
+ __ lea(edx, Operand(esp, eax, times_system_pointer_size, 0));
+ }
+
+ __ movd(kArgumentsList, xmm4); // Recover arguments list.
+
+ // Push additional arguments onto the stack.
+ {
+ __ Move(eax, Immediate(0));
+ Label done, push, loop;
+ __ bind(&loop);
+ __ cmp(eax, kArgumentsLength);
+ __ j(equal, &done, Label::kNear);
+ // Turn the hole into undefined as we go.
+ __ mov(edi, FieldOperand(kArgumentsList, eax, times_tagged_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(edi, RootIndex::kTheHoleValue);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(edi, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ mov(Operand(edx, 0), edi);
+ __ add(edx, Immediate(kSystemPointerSize));
+ __ inc(eax);
+ __ jmp(&loop);
+ __ bind(&done);
+ }
+#else // !V8_REVERSE_JSARGS
// Push additional arguments onto the stack.
{
__ PopReturnAddressTo(edx);
@@ -1965,7 +2146,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmp(eax, kArgumentsLength);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ mov(edi, FieldOperand(kArgumentsList, eax, times_system_pointer_size,
+ __ mov(edi, FieldOperand(kArgumentsList, eax, times_tagged_size,
FixedArray::kHeaderSize));
__ CompareRoot(edi, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
@@ -1977,6 +2158,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&done);
__ PushReturnAddressFrom(edx);
}
+#endif // !V8_REVERSE_JSARGS
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
@@ -2203,7 +2385,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- edx : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -----------------------------------
-
__ movd(xmm0, edx); // Spill edx.
// Load [[BoundArguments]] into ecx and length of that into edx.
@@ -2213,6 +2394,67 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntag(edx);
__ test(edx, edx);
__ j(zero, &no_bound_arguments);
+#ifdef V8_REVERSE_JSARGS
+ {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- xmm0 : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- edx : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Check the stack for overflow.
+ {
+ Label done, stack_overflow;
+ Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
+ __ jmp(&done);
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ int3();
+ }
+ __ bind(&done);
+ }
+
+ // Spill context.
+ __ movd(xmm3, esi);
+
+ // Save the return address and the receiver into registers.
+ __ pop(esi);
+ __ movd(xmm1, esi);
+ __ pop(esi);
+ __ movd(xmm2, esi);
+
+ // Push [[BoundArguments]] to the stack.
+ {
+ Label loop;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(edx);
+ // Adjust effective number of arguments (eax contains the number of
+ // arguments from the call not including receiver plus the number of
+ // [[BoundArguments]]).
+ __ add(eax, edx);
+ __ bind(&loop);
+ __ dec(edx);
+ __ mov(esi, FieldOperand(ecx, edx, times_tagged_size,
+ FixedArray::kHeaderSize));
+ __ push(esi);
+ __ j(greater, &loop);
+ }
+
+ // Restore Receiver and Return Address.
+ __ movd(esi, xmm2);
+ __ push(esi);
+ __ movd(esi, xmm1);
+ __ push(esi);
+
+ // Restore context.
+ __ movd(esi, xmm3);
+ }
+#else // !V8_REVERSE_JSARGS
{
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
@@ -2241,7 +2483,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
}
__ bind(&done);
}
-
#if V8_OS_WIN
// Correctly allocate the stack space that was checked above.
{
@@ -2291,6 +2532,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// [[BoundArguments]]), so we need to subtract one for the return address.
__ dec(eax);
}
+#endif // !V8_REVERSE_JSARGS
__ bind(&no_bound_arguments);
__ movd(edx, xmm0); // Reload edx.
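In the V8_REVERSE_JSARGS branch above, the [[BoundArguments]] have to be spliced in between the receiver/return address and the arguments that were already pushed, which is why the stub pops the top two slots, pushes the bound arguments, and then pushes the two slots back. A rough model of the resulting layout (illustrative only, not V8 code):

#include <vector>

// Model the stack as a vector whose back() is the top. On entry the top two
// entries are the return address and the receiver; below them are the call
// arguments. Bound arguments are inserted so that they become the leading
// arguments of the effective call.
std::vector<int> SpliceBoundArgs(std::vector<int> stack,
                                 const std::vector<int>& bound_args) {
  int return_address = stack.back();
  stack.pop_back();
  int receiver = stack.back();
  stack.pop_back();
  // Push bound arguments last-to-first so bound_args[0] ends up just below
  // the receiver, exactly where the first argument is expected.
  for (auto it = bound_args.rbegin(); it != bound_args.rend(); ++it)
    stack.push_back(*it);
  stack.push_back(receiver);
  stack.push_back(return_address);
  return stack;
}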
@@ -2518,7 +2760,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
+#ifdef V8_REVERSE_JSARGS
+ __ lea(edi, Operand(ebp, ecx, times_system_pointer_size, offset));
+#else
__ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
+#endif
__ mov(eax, -1); // account for receiver
Label copy;
@@ -2543,6 +2789,35 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Remember expected arguments in xmm0.
__ movd(xmm0, kExpectedNumberOfArgumentsRegister);
+#ifdef V8_REVERSE_JSARGS
+ // Remember new target.
+ __ movd(xmm1, edx);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ mov(edx, ecx);
+ __ sub(edx, eax);
+ __ bind(&fill);
+ __ Push(Immediate(masm->isolate()->factory()->undefined_value()));
+ __ dec(edx);
+ __ j(greater, &fill);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
+ __ mov(edx, Immediate(-1));
+
+ Label copy;
+ __ bind(&copy);
+ __ inc(edx);
+ __ push(Operand(edi, 0));
+ __ sub(edi, Immediate(kSystemPointerSize));
+ __ cmp(edx, eax);
+ __ j(less, &copy);
+
+ // Restore new.target
+ __ movd(edx, xmm1);
+#else // !V8_REVERSE_JSARGS
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
@@ -2567,6 +2842,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Push(Immediate(masm->isolate()->factory()->undefined_value()));
__ cmp(eax, kExpectedNumberOfArgumentsRegister);
__ j(less, &fill);
+#endif // !V8_REVERSE_JSARGS
// Restore expected arguments.
__ movd(eax, xmm0);
@@ -3153,6 +3429,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- esp[argc * 4] : first argument
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ // NOTE: The order of args is reversed if V8_REVERSE_JSARGS
Register api_function_address = edx;
Register argc = ecx;
@@ -3222,8 +3499,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
+#ifdef V8_REVERSE_JSARGS
+ __ lea(scratch,
+ Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
+#else
__ lea(scratch, Operand(scratch, argc, times_system_pointer_size,
(FCA::kArgsLength - 1) * kSystemPointerSize));
+#endif
__ mov(ApiParameterOperand(kApiArgc + 1), scratch);
// FunctionCallbackInfo::length_.
diff --git a/deps/v8/src/builtins/ic-callable.tq b/deps/v8/src/builtins/ic-callable.tq
new file mode 100644
index 0000000000..95e107a9a6
--- /dev/null
+++ b/deps/v8/src/builtins/ic-callable.tq
@@ -0,0 +1,183 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace ic {
+namespace callable {
+
+extern macro IncrementCallCount(FeedbackVector, uintptr): void;
+
+macro IsMonomorphic(feedback: MaybeObject, target: JSAny): bool {
+ return IsWeakReferenceToObject(feedback, target);
+}
+
+macro InSameNativeContext(lhs: Context, rhs: Context): bool {
+ return LoadNativeContext(lhs) == LoadNativeContext(rhs);
+}
+
+macro MaybeObjectToStrong(maybeObject: MaybeObject):
+ HeapObject labels IfCleared {
+ assert(IsWeakOrCleared(maybeObject));
+ const weakObject = %RawDownCast<Weak<HeapObject>>(maybeObject);
+ return WeakToStrong(weakObject) otherwise IfCleared;
+}
+
+macro TryInitializeAsMonomorphic(implicit context: Context)(
+ maybeTarget: JSAny, feedbackVector: FeedbackVector,
+ slotId: uintptr): void labels TransitionToMegamorphic {
+ const targetHeapObject =
+ Cast<HeapObject>(maybeTarget) otherwise TransitionToMegamorphic;
+
+ let unwrappedTarget = targetHeapObject;
+ while (Is<JSBoundFunction>(unwrappedTarget)) {
+ unwrappedTarget =
+ UnsafeCast<JSBoundFunction>(unwrappedTarget).bound_target_function;
+ }
+
+ const unwrappedTargetJSFunction =
+ Cast<JSFunction>(unwrappedTarget) otherwise TransitionToMegamorphic;
+ if (!InSameNativeContext(unwrappedTargetJSFunction.context, context)) {
+ goto TransitionToMegamorphic;
+ }
+
+ StoreWeakReferenceInFeedbackVector(feedbackVector, slotId, targetHeapObject);
+ ReportFeedbackUpdate(feedbackVector, slotId, 'Call:Initialize');
+}
+
+macro TransitionToMegamorphic(implicit context: Context)(
+ feedbackVector: FeedbackVector, slotId: uintptr): void {
+ StoreFeedbackVectorSlot(feedbackVector, slotId, kMegamorphicSymbol);
+ ReportFeedbackUpdate(feedbackVector, slotId, 'Call:TransitionMegamorphic');
+}
+
+macro CollectCallFeedback(
+ maybeTarget: JSAny, context: Context,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
+ const feedbackVector =
+ Cast<FeedbackVector>(maybeFeedbackVector) otherwise return;
+ IncrementCallCount(feedbackVector, slotId);
+
+ try {
+ const feedback: MaybeObject =
+ LoadFeedbackVectorSlot(feedbackVector, slotId);
+ if (IsMonomorphic(feedback, maybeTarget)) return;
+ if (IsMegamorphic(feedback)) return;
+ if (IsUninitialized(feedback)) goto TryInitializeAsMonomorphic;
+
+ // If cleared, we have a new chance to become monomorphic.
+ const feedbackValue: HeapObject =
+ MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic;
+
+ // Try transitioning to a feedback cell.
+ // Check if {target}'s feedback cell matches the {feedbackValue}.
+ const target =
+ Cast<JSFunction>(maybeTarget) otherwise TransitionToMegamorphic;
+ const targetFeedbackCell: FeedbackCell = target.feedback_cell;
+ if (TaggedEqual(feedbackValue, targetFeedbackCell)) return;
+
+ // Check if {target} and {feedbackValue} are both JSFunctions with
+ // the same feedback vector cell, and that those functions were
+ // actually compiled already.
+ const feedbackValueJSFunction =
+ Cast<JSFunction>(feedbackValue) otherwise TransitionToMegamorphic;
+ const feedbackCell: FeedbackCell = feedbackValueJSFunction.feedback_cell;
+ if (!TaggedEqual(feedbackCell, targetFeedbackCell))
+ goto TransitionToMegamorphic;
+
+ StoreWeakReferenceInFeedbackVector(feedbackVector, slotId, feedbackCell);
+ ReportFeedbackUpdate(feedbackVector, slotId, 'Call:FeedbackVectorCell');
+ } label TryInitializeAsMonomorphic {
+ TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ otherwise TransitionToMegamorphic;
+ } label TransitionToMegamorphic {
+ TransitionToMegamorphic(feedbackVector, slotId);
+ }
+}
+
+macro CollectInstanceOfFeedback(
+ maybeTarget: JSAny, context: Context,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
+ const feedbackVector =
+ Cast<FeedbackVector>(maybeFeedbackVector) otherwise return;
+ // Note: The call count is not incremented.
+
+ try {
+ const feedback: MaybeObject =
+ LoadFeedbackVectorSlot(feedbackVector, slotId);
+ if (IsMonomorphic(feedback, maybeTarget)) return;
+ if (IsMegamorphic(feedback)) return;
+ if (IsUninitialized(feedback)) goto TryInitializeAsMonomorphic;
+
+ // If cleared, we have a new chance to become monomorphic.
+ const _feedbackValue: HeapObject =
+ MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic;
+
+ goto TransitionToMegamorphic;
+ } label TryInitializeAsMonomorphic {
+ TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ otherwise TransitionToMegamorphic;
+ } label TransitionToMegamorphic {
+ TransitionToMegamorphic(feedbackVector, slotId);
+ }
+}
+
+macro BothTaggedEqualArrayFunction(implicit context: Context)(
+ first: JSAny, second: JSAny): bool {
+ return TaggedEqual(first, second) && TaggedEqual(second, GetArrayFunction());
+}
+
+extern macro CreateAllocationSiteInFeedbackVector(
+ FeedbackVector, uintptr): AllocationSite;
+
+macro CollectConstructFeedback(implicit context: Context)(
+ target: JSAny, newTarget: JSAny,
+ maybeFeedbackVector: Undefined|FeedbackVector,
+ slotId: uintptr): never labels ConstructGeneric,
+ ConstructArray(AllocationSite) {
+ const feedbackVector = Cast<FeedbackVector>(maybeFeedbackVector)
+ otherwise goto ConstructGeneric;
+ IncrementCallCount(feedbackVector, slotId);
+
+ try {
+ const feedback: MaybeObject =
+ LoadFeedbackVectorSlot(feedbackVector, slotId);
+ if (IsMonomorphic(feedback, newTarget)) goto ConstructGeneric;
+ if (IsMegamorphic(feedback)) goto ConstructGeneric;
+ if (IsUninitialized(feedback)) goto TryInitializeAsMonomorphic;
+
+ if (!IsWeakOrCleared(feedback)) {
+ const feedbackAsStrong = %RawDownCast<Object>(feedback);
+ if (Is<AllocationSite>(feedbackAsStrong)) {
+ if (BothTaggedEqualArrayFunction(target, newTarget)) {
+ goto ConstructArray(UnsafeCast<AllocationSite>(feedbackAsStrong));
+ }
+ goto TransitionToMegamorphic;
+ }
+ }
+
+ // If cleared, we have a new chance to become monomorphic.
+ const _feedbackValue: HeapObject =
+ MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic;
+
+ goto TransitionToMegamorphic;
+ } label TryInitializeAsMonomorphic {
+ if (BothTaggedEqualArrayFunction(target, newTarget)) {
+ // In this case we can skip unwrapping and context validation since we
+ // know the target is the current context's array function.
+ const allocationSite =
+ CreateAllocationSiteInFeedbackVector(feedbackVector, slotId);
+ ReportFeedbackUpdate(
+ feedbackVector, slotId, 'Construct:CreateAllocationSite');
+ goto ConstructArray(allocationSite);
+ }
+
+ TryInitializeAsMonomorphic(newTarget, feedbackVector, slotId)
+ otherwise TransitionToMegamorphic;
+ } label TransitionToMegamorphic {
+ TransitionToMegamorphic(feedbackVector, slotId);
+ }
+ goto ConstructGeneric;
+}
+
+} // namespace callable
+} // namespace ic
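CollectCallFeedback, CollectInstanceOfFeedback and CollectConstructFeedback above all walk the same per-slot lattice: uninitialized, then monomorphic (a weak reference to the target, or a shared feedback cell), then megamorphic, with a cleared weak reference giving the slot a second chance at monomorphism. A deliberately simplified sketch of that state machine (illustrative only; it ignores the feedback-cell sharing, allocation-site and cleared-reference paths):

#include <cstdint>

// Hypothetical model of a single call-feedback slot.
enum class FeedbackState : std::uint8_t {
  kUninitialized,
  kMonomorphic,
  kMegamorphic,
};

struct FeedbackSlot {
  FeedbackState state = FeedbackState::kUninitialized;
  const void* target = nullptr;  // stands in for the weak reference
  int call_count = 0;
};

void CollectCallFeedback(FeedbackSlot& slot, const void* callee) {
  ++slot.call_count;  // analogue of IncrementCallCount
  switch (slot.state) {
    case FeedbackState::kMegamorphic:
      return;  // nothing left to learn
    case FeedbackState::kMonomorphic:
      if (slot.target == callee) return;   // still monomorphic
      slot.state = FeedbackState::kMegamorphic;  // call site went polymorphic
      slot.target = nullptr;
      return;
    case FeedbackState::kUninitialized:
      slot.state = FeedbackState::kMonomorphic;  // Call:Initialize
      slot.target = callee;
      return;
  }
}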
diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq
new file mode 100644
index 0000000000..f6fecc557f
--- /dev/null
+++ b/deps/v8/src/builtins/ic.tq
@@ -0,0 +1,59 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace ic {
+
+// --- The public interface (forwards to the actual implementation).
+
+@export
+macro CollectCallFeedback(
+ maybeTarget: JSAny, context: Context,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
+ callable::CollectCallFeedback(
+ maybeTarget, context, maybeFeedbackVector, slotId);
+}
+
+@export
+macro CollectInstanceOfFeedback(
+ maybeTarget: JSAny, context: Context,
+ maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
+ callable::CollectInstanceOfFeedback(
+ maybeTarget, context, maybeFeedbackVector, slotId);
+}
+
+@export
+macro CollectConstructFeedback(implicit context: Context)(
+ target: JSAny, newTarget: JSAny,
+ maybeFeedbackVector: Undefined|FeedbackVector,
+ slotId: uintptr): never labels ConstructGeneric,
+ ConstructArray(AllocationSite) {
+ callable::CollectConstructFeedback(
+ target, newTarget, maybeFeedbackVector, slotId)
+ otherwise ConstructGeneric, ConstructArray;
+}
+
+// --- Common functionality.
+
+extern macro MegamorphicSymbolConstant(): Symbol;
+extern macro UninitializedSymbolConstant(): Symbol;
+
+const kMegamorphicSymbol: Symbol = MegamorphicSymbolConstant();
+const kUninitializedSymbol: Symbol = UninitializedSymbolConstant();
+
+macro IsMegamorphic(feedback: MaybeObject): bool {
+ return TaggedEqual(feedback, kMegamorphicSymbol);
+}
+
+macro IsUninitialized(feedback: MaybeObject): bool {
+ return TaggedEqual(feedback, kUninitializedSymbol);
+}
+
+extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject;
+extern macro StoreFeedbackVectorSlot(
+ FeedbackVector, uintptr, MaybeObject): void;
+extern macro StoreWeakReferenceInFeedbackVector(
+ FeedbackVector, uintptr, HeapObject): MaybeObject;
+extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
+
+} // namespace ic
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index 65cb207eaa..07bfc40d8f 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -6,33 +6,34 @@
namespace internal_coverage {
- macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
- CoverageInfo labels IfNoCoverageInfo {
- const shared: SharedFunctionInfo = function.shared_function_info;
- const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
- otherwise goto IfNoCoverageInfo;
-
- if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
- return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
- }
-
- macro IncrementBlockCount(implicit context: Context)(
- coverageInfo: CoverageInfo, slot: Smi) {
- assert(Convert<int32>(slot) < coverageInfo.slot_count);
- ++coverageInfo.slots[slot].block_count;
- }
-
- builtin IncBlockCounter(implicit context: Context)(
- function: JSFunction, coverageArraySlotIndex: Smi): Undefined {
- // It's quite possible that a function contains IncBlockCounter bytecodes,
- // but no coverage info exists. This happens e.g. by selecting the
- // best-effort coverage collection mode, which triggers deletion of all
- // coverage infos in order to avoid memory leaks.
-
- const coverageInfo: CoverageInfo =
- GetCoverageInfo(function) otherwise return Undefined;
- IncrementBlockCount(coverageInfo, coverageArraySlotIndex);
- return Undefined;
- }
+macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
+ CoverageInfo labels IfNoCoverageInfo {
+ const shared: SharedFunctionInfo = function.shared_function_info;
+ const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
+ otherwise goto IfNoCoverageInfo;
+
+ if (!debugInfo.flags.has_coverage_info) goto IfNoCoverageInfo;
+ return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
+}
+
+macro IncrementBlockCount(implicit context: Context)(
+ coverageInfo: CoverageInfo, slot: Smi) {
+ assert(Convert<int32>(slot) < coverageInfo.slot_count);
+ ++coverageInfo.slots[slot].block_count;
+}
+
+builtin IncBlockCounter(
+ implicit context:
+ Context)(function: JSFunction, coverageArraySlotIndex: Smi): Undefined {
+ // It's quite possible that a function contains IncBlockCounter bytecodes,
+ // but no coverage info exists. This happens e.g. by selecting the
+ // best-effort coverage collection mode, which triggers deletion of all
+ // coverage infos in order to avoid memory leaks.
+
+ const coverageInfo: CoverageInfo =
+ GetCoverageInfo(function) otherwise return Undefined;
+ IncrementBlockCount(coverageInfo, coverageArraySlotIndex);
+ return Undefined;
+}
} // namespace internal_coverage
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 272a2a7db8..1354c434e7 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -5,105 +5,98 @@
#include 'src/builtins/builtins-iterator-gen.h'
namespace iterator {
- // Returned from IteratorBuiltinsAssembler::GetIterator().
- @export
- struct IteratorRecord {
- // iteratorRecord.[[Iterator]]
- object: JSReceiver;
-
- // iteratorRecord.[[NextMethod]]
- next: JSAny;
- }
+// Returned from IteratorBuiltinsAssembler::GetIterator().
+@export
+struct IteratorRecord {
+ // iteratorRecord.[[Iterator]]
+ object: JSReceiver;
+
+ // iteratorRecord.[[NextMethod]]
+ next: JSAny;
+}
- extern macro IteratorBuiltinsAssembler::FastIterableToList(
- implicit context: Context)(JSAny): JSArray labels Slow;
-
- extern macro IteratorBuiltinsAssembler::GetIteratorMethod(
- implicit context: Context)(JSAny): JSAny;
- extern macro IteratorBuiltinsAssembler::GetIterator(
- implicit context: Context)(JSAny): IteratorRecord;
- extern macro IteratorBuiltinsAssembler::GetIterator(
- implicit context: Context)(JSAny, JSAny): IteratorRecord;
-
- extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord): JSReceiver
- labels Done;
- extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord, Map): JSReceiver
- labels Done;
-
- extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(JSReceiver): JSAny;
- extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(JSReceiver, Map): JSAny;
-
- extern macro IteratorBuiltinsAssembler::IteratorCloseOnException(
- implicit context: Context)(IteratorRecord, JSAny): never;
-
- extern macro IteratorBuiltinsAssembler::IterableToList(
- implicit context: Context)(JSAny, JSAny): JSArray;
-
- extern macro IteratorBuiltinsAssembler::StringListFromIterable(
- implicit context: Context)(JSAny): JSArray;
-
- extern builtin IterableToListMayPreserveHoles(implicit context:
- Context)(JSAny, JSAny);
- extern builtin IterableToListWithSymbolLookup(implicit context:
- Context)(JSAny);
-
- transitioning builtin GetIteratorWithFeedback(
- context: Context, receiver: JSAny, loadSlot: TaggedIndex,
- callSlot: TaggedIndex, feedback: Undefined|FeedbackVector): JSAny {
- let iteratorMethod: JSAny;
- typeswitch (feedback) {
- case (Undefined): {
- iteratorMethod = GetProperty(receiver, IteratorSymbolConstant());
- }
- case (feedback: FeedbackVector): {
- iteratorMethod = LoadIC(
- context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
- }
+extern macro IteratorBuiltinsAssembler::FastIterableToList(
+ implicit context: Context)(JSAny): JSArray labels Slow;
+
+extern macro IteratorBuiltinsAssembler::GetIteratorMethod(
+ implicit context: Context)(JSAny): JSAny;
+extern macro IteratorBuiltinsAssembler::GetIterator(implicit context: Context)(
+ JSAny): IteratorRecord;
+extern macro IteratorBuiltinsAssembler::GetIterator(implicit context: Context)(
+ JSAny, JSAny): IteratorRecord;
+
+extern macro IteratorBuiltinsAssembler::IteratorStep(implicit context: Context)(
+ IteratorRecord): JSReceiver
+ labels Done;
+extern macro IteratorBuiltinsAssembler::IteratorStep(implicit context: Context)(
+ IteratorRecord, Map): JSReceiver
+ labels Done;
+
+extern macro IteratorBuiltinsAssembler::IteratorValue(
+ implicit context: Context)(JSReceiver): JSAny;
+extern macro IteratorBuiltinsAssembler::IteratorValue(
+ implicit context: Context)(JSReceiver, Map): JSAny;
+
+extern macro IteratorBuiltinsAssembler::IterableToList(
+ implicit context: Context)(JSAny, JSAny): JSArray;
+
+extern macro IteratorBuiltinsAssembler::StringListFromIterable(
+ implicit context: Context)(JSAny): JSArray;
+
+extern builtin IterableToListWithSymbolLookup(implicit context: Context)(JSAny):
+ JSArray;
+extern builtin IterableToFixedArrayWithSymbolLookupSlow(
+ implicit context: Context)(JSAny): FixedArray;
+
+transitioning builtin GetIteratorWithFeedback(
+ context: Context, receiver: JSAny, loadSlot: TaggedIndex,
+ callSlot: TaggedIndex, feedback: Undefined|FeedbackVector): JSAny {
+ let iteratorMethod: JSAny;
+ typeswitch (feedback) {
+ case (Undefined): {
+ iteratorMethod = GetProperty(receiver, IteratorSymbolConstant());
+ }
+ case (feedback: FeedbackVector): {
+ iteratorMethod = LoadIC(
+ context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
}
- // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
- const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
- return CallIteratorWithFeedback(
- context, receiver, iteratorMethod, callSlotSmi, feedback);
}
+ // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
+ const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
+ return CallIteratorWithFeedback(
+ context, receiver, iteratorMethod, callSlotSmi, feedback);
+}
- transitioning builtin CallIteratorWithFeedback(
- context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
- feedback: Undefined|FeedbackVector): JSAny {
- const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
- CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
- const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
- otherwise ThrowCalledNonCallable(iteratorMethod);
- return Call(context, iteratorCallable, receiver);
- }
+transitioning builtin CallIteratorWithFeedback(
+ context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
+ feedback: Undefined|FeedbackVector): JSAny {
+ const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
+ ic::CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
+ const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
+ otherwise ThrowCalledNonCallable(iteratorMethod);
+ return Call(context, iteratorCallable, receiver);
+}
- transitioning
- macro IteratorCloseOnException(implicit context: Context)(
- iterator: IteratorRecord, exception: Object): never labels
- IfException(Object) {
- // Let return be ? GetMethod(iterator, "return").
- let method: JSAny;
- try {
- method = GetProperty(iterator.object, kReturnString);
- } catch (e) {
- goto IfException(e);
- }
+// https://tc39.es/ecma262/#sec-iteratorclose
+@export
+transitioning macro IteratorCloseOnException(implicit context: Context)(
+ iterator: IteratorRecord) {
+ try {
+ // 4. Let innerResult be GetMethod(iterator, "return").
+ const method = GetProperty(iterator.object, kReturnString);
- // If return is undefined, return Completion(completion).
- if (method == Undefined || method == Null) goto IfException(exception);
+ // 5. If innerResult.[[Type]] is normal, then
+ // a. Let return be innerResult.[[Value]].
+ // b. If return is undefined, return Completion(completion).
+ if (method == Undefined || method == Null) return;
- // Let innerResult be Call(return, iterator, Ā« Ā»).
+ // c. Set innerResult to Call(return, iterator).
// If an exception occurs, the original exception remains bound
- try {
- Call(context, method, iterator.object);
- } catch (_e) {
- goto IfException(exception);
- }
-
- // (If completion.[[Type]] is throw) return Completion(completion).
- goto IfException(exception);
+ Call(context, method, iterator.object);
+ } catch (_e) {
+ // Swallow the exception.
}
+
+ // (If completion.[[Type]] is throw) return Completion(completion).
+}
}
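The rewritten IteratorCloseOnException now follows the spec's IteratorClose steps for a throw completion directly: look up "return", call it if present, and swallow any secondary exception so the original one keeps propagating in the caller. The control flow, stripped to its essence (illustrative only, not the V8 implementation):

#include <functional>

// Close an iterator while an exception is already propagating in the caller;
// a failure in the close hook must never replace that original exception.
void IteratorCloseOnException(const std::function<void()>& return_hook) {
  try {
    if (return_hook) return_hook();  // 5.c Call(return, iterator)
  } catch (...) {
    // Swallow the secondary exception; the original completion wins.
  }
}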
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
index 8c9998906a..0586f432f5 100644
--- a/deps/v8/src/builtins/math.tq
+++ b/deps/v8/src/builtins/math.tq
@@ -4,468 +4,456 @@
namespace math {
- extern transitioning builtin
- NonNumberToNumber(implicit context: Context)(HeapObject): Number;
-
- transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny):
- never
- labels SmiResult(Smi), Float64Result(float64) {
- let x1: JSAny = x;
- while (true) {
- typeswitch (x1) {
- case (s: Smi): {
- goto SmiResult(s);
- }
- case (h: HeapNumber): {
- goto Float64Result(Convert<float64>(h));
- }
- case (a: JSAnyNotNumber): {
- x1 = NonNumberToNumber(a);
- }
+extern transitioning builtin
+NonNumberToNumber(implicit context: Context)(HeapObject): Number;
+
+transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny):
+ never
+ labels SmiResult(Smi), Float64Result(float64) {
+ let x1: JSAny = x;
+ while (true) {
+ typeswitch (x1) {
+ case (s: Smi): {
+ goto SmiResult(s);
+ }
+ case (h: HeapNumber): {
+ goto Float64Result(Convert<float64>(h));
+ }
+ case (a: JSAnyNotNumber): {
+ x1 = NonNumberToNumber(a);
}
}
- VerifiedUnreachable();
}
+ VerifiedUnreachable();
+}
- // ES6 #sec-math.abs
- extern macro IsIntPtrAbsWithOverflowSupported(): constexpr bool;
- extern macro TrySmiSub(Smi, Smi): Smi labels Overflow;
- extern macro TrySmiAbs(Smi): Smi labels Overflow;
- extern macro Float64Abs(float64): float64;
- const kSmiMaxValuePlusOne:
- constexpr float64 generates '0.0 - kSmiMinValue';
-
- transitioning javascript builtin
- MathAbs(js-implicit context: NativeContext)(x: JSAny): Number {
+// ES6 #sec-math.abs
+extern macro IsIntPtrAbsWithOverflowSupported(): constexpr bool;
+extern macro TrySmiSub(Smi, Smi): Smi labels Overflow;
+extern macro TrySmiAbs(Smi): Smi labels Overflow;
+extern macro Float64Abs(float64): float64;
+const kSmiMaxValuePlusOne:
+ constexpr float64 generates '0.0 - kSmiMinValue';
+
+transitioning javascript builtin
+MathAbs(js-implicit context: NativeContext)(x: JSAny): Number {
+ try {
+ ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
+ } label SmiResult(s: Smi) {
try {
- ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
- }
- label SmiResult(s: Smi) {
- try {
- if constexpr (IsIntPtrAbsWithOverflowSupported()) {
- const result: Smi = TrySmiAbs(s)
- otherwise SmiOverflow;
- return result;
+ if constexpr (IsIntPtrAbsWithOverflowSupported()) {
+ const result: Smi = TrySmiAbs(s)
+ otherwise SmiOverflow;
+ return result;
+ } else {
+ if (0 <= s) {
+ return s;
} else {
- if (0 <= s) {
- return s;
- } else {
- const result: Smi = TrySmiSub(0, s) otherwise SmiOverflow;
- return result;
- }
+ const result: Smi = TrySmiSub(0, s) otherwise SmiOverflow;
+ return result;
}
}
- label SmiOverflow {
- return NumberConstant(kSmiMaxValuePlusOne);
- }
- }
- label Float64Result(f: float64) {
- return Convert<Number>(Float64Abs(f));
+ } label SmiOverflow {
+ return NumberConstant(kSmiMaxValuePlusOne);
}
+ } label Float64Result(f: float64) {
+ return Convert<Number>(Float64Abs(f));
}
+}
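MathAbs needs the dedicated SmiOverflow label above because the absolute value of the most negative Smi does not fit back into a Smi; that single input is materialized as the heap number kSmiMaxValuePlusOne instead. The same hazard in plain C++ (illustrative only):

#include <cstdint>
#include <limits>

// Absolute value of a 32-bit integer, widening on the one input whose
// negation overflows -- the analogue of MathAbs taking the SmiOverflow path.
std::int64_t SafeAbs(std::int32_t v) {
  if (v == std::numeric_limits<std::int32_t>::min()) {
    return -static_cast<std::int64_t>(v);  // widen before negating
  }
  return v < 0 ? -v : v;
}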
- // ES6 #sec-math.ceil
- extern macro Float64Ceil(float64): float64;
- transitioning javascript builtin
- MathCeil(js-implicit context: NativeContext)(x: JSAny): Number {
- try {
- ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
- }
- label SmiResult(s: Smi) {
- return s;
- }
- label Float64Result(f: float64) {
- return Convert<Number>(Float64Ceil(f));
- }
+// ES6 #sec-math.ceil
+extern macro Float64Ceil(float64): float64;
+transitioning javascript builtin
+MathCeil(js-implicit context: NativeContext)(x: JSAny): Number {
+ try {
+ ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
+ } label SmiResult(s: Smi) {
+ return s;
+ } label Float64Result(f: float64) {
+ return Convert<Number>(Float64Ceil(f));
}
+}
- // ES6 #sec-math.floor
- extern macro Float64Floor(float64): float64;
- transitioning javascript builtin
- MathFloor(js-implicit context: NativeContext)(x: JSAny): Number {
- try {
- ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
- }
- label SmiResult(s: Smi) {
- return s;
- }
- label Float64Result(f: float64) {
- return Convert<Number>(Float64Floor(f));
- }
+// ES6 #sec-math.floor
+extern macro Float64Floor(float64): float64;
+transitioning javascript builtin
+MathFloor(js-implicit context: NativeContext)(x: JSAny): Number {
+ try {
+ ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
+ } label SmiResult(s: Smi) {
+ return s;
+ } label Float64Result(f: float64) {
+ return Convert<Number>(Float64Floor(f));
}
+}
- // ES6 #sec-math.round
- extern macro Float64Round(float64): float64;
- transitioning javascript builtin
- MathRound(js-implicit context: NativeContext)(x: JSAny): Number {
- try {
- ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
- }
- label SmiResult(s: Smi) {
- return s;
- }
- label Float64Result(f: float64) {
- return Convert<Number>(Float64Round(f));
- }
+// ES6 #sec-math.round
+extern macro Float64Round(float64): float64;
+transitioning javascript builtin
+MathRound(js-implicit context: NativeContext)(x: JSAny): Number {
+ try {
+ ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
+ } label SmiResult(s: Smi) {
+ return s;
+ } label Float64Result(f: float64) {
+ return Convert<Number>(Float64Round(f));
}
+}
- // ES6 #sec-math.trunc
- extern macro Float64Trunc(float64): float64;
- transitioning javascript builtin
- MathTrunc(js-implicit context: NativeContext)(x: JSAny): Number {
- try {
- ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
- }
- label SmiResult(s: Smi) {
- return s;
- }
- label Float64Result(f: float64) {
- return Convert<Number>(Float64Trunc(f));
- }
+// ES6 #sec-math.trunc
+extern macro Float64Trunc(float64): float64;
+transitioning javascript builtin
+MathTrunc(js-implicit context: NativeContext)(x: JSAny): Number {
+ try {
+ ReduceToSmiOrFloat64(x) otherwise SmiResult, Float64Result;
+ } label SmiResult(s: Smi) {
+ return s;
+ } label Float64Result(f: float64) {
+ return Convert<Number>(Float64Trunc(f));
}
+}
- // ES6 #sec-math.pow
- extern macro Float64Pow(float64, float64): float64;
- extern macro TruncateTaggedToFloat64(implicit context: Context)(JSAny):
- float64;
-
- @export
- macro MathPowImpl(implicit context: Context)(base: JSAny, exponent: JSAny):
- Number {
- const baseValue: float64 = TruncateTaggedToFloat64(base);
- const exponentValue: float64 = TruncateTaggedToFloat64(exponent);
- const result: float64 = Float64Pow(baseValue, exponentValue);
- return Convert<Number>(result);
- }
+// ES6 #sec-math.pow
+extern macro Float64Pow(float64, float64): float64;
+extern macro TruncateTaggedToFloat64(implicit context: Context)(JSAny): float64;
+
+@export
+macro MathPowImpl(implicit context: Context)(base: JSAny, exponent: JSAny):
+ Number {
+ const baseValue: float64 = TruncateTaggedToFloat64(base);
+ const exponentValue: float64 = TruncateTaggedToFloat64(exponent);
+ const result: float64 = Float64Pow(baseValue, exponentValue);
+ return Convert<Number>(result);
+}
- transitioning javascript builtin
- MathPow(js-implicit context: NativeContext)(base: JSAny, exponent: JSAny):
- Number {
- return MathPowImpl(base, exponent);
- }
+transitioning javascript builtin
+MathPow(js-implicit context: NativeContext)(
+ base: JSAny, exponent: JSAny): Number {
+ return MathPowImpl(base, exponent);
+}
- // ES6 #sec-math.max
- extern macro Float64Max(float64, float64): float64;
- transitioning javascript builtin
- MathMax(js-implicit context: NativeContext)(...arguments): Number {
- let result: float64 = MINUS_V8_INFINITY;
- const argCount = arguments.length;
- for (let i: intptr = 0; i < argCount; i++) {
- const doubleValue = TruncateTaggedToFloat64(arguments[i]);
- result = Float64Max(result, doubleValue);
- }
- return Convert<Number>(result);
- }
+// ES6 #sec-math.max
+extern macro Float64Max(float64, float64): float64;
+transitioning javascript builtin
+MathMax(js-implicit context: NativeContext)(...arguments): Number {
+ let result: float64 = MINUS_V8_INFINITY;
+ const argCount = arguments.length;
+ for (let i: intptr = 0; i < argCount; i++) {
+ const doubleValue = TruncateTaggedToFloat64(arguments[i]);
+ result = Float64Max(result, doubleValue);
+ }
+ return Convert<Number>(result);
+}
- // ES6 #sec-math.min
- extern macro Float64Min(float64, float64): float64;
- transitioning javascript builtin
- MathMin(js-implicit context: NativeContext)(...arguments): Number {
- let result: float64 = V8_INFINITY;
- const argCount = arguments.length;
- for (let i: intptr = 0; i < argCount; i++) {
- const doubleValue = TruncateTaggedToFloat64(arguments[i]);
- result = Float64Min(result, doubleValue);
- }
- return Convert<Number>(result);
- }
+// ES6 #sec-math.min
+extern macro Float64Min(float64, float64): float64;
+transitioning javascript builtin
+MathMin(js-implicit context: NativeContext)(...arguments): Number {
+ let result: float64 = V8_INFINITY;
+ const argCount = arguments.length;
+ for (let i: intptr = 0; i < argCount; i++) {
+ const doubleValue = TruncateTaggedToFloat64(arguments[i]);
+ result = Float64Min(result, doubleValue);
+ }
+ return Convert<Number>(result);
+}
- // ES6 #sec-math.acos
- extern macro Float64Acos(float64): float64;
+// ES6 #sec-math.acos
+extern macro Float64Acos(float64): float64;
- transitioning javascript builtin
- MathAcos(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Acos(value));
- }
+transitioning javascript builtin
+MathAcos(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Acos(value));
+}
- // ES6 #sec-math.acosh
- extern macro Float64Acosh(float64): float64;
+// ES6 #sec-math.acosh
+extern macro Float64Acosh(float64): float64;
- transitioning javascript builtin
- MathAcosh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Acosh(value));
- }
+transitioning javascript builtin
+MathAcosh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Acosh(value));
+}
- // ES6 #sec-math.asin
- extern macro Float64Asin(float64): float64;
+// ES6 #sec-math.asin
+extern macro Float64Asin(float64): float64;
- transitioning javascript builtin
- MathAsin(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Asin(value));
- }
+transitioning javascript builtin
+MathAsin(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Asin(value));
+}
- // ES6 #sec-math.asinh
- extern macro Float64Asinh(float64): float64;
+// ES6 #sec-math.asinh
+extern macro Float64Asinh(float64): float64;
- transitioning javascript builtin
- MathAsinh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Asinh(value));
- }
+transitioning javascript builtin
+MathAsinh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Asinh(value));
+}
- // ES6 #sec-math.atan
- extern macro Float64Atan(float64): float64;
+// ES6 #sec-math.atan
+extern macro Float64Atan(float64): float64;
- transitioning javascript builtin
- MathAtan(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Atan(value));
- }
+transitioning javascript builtin
+MathAtan(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Atan(value));
+}
- // ES6 #sec-math.atan2
- extern macro Float64Atan2(float64, float64): float64;
+// ES6 #sec-math.atan2
+extern macro Float64Atan2(float64, float64): float64;
- transitioning javascript builtin
- MathAtan2(js-implicit context: NativeContext)(y: JSAny, x: JSAny): Number {
- const yValue = Convert<float64>(ToNumber_Inline(y));
- const xValue = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Atan2(yValue, xValue));
- }
+transitioning javascript builtin
+MathAtan2(js-implicit context: NativeContext)(y: JSAny, x: JSAny): Number {
+ const yValue = Convert<float64>(ToNumber_Inline(y));
+ const xValue = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Atan2(yValue, xValue));
+}
- // ES6 #sec-math.atanh
- extern macro Float64Atanh(float64): float64;
+// ES6 #sec-math.atanh
+extern macro Float64Atanh(float64): float64;
- transitioning javascript builtin
- MathAtanh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Atanh(value));
- }
+transitioning javascript builtin
+MathAtanh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Atanh(value));
+}
- // ES6 #sec-math.cbrt
- extern macro Float64Cbrt(float64): float64;
+// ES6 #sec-math.cbrt
+extern macro Float64Cbrt(float64): float64;
- transitioning javascript builtin
- MathCbrt(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Cbrt(value));
- }
+transitioning javascript builtin
+MathCbrt(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Cbrt(value));
+}
- // ES6 #sec-math.clz32
- extern macro Word32Clz(int32): int32;
+// ES6 #sec-math.clz32
+extern macro Word32Clz(int32): int32;
- transitioning javascript builtin
- MathClz32(js-implicit context: NativeContext)(x: JSAny): Number {
- const value: int32 = Convert<int32>(ToNumber_Inline(x));
- return Convert<Number>(Word32Clz(value));
- }
+transitioning javascript builtin
+MathClz32(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value: int32 = Convert<int32>(ToNumber_Inline(x));
+ return Convert<Number>(Word32Clz(value));
+}
- // ES6 #sec-math.cos
- extern macro Float64Cos(float64): float64;
+// ES6 #sec-math.cos
+extern macro Float64Cos(float64): float64;
- transitioning javascript builtin
- MathCos(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Cos(value));
- }
+transitioning javascript builtin
+MathCos(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Cos(value));
+}
- // ES6 #sec-math.cosh
- extern macro Float64Cosh(float64): float64;
+// ES6 #sec-math.cosh
+extern macro Float64Cosh(float64): float64;
- transitioning javascript builtin
- MathCosh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Cosh(value));
- }
+transitioning javascript builtin
+MathCosh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Cosh(value));
+}
- // ES6 #sec-math.exp
- extern macro Float64Exp(float64): float64;
+// ES6 #sec-math.exp
+extern macro Float64Exp(float64): float64;
- transitioning javascript builtin
- MathExp(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Exp(value));
- }
+transitioning javascript builtin
+MathExp(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Exp(value));
+}
- // ES6 #sec-math.expm1
- extern macro Float64Expm1(float64): float64;
+// ES6 #sec-math.expm1
+extern macro Float64Expm1(float64): float64;
- transitioning javascript builtin
- MathExpm1(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Expm1(value));
- }
+transitioning javascript builtin
+MathExpm1(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Expm1(value));
+}
- // ES6 #sec-math.fround
- transitioning javascript builtin
- MathFround(js-implicit context: NativeContext)(x: JSAny): Number {
- const x32 = Convert<float32>(ToNumber_Inline(x));
- const x64 = Convert<float64>(x32);
- return Convert<Number>(x64);
- }
+// ES6 #sec-math.fround
+transitioning javascript builtin
+MathFround(js-implicit context: NativeContext)(x: JSAny): Number {
+ const x32 = Convert<float32>(ToNumber_Inline(x));
+ const x64 = Convert<float64>(x32);
+ return Convert<Number>(x64);
+}
- // ES6 #sec-math.imul
- transitioning javascript builtin
- MathImul(js-implicit context: NativeContext)(x: JSAny, y: JSAny): Number {
- const x = Convert<int32>(ToNumber_Inline(x));
- const y = Convert<int32>(ToNumber_Inline(y));
- return Convert<Number>(x * y);
- }
+// ES6 #sec-math.imul
+transitioning javascript builtin
+MathImul(js-implicit context: NativeContext)(x: JSAny, y: JSAny): Number {
+ const x = Convert<int32>(ToNumber_Inline(x));
+ const y = Convert<int32>(ToNumber_Inline(y));
+ return Convert<Number>(x * y);
+}
- // ES6 #sec-math.log
- extern macro Float64Log(float64): float64;
+// ES6 #sec-math.log
+extern macro Float64Log(float64): float64;
- transitioning javascript builtin
- MathLog(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Log(value));
- }
+transitioning javascript builtin
+MathLog(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Log(value));
+}
- // ES6 #sec-math.log1p
- extern macro Float64Log1p(float64): float64;
+// ES6 #sec-math.log1p
+extern macro Float64Log1p(float64): float64;
- transitioning javascript builtin
- MathLog1p(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Log1p(value));
- }
+transitioning javascript builtin
+MathLog1p(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Log1p(value));
+}
- // ES6 #sec-math.log10
- extern macro Float64Log10(float64): float64;
+// ES6 #sec-math.log10
+extern macro Float64Log10(float64): float64;
- transitioning javascript builtin
- MathLog10(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Log10(value));
- }
+transitioning javascript builtin
+MathLog10(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Log10(value));
+}
- // ES6 #sec-math.log2
- extern macro Float64Log2(float64): float64;
+// ES6 #sec-math.log2
+extern macro Float64Log2(float64): float64;
- transitioning javascript builtin
- MathLog2(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Log2(value));
- }
+transitioning javascript builtin
+MathLog2(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Log2(value));
+}
- // ES6 #sec-math.sin
- extern macro Float64Sin(float64): float64;
+// ES6 #sec-math.sin
+extern macro Float64Sin(float64): float64;
- transitioning javascript builtin
- MathSin(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Sin(value));
- }
+transitioning javascript builtin
+MathSin(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Sin(value));
+}
- // ES6 #sec-math.sign
- transitioning javascript builtin
- MathSign(js-implicit context: NativeContext)(x: JSAny): Number {
- const num = ToNumber_Inline(x);
- const value = Convert<float64>(num);
+// ES6 #sec-math.sign
+transitioning javascript builtin
+MathSign(js-implicit context: NativeContext)(x: JSAny): Number {
+ const num = ToNumber_Inline(x);
+ const value = Convert<float64>(num);
- if (value < 0) {
- return -1;
- } else if (value > 0) {
- return 1;
- } else {
- return num;
- }
+ if (value < 0) {
+ return -1;
+ } else if (value > 0) {
+ return 1;
+ } else {
+ return num;
}
+}
- // ES6 #sec-math.sinh
- extern macro Float64Sinh(float64): float64;
+// ES6 #sec-math.sinh
+extern macro Float64Sinh(float64): float64;
- transitioning javascript builtin
- MathSinh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Sinh(value));
- }
+transitioning javascript builtin
+MathSinh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Sinh(value));
+}
- // ES6 #sec-math.sqrt
- extern macro Float64Sqrt(float64): float64;
+// ES6 #sec-math.sqrt
+extern macro Float64Sqrt(float64): float64;
- transitioning javascript builtin
- MathSqrt(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Sqrt(value));
- }
+transitioning javascript builtin
+MathSqrt(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Sqrt(value));
+}
- // ES6 #sec-math.tan
- extern macro Float64Tan(float64): float64;
+// ES6 #sec-math.tan
+extern macro Float64Tan(float64): float64;
- transitioning javascript builtin
- MathTan(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Tan(value));
- }
+transitioning javascript builtin
+MathTan(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Tan(value));
+}
- // ES6 #sec-math.tanh
- extern macro Float64Tanh(float64): float64;
+// ES6 #sec-math.tanh
+extern macro Float64Tanh(float64): float64;
- transitioning javascript builtin
- MathTanh(js-implicit context: NativeContext)(x: JSAny): Number {
- const value = Convert<float64>(ToNumber_Inline(x));
- return Convert<Number>(Float64Tanh(value));
- }
+transitioning javascript builtin
+MathTanh(js-implicit context: NativeContext)(x: JSAny): Number {
+ const value = Convert<float64>(ToNumber_Inline(x));
+ return Convert<Number>(Float64Tanh(value));
+}
- // ES6 #sec-math.hypot
- transitioning javascript builtin
- MathHypot(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
- Number {
- const length = arguments.length;
- if (length == 0) {
- return 0;
- }
- const absValues = AllocateZeroedFixedDoubleArray(length);
- let oneArgIsNaN: bool = false;
- let max: float64 = 0;
- for (let i: intptr = 0; i < length; ++i) {
- const value = Convert<float64>(ToNumber_Inline(arguments[i]));
- if (Float64IsNaN(value)) {
- oneArgIsNaN = true;
- } else {
- const absValue = Float64Abs(value);
- absValues.floats[i] = Convert<float64_or_hole>(absValue);
- if (absValue > max) {
- max = absValue;
- }
+// ES6 #sec-math.hypot
+transitioning javascript builtin
+MathHypot(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
+ const length = arguments.length;
+ if (length == 0) {
+ return 0;
+ }
+ const absValues = AllocateZeroedFixedDoubleArray(length);
+ let oneArgIsNaN: bool = false;
+ let max: float64 = 0;
+ for (let i: intptr = 0; i < length; ++i) {
+ const value = Convert<float64>(ToNumber_Inline(arguments[i]));
+ if (Float64IsNaN(value)) {
+ oneArgIsNaN = true;
+ } else {
+ const absValue = Float64Abs(value);
+ absValues.floats[i] = Convert<float64_or_hole>(absValue);
+ if (absValue > max) {
+ max = absValue;
}
}
- if (max == V8_INFINITY) {
- return V8_INFINITY;
- } else if (oneArgIsNaN) {
- return kNaN;
- } else if (max == 0) {
- return 0;
- }
- assert(max > 0);
-
- // Kahan summation to avoid rounding errors.
- // Normalize the numbers to the largest one to avoid overflow.
- let sum: float64 = 0;
- let compensation: float64 = 0;
- for (let i: intptr = 0; i < length; ++i) {
- const n = absValues.floats[i].ValueUnsafeAssumeNotHole() / max;
- const summand = n * n - compensation;
- const preliminary = sum + summand;
- compensation = (preliminary - sum) - summand;
- sum = preliminary;
- }
- return Convert<Number>(Float64Sqrt(sum) * max);
}
+ if (max == V8_INFINITY) {
+ return V8_INFINITY;
+ } else if (oneArgIsNaN) {
+ return kNaN;
+ } else if (max == 0) {
+ return 0;
+ }
+ assert(max > 0);
+
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ let sum: float64 = 0;
+ let compensation: float64 = 0;
+ for (let i: intptr = 0; i < length; ++i) {
+ const n = absValues.floats[i].ValueUnsafeAssumeNotHole() / max;
+ const summand = n * n - compensation;
+ const preliminary = sum + summand;
+ compensation = (preliminary - sum) - summand;
+ sum = preliminary;
+ }
+ return Convert<Number>(Float64Sqrt(sum) * max);
+}
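
The hypot fast path above first normalizes every magnitude by the largest one (so squaring cannot overflow) and then accumulates the squares with Kahan-compensated summation to keep rounding error small. A minimal standalone C++ sketch of the same idea, with the NaN/Infinity special cases omitted and illustrative names (this is not V8 code):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Compute sqrt(x0^2 + x1^2 + ...) the way MathHypot above does:
// normalize by the largest |x| to avoid overflow, then use Kahan
// summation so the running sum's rounding error stays bounded.
double hypot_kahan(const std::vector<double>& xs) {
  double max = 0.0;
  for (double x : xs) max = std::max(max, std::fabs(x));
  if (max == 0.0) return 0.0;

  double sum = 0.0;
  double compensation = 0.0;
  for (double x : xs) {
    const double n = std::fabs(x) / max;          // normalized term, <= 1
    const double summand = n * n - compensation;
    const double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand; // error of this addition
    sum = preliminary;
  }
  return std::sqrt(sum) * max;
}

int main() {
  std::printf("%g\n", hypot_kahan({3.0, 4.0}));      // 5
  std::printf("%g\n", hypot_kahan({1e308, 1e308}));  // ~1.41e308, no overflow
}
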
- // ES6 #sec-math.random
- extern macro RefillMathRandom(NativeContext): Smi;
-
- transitioning javascript builtin
- MathRandom(js-implicit context: NativeContext, receiver: JSAny)(): Number {
- let smiIndex: Smi =
- Cast<Smi>(context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX])
- otherwise unreachable;
- if (smiIndex == 0) {
- // refill math random.
- smiIndex = RefillMathRandom(context);
- }
- const newSmiIndex: Smi = smiIndex - 1;
- context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX] = newSmiIndex;
-
- const array: FixedDoubleArray = Cast<FixedDoubleArray>(
- context[NativeContextSlot::MATH_RANDOM_CACHE_INDEX])
- otherwise unreachable;
- const random: float64 =
- array.floats[Convert<intptr>(newSmiIndex)].ValueUnsafeAssumeNotHole();
- return AllocateHeapNumberWithValue(random);
- }
+// ES6 #sec-math.random
+extern macro RefillMathRandom(NativeContext): Smi;
+
+transitioning javascript builtin
+MathRandom(js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ let smiIndex: Smi =
+ Cast<Smi>(context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX])
+ otherwise unreachable;
+ if (smiIndex == 0) {
+ // refill math random.
+ smiIndex = RefillMathRandom(context);
+ }
+ const newSmiIndex: Smi = smiIndex - 1;
+ context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX] = newSmiIndex;
+
+ const array: FixedDoubleArray = Cast<FixedDoubleArray>(
+ context[NativeContextSlot::MATH_RANDOM_CACHE_INDEX])
+ otherwise unreachable;
+ const random: float64 =
+ array.floats[Convert<intptr>(newSmiIndex)].ValueUnsafeAssumeNotHole();
+ return AllocateHeapNumberWithValue(random);
+}
}
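
MathRandom above does not call into the runtime on every invocation: it consumes precomputed doubles from a per-context FixedDoubleArray cache, decrementing an index and only calling RefillMathRandom when the index reaches zero. A rough standalone C++ sketch of that consume-from-cache pattern (illustrative only; the refill here is a stand-in PRNG, not V8's):

#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

// Cache of precomputed random doubles, consumed back-to-front.
// Mirrors the shape of the MathRandom fast path: only when the index
// hits 0 do we pay for a (comparatively expensive) refill.
class RandomCache {
 public:
  double Next() {
    if (index_ == 0) Refill();  // analogous to RefillMathRandom(context)
    --index_;
    return cache_[index_];
  }

 private:
  void Refill() {
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    for (double& d : cache_) d = dist(engine_);
    index_ = cache_.size();
  }

  std::mt19937_64 engine_{std::random_device{}()};
  std::vector<double> cache_ = std::vector<double>(64);
  std::size_t index_ = 0;
};

int main() {
  RandomCache rng;
  for (int i = 0; i < 3; ++i) std::printf("%f\n", rng.Next());
}
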
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index cb1a86db2f..c98961f2ad 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -911,16 +911,25 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Register scratch2, Label* if_return) {
+ Register scratch2, Register scratch3,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
- DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
__ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
@@ -959,10 +968,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ Lsa(scratch2, bytecode_size_table, bytecode, 2);
__ lw(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1134,7 +1156,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
- &do_return);
+ t0, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1412,7 +1434,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
- &if_return);
+ t0, &if_return);
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index baf2d5bfec..babe084bb0 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -930,15 +930,25 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode.
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
- Register scratch2, Label* if_return) {
+ Register scratch2, Register scratch3,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
- DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
- bytecode));
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+ // will restore the original bytecode. In order to simplify the code, we have
+ // a backup of it.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
__ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
@@ -977,10 +987,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end);
+
+ __ bind(&not_jump_loop);
// Otherwise, load the size of the current bytecode and advance the offset.
__ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
__ Lw(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
}
// Generate code for entering a JS function with the interpreter.
@@ -1153,7 +1176,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
- &do_return);
+ a4, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1430,7 +1453,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3,
- &if_return);
+ a4, &if_return);
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
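
Both MIPS ports receive the same change to AdvanceBytecodeOffsetOrReturn: a JumpLoop bytecode is no longer advanced past; instead the offset saved before any Wide/ExtraWide prefix was skipped is restored, so the JumpLoop is simply dispatched again and performs the backwards jump itself. Stripped of register allocation and the wide operand-size table selection, the control flow is roughly the following (a sketch under assumed names, not the actual V8 C++):

#include <cstdint>
#include <cstdio>

enum class Bytecode : uint8_t { kWide, kExtraWide, kJumpLoop, kReturn, kAdd };

// Returns the new bytecode offset, or -1 when the bytecode is a return
// (the caller bails out to its "return" label in that case). A JumpLoop
// is not advanced past: the offset saved before skipping any prefix is
// restored, so the JumpLoop will be re-executed and perform the jump.
int AdvanceBytecodeOffsetOrReturn(const uint8_t* bytecode_array, int offset,
                                  const int* bytecode_size_table) {
  const int original_offset = offset;  // backup, as in the patch
  Bytecode bytecode = static_cast<Bytecode>(bytecode_array[offset]);
  if (bytecode == Bytecode::kWide || bytecode == Bytecode::kExtraWide) {
    ++offset;  // skip the prefix
    bytecode = static_cast<Bytecode>(bytecode_array[offset]);
  }
  if (bytecode == Bytecode::kReturn) return -1;
  if (bytecode == Bytecode::kJumpLoop) return original_offset;  // re-execute
  return offset + bytecode_size_table[static_cast<int>(bytecode)];
}

int main() {
  const uint8_t program[] = {static_cast<uint8_t>(Bytecode::kWide),
                             static_cast<uint8_t>(Bytecode::kJumpLoop)};
  const int sizes[] = {1, 1, 3, 1, 2};  // per-bytecode sizes (made up)
  std::printf("%d\n", AdvanceBytecodeOffsetOrReturn(program, 0, sizes));  // 0
}
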
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index 958cd5f5f6..98680cf553 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -3,73 +3,71 @@
// LICENSE file.
namespace runtime {
- extern transitioning runtime
- DoubleToStringWithRadix(implicit context: Context)(Number, Number): String;
+extern transitioning runtime
+DoubleToStringWithRadix(implicit context: Context)(Number, Number): String;
} // namespace runtime
namespace number {
- extern macro NaNStringConstant(): String;
- extern macro ZeroStringConstant(): String;
- extern macro InfinityStringConstant(): String;
- extern macro MinusInfinityStringConstant(): String;
+extern macro NaNStringConstant(): String;
+extern macro ZeroStringConstant(): String;
+extern macro InfinityStringConstant(): String;
+extern macro MinusInfinityStringConstant(): String;
- const kAsciiZero: constexpr int32 = 48; // '0' (ascii)
- const kAsciiLowerCaseA: constexpr int32 = 97; // 'a' (ascii)
+const kAsciiZero: constexpr int32 = 48; // '0' (ascii)
+const kAsciiLowerCaseA: constexpr int32 = 97; // 'a' (ascii)
- transitioning macro ThisNumberValue(implicit context: Context)(
- receiver: JSAny, method: constexpr string): Number {
- return UnsafeCast<Number>(
- ToThisValue(receiver, PrimitiveType::kNumber, method));
- }
-
- // https://tc39.github.io/ecma262/#sec-number.prototype.tostring
- transitioning javascript builtin NumberPrototypeToString(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): String {
- // 1. Let x be ? thisNumberValue(this value).
- const x = ThisNumberValue(receiver, 'Number.prototype.toString');
+transitioning macro ThisNumberValue(implicit context: Context)(
+ receiver: JSAny, method: constexpr string): Number {
+ return UnsafeCast<Number>(
+ ToThisValue(receiver, PrimitiveType::kNumber, method));
+}
- // 2. If radix is not present, let radixNumber be 10.
- // 3. Else if radix is undefined, let radixNumber be 10.
- // 4. Else, let radixNumber be ? ToInteger(radix).
- const radix: JSAny = arguments[0];
- const radixNumber: Number =
- radix == Undefined ? 10 : ToInteger_Inline(radix);
+// https://tc39.github.io/ecma262/#sec-number.prototype.tostring
+transitioning javascript builtin NumberPrototypeToString(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ // 1. Let x be ? thisNumberValue(this value).
+ const x = ThisNumberValue(receiver, 'Number.prototype.toString');
- // 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
- if (radixNumber < 2 || radixNumber > 36) {
- ThrowRangeError(MessageTemplate::kToRadixFormatRange);
- }
+ // 2. If radix is not present, let radixNumber be 10.
+ // 3. Else if radix is undefined, let radixNumber be 10.
+ // 4. Else, let radixNumber be ? ToInteger(radix).
+ const radix: JSAny = arguments[0];
+ const radixNumber: Number = radix == Undefined ? 10 : ToInteger_Inline(radix);
- // 6. If radixNumber = 10, return ! ToString(x).
- if (radixNumber == 10) {
- return NumberToString(x);
- }
+ // 5. If radixNumber < 2 or radixNumber > 36, throw a RangeError exception.
+ if (radixNumber < 2 || radixNumber > 36) {
+ ThrowRangeError(MessageTemplate::kToRadixFormatRange);
+ }
- // 7. Return the String representation of this Number
- // value using the radix specified by radixNumber.
+ // 6. If radixNumber = 10, return ! ToString(x).
+ if (radixNumber == 10) {
+ return NumberToString(x);
+ }
- // Fast case where the result is a one character string.
- if (TaggedIsPositiveSmi(x) && x < radixNumber) {
- let charCode = Convert<int32>(UnsafeCast<Smi>(x));
- if (charCode < 10) {
- charCode += kAsciiZero;
- } else {
- charCode = charCode - 10 + kAsciiLowerCaseA;
- }
- return StringFromSingleCharCode(charCode);
- }
+ // 7. Return the String representation of this Number
+ // value using the radix specified by radixNumber.
- if (x == -0) {
- return ZeroStringConstant();
- } else if (NumberIsNaN(x)) {
- return NaNStringConstant();
- } else if (x == V8_INFINITY) {
- return InfinityStringConstant();
- } else if (x == MINUS_V8_INFINITY) {
- return MinusInfinityStringConstant();
+ // Fast case where the result is a one character string.
+ if (TaggedIsPositiveSmi(x) && x < radixNumber) {
+ let charCode = Convert<int32>(UnsafeCast<Smi>(x));
+ if (charCode < 10) {
+ charCode += kAsciiZero;
+ } else {
+ charCode = charCode - 10 + kAsciiLowerCaseA;
}
+ return StringFromSingleCharCode(charCode);
+ }
- return runtime::DoubleToStringWithRadix(x, radixNumber);
+ if (x == -0) {
+ return ZeroStringConstant();
+ } else if (NumberIsNaN(x)) {
+ return NaNStringConstant();
+ } else if (x == V8_INFINITY) {
+ return InfinityStringConstant();
+ } else if (x == MINUS_V8_INFINITY) {
+ return MinusInfinityStringConstant();
}
+
+ return runtime::DoubleToStringWithRadix(x, radixNumber);
+}
}
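
The reformatted NumberPrototypeToString keeps the same single-character fast case: when the receiver is a non-negative Smi smaller than the radix, the result is one digit computed directly from '0' (48) or 'a' (97). A tiny C++ illustration of that digit-to-character step (illustrative only, not V8 code):

#include <cassert>
#include <cstdio>

// Single-character fast case of Number.prototype.toString(radix):
// a non-negative integer below the radix maps to one digit,
// '0'-'9' for values below 10 and 'a'-'z' otherwise.
char SingleDigitToChar(int value, int radix) {
  assert(radix >= 2 && radix <= 36);
  assert(value >= 0 && value < radix);
  constexpr int kAsciiZero = 48;        // '0'
  constexpr int kAsciiLowerCaseA = 97;  // 'a'
  return static_cast<char>(value < 10 ? value + kAsciiZero
                                      : value - 10 + kAsciiLowerCaseA);
}

int main() {
  std::printf("%c %c %c\n",
              SingleDigitToChar(7, 16),    // '7'
              SingleDigitToChar(10, 16),   // 'a'
              SingleDigitToChar(35, 36));  // 'z'
}
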
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 2dbe9beacf..32d4dea157 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -4,65 +4,63 @@
namespace object {
- transitioning macro ObjectFromEntriesFastCase(implicit context: Context)(
- iterable: JSAny): JSObject labels IfSlow {
- typeswitch (iterable) {
- case (array: FastJSArrayWithNoCustomIteration): {
- const elements: FixedArray =
- Cast<FixedArray>(array.elements) otherwise IfSlow;
- const length: Smi = array.length;
- const result: JSObject = NewJSObject();
+transitioning macro ObjectFromEntriesFastCase(implicit context: Context)(
+ iterable: JSAny): JSObject labels IfSlow {
+ typeswitch (iterable) {
+ case (array: FastJSArrayWithNoCustomIteration): {
+ const elements: FixedArray =
+ Cast<FixedArray>(array.elements) otherwise IfSlow;
+ const length: Smi = array.length;
+ const result: JSObject = NewJSObject();
- for (let k: Smi = 0; k < length; ++k) {
- const value: JSAny = array::LoadElementOrUndefined(elements, k);
- const pair: KeyValuePair =
- collections::LoadKeyValuePairNoSideEffects(value)
- otherwise IfSlow;
- // Bail out if ToPropertyKey will attempt to load and call
- // Symbol.toPrimitive, toString, and valueOf, which could
- // invalidate assumptions about the iterable.
- if (Is<JSReceiver>(pair.key)) goto IfSlow;
- FastCreateDataProperty(result, pair.key, pair.value);
- }
- return result;
- }
- case (JSAny): {
- goto IfSlow;
+ for (let k: Smi = 0; k < length; ++k) {
+ const value: JSAny = array::LoadElementOrUndefined(elements, k);
+ const pair: KeyValuePair =
+ collections::LoadKeyValuePairNoSideEffects(value)
+ otherwise IfSlow;
+ // Bail out if ToPropertyKey will attempt to load and call
+ // Symbol.toPrimitive, toString, and valueOf, which could
+ // invalidate assumptions about the iterable.
+ if (Is<JSReceiver>(pair.key)) goto IfSlow;
+ FastCreateDataProperty(result, pair.key, pair.value);
}
+ return result;
+ }
+ case (JSAny): {
+ goto IfSlow;
}
}
+}
- transitioning javascript builtin
- ObjectFromEntries(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- const iterable: JSAny = arguments[0];
+transitioning javascript builtin
+ObjectFromEntries(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const iterable: JSAny = arguments[0];
+ try {
+ if (IsNullOrUndefined(iterable)) goto Throw;
+ return ObjectFromEntriesFastCase(iterable) otherwise IfSlow;
+ } label IfSlow {
+ const result: JSObject = NewJSObject();
+ const fastIteratorResultMap: Map = GetIteratorResultMap();
+ let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
try {
- if (IsNullOrUndefined(iterable)) goto Throw;
- return ObjectFromEntriesFastCase(iterable) otherwise IfSlow;
- }
- label IfSlow {
- const result: JSObject = NewJSObject();
- const fastIteratorResultMap: Map = GetIteratorResultMap();
- let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
- try {
- assert(!IsNullOrUndefined(i.object));
- while (true) {
- const step: JSReceiver =
- iterator::IteratorStep(i, fastIteratorResultMap)
- otherwise return result;
- const iteratorValue: JSAny =
- iterator::IteratorValue(step, fastIteratorResultMap);
- const pair: KeyValuePair =
- collections::LoadKeyValuePair(iteratorValue);
- FastCreateDataProperty(result, pair.key, pair.value);
- }
- return result;
- } catch (e) deferred {
- iterator::IteratorCloseOnException(i, e);
+ assert(!IsNullOrUndefined(i.object));
+ while (true) {
+ const step: JSReceiver =
+ iterator::IteratorStep(i, fastIteratorResultMap)
+ otherwise return result;
+ const iteratorValue: JSAny =
+ iterator::IteratorValue(step, fastIteratorResultMap);
+ const pair: KeyValuePair = collections::LoadKeyValuePair(iteratorValue);
+ FastCreateDataProperty(result, pair.key, pair.value);
}
+ return result;
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(i);
+ ReThrow(context, e);
}
- label Throw deferred {
- ThrowTypeError(MessageTemplate::kNotIterable);
- }
+ } label Throw deferred {
+ ThrowTypeError(MessageTemplate::kNotIterable);
}
+}
} // namespace object
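
One behavioural detail in the reformatted slow path above: on an exception the iterator is now closed via iterator::IteratorCloseOnException(i) and the pending exception is re-thrown explicitly with ReThrow, where previously the exception was passed to the close helper. In ordinary C++ exception terms the pattern looks roughly like this (a loose analogy, not the Torque semantics):

#include <cstdio>
#include <stdexcept>

struct Iterator {
  void Close() { std::puts("iterator closed"); }  // cleanup that must not be skipped
};

// Analogue of the slow path's catch clause: if consuming the iterator
// throws, close it first, then propagate the original exception.
void DrainOrCloseOnException(Iterator& it) {
  try {
    throw std::runtime_error("entry is not an object");  // simulate a bad entry
  } catch (...) {
    it.Close();  // IteratorCloseOnException(i)
    throw;       // ReThrow(context, e)
  }
}

int main() {
  Iterator it;
  try {
    DrainOrCloseOnException(it);
  } catch (const std::exception& e) {
    std::printf("caught: %s\n", e.what());
  }
}
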
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
index c6d3e92279..931972024c 100644
--- a/deps/v8/src/builtins/object.tq
+++ b/deps/v8/src/builtins/object.tq
@@ -3,179 +3,199 @@
// found in the LICENSE file.
namespace runtime {
- extern transitioning runtime
- ObjectIsExtensible(implicit context: Context)(JSAny): JSAny;
+extern transitioning runtime
+ObjectIsExtensible(implicit context: Context)(JSAny): JSAny;
- extern transitioning runtime
- JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver):
- JSAny;
+extern transitioning runtime
+JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver): JSAny;
- extern transitioning runtime
- JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver):
- JSAny;
+extern transitioning runtime
+JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver):
+ JSAny;
- extern transitioning runtime
- JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): JSAny;
+extern transitioning runtime
+JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): JSAny;
- extern transitioning runtime
- JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, JSAny):
- JSAny;
+extern transitioning runtime
+JSReceiverSetPrototypeOfThrow(implicit context: Context)(
+ JSReceiver, JSAny): JSAny;
- extern transitioning runtime
- JSReceiverSetPrototypeOfDontThrow(implicit context:
- Context)(JSReceiver, JSAny): JSAny;
+extern transitioning runtime
+JSReceiverSetPrototypeOfDontThrow(implicit context: Context)(
+ JSReceiver, JSAny): JSAny;
- extern transitioning runtime ObjectCreate(implicit context:
- Context)(JSAny, JSAny): JSAny;
+extern transitioning runtime ObjectCreate(implicit context: Context)(
+ JSAny, JSAny): JSAny;
} // namespace runtime
namespace object {
- transitioning macro
- ObjectIsExtensibleImpl(implicit context: Context)(object: JSAny): JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
- const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
- otherwise return runtime::ObjectIsExtensible(objectJSReceiver);
- return proxy::ProxyIsExtensible(objectJSProxy);
- }
-
- transitioning macro
- ObjectPreventExtensionsThrow(implicit context: Context)(object: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
- const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
- otherwise return runtime::JSReceiverPreventExtensionsThrow(
- objectJSReceiver);
- proxy::ProxyPreventExtensions(objectJSProxy, True);
- return objectJSReceiver;
- }
-
- transitioning macro
- ObjectPreventExtensionsDontThrow(implicit context: Context)(object: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
- const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
- otherwise return runtime::JSReceiverPreventExtensionsDontThrow(
- objectJSReceiver);
- return proxy::ProxyPreventExtensions(objectJSProxy, False);
- }
-
- transitioning macro
- ObjectGetPrototypeOfImpl(implicit context: Context)(object: JSAny): JSAny {
- const objectJSReceiver: JSReceiver = ToObject_Inline(context, object);
- return object::JSReceiverGetPrototypeOf(objectJSReceiver);
- }
-
- transitioning macro
- JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver):
- JSAny {
- const objectJSProxy = Cast<JSProxy>(object)
- otherwise return runtime::JSReceiverGetPrototypeOf(object);
- return proxy::ProxyGetPrototypeOf(objectJSProxy);
- }
-
- transitioning macro
- ObjectSetPrototypeOfThrow(implicit context: Context)(
- object: JSAny, proto: JSReceiver|Null): JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
- const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
- otherwise return runtime::JSReceiverSetPrototypeOfThrow(
- objectJSReceiver, proto);
- proxy::ProxySetPrototypeOf(objectJSProxy, proto, True);
- return objectJSReceiver;
- }
-
- transitioning macro
- ObjectSetPrototypeOfDontThrow(implicit context: Context)(
- object: JSAny, proto: JSReceiver|Null): JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
- const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
- otherwise return runtime::JSReceiverSetPrototypeOfDontThrow(
- objectJSReceiver, proto);
- return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False);
- }
-
- transitioning builtin CreateObjectWithoutProperties(
- implicit context: Context)(prototype: JSAny): JSAny {
- const nativeContext = LoadNativeContext(context);
-
- try {
- let map: Map;
- let properties: NameDictionary|EmptyFixedArray;
- typeswitch (prototype) {
- case (Null): {
- map = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP]);
- properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
- }
- case (prototype: JSReceiver): {
- properties = kEmptyFixedArray;
- const objectFunction = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
- map = UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
- if (prototype != map.prototype) {
- const prototypeInfo =
- prototype.map.PrototypeInfo() otherwise Runtime;
- typeswitch (prototypeInfo.object_create_map) {
- case (Undefined): {
- goto Runtime;
- }
- case (weak_map: Weak<Map>): {
- map = WeakToStrong(weak_map) otherwise Runtime;
- }
+transitioning macro
+ObjectIsExtensibleImpl(implicit context: Context)(object: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::ObjectIsExtensible(objectJSReceiver);
+ return proxy::ProxyIsExtensible(objectJSProxy);
+}
+
+transitioning macro
+ObjectPreventExtensionsThrow(implicit context: Context)(object: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsThrow(objectJSReceiver);
+ proxy::ProxyPreventExtensions(objectJSProxy, True);
+ return objectJSReceiver;
+}
+
+transitioning macro
+ObjectPreventExtensionsDontThrow(implicit context: Context)(object: JSAny):
+ JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsDontThrow(
+ objectJSReceiver);
+ return proxy::ProxyPreventExtensions(objectJSProxy, False);
+}
+
+transitioning macro
+ObjectGetPrototypeOfImpl(implicit context: Context)(object: JSAny): JSAny {
+ const objectJSReceiver: JSReceiver = ToObject_Inline(context, object);
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+}
+
+transitioning macro
+JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver): JSAny {
+ const objectJSProxy = Cast<JSProxy>(object)
+ otherwise return runtime::JSReceiverGetPrototypeOf(object);
+ return proxy::ProxyGetPrototypeOf(objectJSProxy);
+}
+
+transitioning macro
+ObjectSetPrototypeOfThrow(implicit context: Context)(
+ object: JSAny, proto: JSReceiver|Null): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfThrow(
+ objectJSReceiver, proto);
+ proxy::ProxySetPrototypeOf(objectJSProxy, proto, True);
+ return objectJSReceiver;
+}
+
+transitioning macro
+ObjectSetPrototypeOfDontThrow(implicit context: Context)(
+ object: JSAny, proto: JSReceiver|Null): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfDontThrow(
+ objectJSReceiver, proto);
+ return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False);
+}
+
+transitioning builtin CreateObjectWithoutProperties(implicit context: Context)(
+ prototype: JSAny): JSAny {
+ const nativeContext = LoadNativeContext(context);
+
+ try {
+ let map: Map;
+ let properties: NameDictionary|EmptyFixedArray;
+ typeswitch (prototype) {
+ case (Null): {
+ map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP]);
+ properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
+ }
+ case (prototype: JSReceiver): {
+ properties = kEmptyFixedArray;
+ const objectFunction = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
+ map = UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
+ if (prototype != map.prototype) {
+ const prototypeInfo = prototype.map.PrototypeInfo() otherwise Runtime;
+ typeswitch (prototypeInfo.object_create_map) {
+ case (Undefined): {
+ goto Runtime;
+ }
+ case (weak_map: Weak<Map>): {
+ map = WeakToStrong(weak_map) otherwise Runtime;
}
}
}
- case (JSAny): {
- goto Runtime;
- }
- }
- return AllocateJSObjectFromMap(map, properties);
- }
- label Runtime deferred {
- return runtime::ObjectCreate(prototype, Undefined);
- }
- }
-
- // ES6 section 19.1.2.11 Object.isExtensible ( O )
- transitioning javascript builtin
- ObjectIsExtensible(js-implicit context: NativeContext)(object: JSAny): JSAny {
- return object::ObjectIsExtensibleImpl(object);
- }
-
- // ES6 section 19.1.2.18 Object.preventExtensions ( O )
- transitioning javascript builtin
- ObjectPreventExtensions(js-implicit context: NativeContext)(object: JSAny):
- JSAny {
- return object::ObjectPreventExtensionsThrow(object);
- }
-
- // ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
- transitioning javascript builtin
- ObjectGetPrototypeOf(js-implicit context: NativeContext)(object: JSAny):
- JSAny {
- return object::ObjectGetPrototypeOfImpl(object);
- }
-
- // ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
- transitioning javascript builtin ObjectSetPrototypeOf(
- js-implicit context: NativeContext)(object: JSAny, proto: JSAny): JSAny {
- // 1. Set O to ? RequireObjectCoercible(O).
- RequireObjectCoercible(object, 'Object.setPrototypeOf');
-
- // 2. If Type(proto) is neither Object nor Null, throw a TypeError
- // exception.
- // 3. If Type(O) is not Object, return O.
- // 4. Let status be ? O.[[SetPrototypeOf]](proto).
- // 5. If status is false, throw a TypeError exception.
- // 6. Return O.
- typeswitch (proto) {
- case (proto: JSReceiver|Null): {
- return object::ObjectSetPrototypeOfThrow(object, proto);
}
case (JSAny): {
- ThrowTypeError(MessageTemplate::kProtoObjectOrNull, proto);
+ goto Runtime;
}
}
+ return AllocateJSObjectFromMap(map, properties);
+ } label Runtime deferred {
+ return runtime::ObjectCreate(prototype, Undefined);
}
+}
+
+// ES6 section 19.1.2.11 Object.isExtensible ( O )
+transitioning javascript builtin
+ObjectIsExtensible(js-implicit context: NativeContext)(object: JSAny): JSAny {
+ return object::ObjectIsExtensibleImpl(object);
+}
+
+// ES6 section 19.1.2.18 Object.preventExtensions ( O )
+transitioning javascript builtin
+ObjectPreventExtensions(js-implicit context: NativeContext)(object: JSAny):
+ JSAny {
+ return object::ObjectPreventExtensionsThrow(object);
+}
+
+// ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
+transitioning javascript builtin
+ObjectGetPrototypeOf(js-implicit context: NativeContext)(object: JSAny): JSAny {
+ return object::ObjectGetPrototypeOfImpl(object);
+}
+
+// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
+transitioning javascript builtin ObjectSetPrototypeOf(
+ js-implicit context: NativeContext)(object: JSAny, proto: JSAny): JSAny {
+ // 1. Set O to ? RequireObjectCoercible(O).
+ RequireObjectCoercible(object, 'Object.setPrototypeOf');
+
+ // 2. If Type(proto) is neither Object nor Null, throw a TypeError
+ // exception.
+ // 3. If Type(O) is not Object, return O.
+ // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+ // 5. If status is false, throw a TypeError exception.
+ // 6. Return O.
+ typeswitch (proto) {
+ case (proto: JSReceiver|Null): {
+ return object::ObjectSetPrototypeOfThrow(object, proto);
+ }
+ case (JSAny): {
+ ThrowTypeError(MessageTemplate::kProtoObjectOrNull, proto);
+ }
+ }
+}
+
+// ES #sec-object.prototype.tostring
+transitioning javascript builtin ObjectPrototypeToString(
+ js-implicit context: Context, receiver: JSAny)(): String {
+ return ObjectToString(context, receiver);
+}
+
+// ES #sec-object.prototype.valueof
+transitioning javascript builtin ObjectPrototypeValueOf(
+ js-implicit context: Context, receiver: JSAny)(): JSReceiver {
+ // 1. Return ? ToObject(this value).
+ return ToObject_Inline(context, receiver);
+}
+
+// ES #sec-object.prototype.tolocalestring
+transitioning javascript builtin ObjectPrototypeToLocaleString(
+ js-implicit context: Context, receiver: JSAny)(): JSAny {
+ // 1. Let O be the this value.
+ // 2. Return ? Invoke(O, "toString").
+ if (receiver == Null || receiver == Undefined) deferred {
+ ThrowTypeError(
+ MessageTemplate::kCalledOnNullOrUndefined,
+ 'Object.prototype.toLocaleString');
+ }
+ const method = GetProperty(receiver, 'toString');
+ return Call(context, method, receiver);
+}
} // namespace object
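
CreateObjectWithoutProperties above tries a cached map first: for a JSReceiver prototype it reads the prototype's PrototypeInfo object_create_map, upgrades the weak reference to a strong one, and only falls back to the ObjectCreate runtime call when the cache is absent or the weak map has been cleared. The lookup-with-fallback shape, in plain C++ with invented types (not V8's object model):

#include <cstdio>
#include <memory>
#include <unordered_map>

struct Map { int id; };  // stand-in for a V8 Map

// Weak cache: prototype address -> map used for objects created with that
// prototype. Mirrors PrototypeInfo::object_create_map in shape only.
std::unordered_map<const void*, std::weak_ptr<Map>> object_create_map_cache;

std::shared_ptr<Map> SlowObjectCreate(const void* prototype) {
  std::puts("runtime fallback");             // runtime::ObjectCreate
  auto map = std::make_shared<Map>(Map{42});
  object_create_map_cache[prototype] = map;  // populate cache for next time
  return map;
}

std::shared_ptr<Map> CreateObjectMap(const void* prototype) {
  auto it = object_create_map_cache.find(prototype);
  if (it != object_create_map_cache.end()) {
    if (auto strong = it->second.lock())     // WeakToStrong(weak_map)
      return strong;                         // fast path: cached map
  }
  return SlowObjectCreate(prototype);        // goto Runtime
}

int main() {
  int proto_a;                               // only its address matters here
  auto first = CreateObjectMap(&proto_a);    // runtime fallback
  auto second = CreateObjectMap(&proto_a);   // cache hit
  std::printf("same map: %d\n", first == second);
}
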
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 460d749297..367838f82c 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -87,7 +87,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r0, num_args, Operand(kSystemPointerSizeLog2));
__ cmp(scratch, r0);
__ ble(stack_overflow); // Signed comparison.
}
@@ -130,16 +130,16 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- r6: new target
// -- r7: pointer to last argument
// -- cr0: condition indicating whether r3 is zero
- // -- sp[0*kPointerSize]: the hole (receiver)
- // -- sp[1*kPointerSize]: number of arguments (tagged)
- // -- sp[2*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[2*kSystemPointerSize]: context
// -----------------------------------
__ beq(&no_args, cr0);
- __ ShiftLeftImm(scratch, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch, r3, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, scratch);
__ mtctr(r3);
__ bind(&loop);
- __ subi(scratch, scratch, Operand(kPointerSize));
+ __ subi(scratch, scratch, Operand(kSystemPointerSize));
__ LoadPX(r0, MemOperand(r7, scratch));
__ StorePX(r0, MemOperand(sp, scratch));
__ bdnz(&loop);
@@ -166,7 +166,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kPointerSize));
+ __ addi(sp, sp, Operand(kSystemPointerSize));
__ blr();
__ bind(&stack_overflow);
@@ -202,14 +202,15 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r6);
// ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- r4 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- r4 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
__ JumpIfIsInRange(r7, kDefaultDerivedConstructor, kDerivedConstructor,
@@ -228,11 +229,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3: receiver
- // -- Slot 4 / sp[0*kPointerSize]: new target
- // -- Slot 3 / sp[1*kPointerSize]: padding
- // -- Slot 2 / sp[2*kPointerSize]: constructor function
- // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kPointerSize]: context
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
// -----------------------------------
// Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -248,12 +249,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r6: new target
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
// Restore constructor function and argument count.
@@ -284,20 +285,20 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- r6: new target
// -- r7: pointer to last argument
// -- cr0: condition indicating whether r3 is zero
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: implicit receiver
- // -- sp[2*kPointerSize]: padding
- // -- r4 and sp[3*kPointerSize]: constructor function
- // -- sp[4*kPointerSize]: number of arguments (tagged)
- // -- sp[5*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: padding
+ // -- r4 and sp[3*kSystemPointerSize]: constructor function
+ // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[5*kSystemPointerSize]: context
// -----------------------------------
__ cmpi(r3, Operand::Zero());
__ beq(&no_args);
- __ ShiftLeftImm(r9, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r9, r3, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, r9);
__ mtctr(r3);
__ bind(&loop);
- __ subi(r9, r9, Operand(kPointerSize));
+ __ subi(r9, r9, Operand(kSystemPointerSize));
__ LoadPX(r0, MemOperand(r7, r9));
__ StorePX(r0, MemOperand(sp, r9));
__ bdnz(&loop);
@@ -311,11 +312,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
// -----------------------------------
// Store offset of return address for deoptimizer.
@@ -366,7 +367,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kPointerSize));
+ __ addi(sp, sp, Operand(kSystemPointerSize));
__ blr();
}
@@ -381,8 +382,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ bne(&done);
- __ LoadP(sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
@@ -396,14 +398,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r4);
// Store input value into generator object.
- __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
- r0);
+ __ StoreTaggedField(
+ r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
- __ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r7, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -436,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
@@ -448,23 +453,26 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadHalfWord(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadP(r5, FieldMemOperand(
- r4, JSGeneratorObject::kParametersAndRegistersOffset));
+ __ LoadTaggedPointerField(
+ r5,
+ FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
__ cmpi(r6, Operand::Zero());
__ ble(&done_loop);
- // setup r9 to first element address - kPointerSize
+ // setup r9 to first element address - kTaggedSize
__ addi(r9, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r6);
__ bind(&loop);
- __ LoadPU(scratch, MemOperand(r9, kPointerSize));
+ __ LoadAnyTaggedField(scratch, MemOperand(r9, kTaggedSize));
+ __ addi(r9, r9, Operand(kTaggedSize));
__ push(scratch);
__ bdnz(&loop);
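
The PPC changes in this hunk (and throughout the file) separate two sizes that coincide without pointer compression but differ with it: stack slots and C-level pointers are kSystemPointerSize wide, while tagged fields inside heap objects are kTaggedSize wide and are read with the LoadTagged* helpers. A compile-time sketch of the distinction, with sizes assumed for a 64-bit build with compressed pointers (these are not the V8 constants themselves):

#include <cstddef>
#include <cstdio>

// Assumed values for a 64-bit build: a machine pointer / stack slot is
// 8 bytes, while a compressed tagged field inside a heap object is 4.
constexpr std::size_t kSystemPointerSize = 8;  // stack math, sp[i * kSystemPointerSize]
constexpr std::size_t kTaggedSize = 4;         // in-object fields, LoadTaggedPointerField

constexpr std::size_t StackSlotOffset(std::size_t slot) {
  return slot * kSystemPointerSize;            // e.g. sp[3 * kSystemPointerSize]
}
constexpr std::size_t TaggedFieldOffset(std::size_t index) {
  return index * kTaggedSize;                  // consecutive tagged fields
}

int main() {
  std::printf("slot 3 at byte %zu, tagged field 3 at byte %zu\n",
              StackSlotOffset(3), TaggedFieldOffset(3));
}
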
@@ -473,8 +481,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, r6, r3);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
@@ -488,7 +498,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r6, r4);
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ JumpCodeObject(r5);
}
@@ -500,7 +510,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
- __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -510,7 +521,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
- __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
@@ -560,7 +572,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// PPC LINUX ABI:
// preserve LR in pre-reserved slot in caller's frame
__ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+ __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved);
@@ -695,7 +707,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPop(kCalleeSaved);
// Return
- __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+ __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
@@ -729,7 +741,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// here which will cause scratch1 to become negative.
__ sub(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- __ ShiftLeftImm(scratch2, argc, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch2, argc, Operand(kSystemPointerSizeLog2));
__ cmp(scratch1, scratch2);
__ bgt(&okay); // Signed comparison.
@@ -787,13 +799,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: argc
// r8: argv, i.e. points to first arg
Label loop, entry;
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ add(r5, r8, r0);
// r5 points past last arg.
__ b(&entry);
__ bind(&loop);
__ LoadP(r9, MemOperand(r8)); // read next parameter
- __ addi(r8, r8, Operand(kPointerSize));
+ __ addi(r8, r8, Operand(kSystemPointerSize));
__ LoadP(r0, MemOperand(r9)); // dereference handle
__ push(r0); // push parameter
__ bind(&entry);
@@ -851,8 +863,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
- __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
- r0);
+ __ StoreTaggedField(optimized_code,
+ FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ mr(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
@@ -900,8 +912,9 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
- __ LoadP(scratch, FieldMemOperand(optimized_code_entry,
- Code::kCodeDataContainerOffset));
+ __ LoadTaggedPointerField(
+ scratch,
+ FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadWordArith(
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
@@ -1057,10 +1070,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
// The bytecode array could have been flushed from the shared function info,
@@ -1071,15 +1086,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
@@ -1087,9 +1104,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimized_code_entry = r7;
// Read off the optimized code slot in the feedback vector.
- __ LoadP(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
__ CmpSmiLiteral(optimized_code_entry,
@@ -1156,7 +1174,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
+ __ ShiftRightImm(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
__ beq(&no_args, cr0);
__ mtctr(r5);
__ bind(&loop);
@@ -1174,7 +1192,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
- __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
__ StorePX(r6, MemOperand(fp, r8));
__ bind(&no_incoming_new_target_or_generator_register);
@@ -1197,7 +1215,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
__ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r6, r6, Operand(kSystemPointerSizeLog2));
__ LoadPX(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, r6));
__ Call(kJavaScriptCallCodeStartRegister);
@@ -1282,10 +1300,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop, skip;
__ cmpi(count, Operand::Zero());
__ beq(&skip);
- __ addi(index, index, Operand(kPointerSize)); // Bias up for LoadPU
+ __ addi(index, index, Operand(kSystemPointerSize)); // Bias up for LoadPU
__ mtctr(count);
__ bind(&loop);
- __ LoadPU(scratch, MemOperand(index, -kPointerSize));
+ __ LoadPU(scratch, MemOperand(index, -kSystemPointerSize));
__ push(scratch);
__ bdnz(&loop);
__ bind(&skip);
@@ -1409,15 +1427,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadP(r5,
- FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
@@ -1472,7 +1492,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Register scratch = temps.Acquire();
__ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ LoadPX(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
@@ -1538,9 +1558,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point.
__ StoreP(
- r3, MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
+ r3,
+ MemOperand(sp, config->num_allocatable_general_registers() *
+ kSystemPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1591,8 +1612,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
- __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
- __ addi(sp, sp, Operand(1 * kPointerSize));
+ __ LoadP(r3, MemOperand(sp, 0 * kSystemPointerSize));
+ __ addi(sp, sp, Operand(1 * kSystemPointerSize));
__ Ret();
}
@@ -1616,7 +1637,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -1628,10 +1650,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ LoadP(r4,
- FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
- __ SmiUntag(r4);
+ __ SmiUntagField(
+ r4, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code start + osr_offset
__ add(r0, r3, r4);
@@ -1659,16 +1680,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
- __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(arg_size, r3, Operand(kSystemPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ mr(r5, scratch);
__ LoadP(r4, MemOperand(new_sp, 0)); // receiver
- __ cmpi(arg_size, Operand(kPointerSize));
+ __ cmpi(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kSystemPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argArray
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
@@ -1717,7 +1738,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the callable to call (passed as receiver) from the stack.
// r3: actual number of arguments
- __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r5, r3, Operand(kSystemPointerSizeLog2));
__ LoadPX(r4, MemOperand(sp, r5));
// 3. Shift arguments and return address one slot down on the stack
@@ -1733,9 +1754,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ mtctr(r3);
__ bind(&loop);
- __ LoadP(scratch, MemOperand(r5, -kPointerSize));
+ __ LoadP(scratch, MemOperand(r5, -kSystemPointerSize));
__ StoreP(scratch, MemOperand(r5));
- __ subi(r5, r5, Operand(kPointerSize));
+ __ subi(r5, r5, Operand(kSystemPointerSize));
__ bdnz(&loop);
// Adjust the actual number of arguments and remove the top element
// (which is a copy of the last argument).
@@ -1764,19 +1785,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
- __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(arg_size, r3, Operand(kSystemPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(scratch, r4);
__ mr(r5, r4);
- __ cmpi(arg_size, Operand(kPointerSize));
+ __ cmpi(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ beq(&skip);
- __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
- __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ LoadP(scratch,
+ MemOperand(new_sp, 2 * -kSystemPointerSize)); // thisArgument
+ __ cmpi(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
- __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r5, MemOperand(new_sp, 3 * -kSystemPointerSize)); // argumentsList
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
@@ -1814,21 +1836,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label skip;
Register arg_size = r8;
Register new_sp = r7;
- __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(arg_size, r3, Operand(kSystemPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r5, r4);
__ mr(r6, r4);
__ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
- __ cmpi(arg_size, Operand(kPointerSize));
+ __ cmpi(arg_size, Operand(kSystemPointerSize));
__ blt(&skip);
- __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target
__ mr(r6, r4); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
- __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argumentsList
+ __ cmpi(arg_size, Operand(2 * kSystemPointerSize));
__ beq(&skip);
- __ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ LoadP(r6, MemOperand(new_sp, 3 * -kSystemPointerSize)); // new.target
__ bind(&skip);
__ mr(sp, new_sp);
}
@@ -1875,7 +1897,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
__ LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- int stack_adjustment = kPointerSize; // adjust for receiver
+ int stack_adjustment = kSystemPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r0, r4);
__ add(sp, sp, r0);
@@ -1898,7 +1920,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
- __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadHalfWord(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -1924,10 +1947,11 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpi(r7, Operand::Zero());
__ beq(&no_args);
__ addi(r5, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
- __ LoadPU(scratch, MemOperand(r5, kPointerSize));
+ __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize));
+ __ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -1961,7 +1985,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
- __ LoadP(scratch, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(scratch,
+ FieldMemOperand(r6, HeapObject::kMapOffset));
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
@@ -1985,7 +2010,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ beq(&arguments_adaptor);
{
__ LoadP(r8, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadHalfWord(
r8,
FieldMemOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2011,11 +2037,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
{
Label loop;
- __ addi(r7, r7, Operand(kPointerSize));
+ __ addi(r7, r7, Operand(kSystemPointerSize));
__ add(r3, r3, r8);
__ bind(&loop);
{
- __ ShiftLeftImm(scratch, r8, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadPX(scratch, MemOperand(r7, scratch));
__ push(scratch);
__ subi(r8, r8, Operand(1));
@@ -2045,7 +2071,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
@@ -2053,7 +2080,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(cp,
+ FieldMemOperand(r4, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ andi(r0, r6,
@@ -2073,7 +2101,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(r6);
} else {
Label convert_to_object, convert_receiver;
- __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r6, r3, Operand(kSystemPointerSizeLog2));
__ LoadPX(r6, MemOperand(sp, r6));
__ JumpIfSmi(r6, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2107,10 +2135,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r3, r4);
__ SmiUntag(r3);
}
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r7, r3, Operand(kSystemPointerSizeLog2));
__ StorePX(r6, MemOperand(sp, r7));
}
__ bind(&done_convert);
@@ -2146,9 +2175,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
- __ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
- __ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
- __ SmiUntag(r7, SetRC);
+ __ LoadTaggedPointerField(
+ r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntagField(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC);
__ beq(&no_bound_arguments, cr0);
{
// ----------- S t a t e -------------
@@ -2163,9 +2192,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ mr(scratch, sp); // preserve previous stack pointer
- __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
- __ sub(sp, sp, r10);
+ __ ShiftLeftImm(r10, r7, Operand(kSystemPointerSizeLog2));
+ __ sub(r0, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
@@ -2173,11 +2201,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
- __ cmpl(sp, scratch);
+ __ cmpl(r0, scratch);
}
__ bgt(&done); // Signed comparison.
- // Restore the stack pointer.
- __ mr(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2186,6 +2212,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ __ mr(scratch, sp);
+ __ mr(sp, r0);
+
// Relocate arguments down the stack.
// -- r3 : the number of arguments (not including the receiver)
// -- r9 : the previous stack pointer
@@ -2199,7 +2228,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ LoadPX(r0, MemOperand(scratch, r8));
__ StorePX(r0, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kPointerSize));
+ __ addi(r8, r8, Operand(kSystemPointerSize));
__ bdnz(&loop);
__ bind(&skip);
}
@@ -2207,13 +2236,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
- __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ShiftLeftImm(r10, r7, Operand(kTaggedSizeLog2));
+ __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, r10);
__ mtctr(r7);
__ bind(&loop);
- __ LoadPU(r0, MemOperand(r5, -kPointerSize));
- __ StorePX(r0, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kPointerSize));
+ __ LoadAnyTaggedField(ip, MemOperand(r5, -kTaggedSize), r0);
+ __ StorePX(ip, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kSystemPointerSize));
+ __ addi(r5, r5, Operand(-kTaggedSize));
__ bdnz(&loop);
__ add(r3, r3, r7);
}
@@ -2232,16 +2263,17 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
- __ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ LoadAnyTaggedField(r6,
+ FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ StorePX(r6, MemOperand(sp, r0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadP(r4,
- FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
@@ -2275,7 +2307,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
  // Overwrite the original receiver with the (original) target.
- __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r8, r3, Operand(kSystemPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
@@ -2309,7 +2341,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ and_(r7, r7, ip, SetRC);
@@ -2338,15 +2371,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
- __ cmp(r4, r6);
+ __ CompareTagged(r4, r6);
__ bne(&skip);
- __ LoadP(r6,
- FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadP(r4,
- FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ LoadTaggedPointerField(
+ r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2364,7 +2397,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(r4, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadTaggedPointerField(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::Bits1::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
@@ -2390,7 +2423,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r8, r3, Operand(kSystemPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
@@ -2416,7 +2449,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
__ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
r0);
@@ -2444,8 +2478,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
- __ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ addi(r3, r3, Operand(2 * kSystemPointerSize));
+ __ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
__ sub(r7, r3, r7);
// Copy the arguments (including the receiver) to the new stack frame.
@@ -2460,7 +2494,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
__ cmp(r3, r7); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kPointerSize));
+ __ subi(r3, r3, Operand(kSystemPointerSize));
__ bne(&copy);
__ b(&invoke);
@@ -2488,10 +2522,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
+ __ LoadP(r0, MemOperand(r3, 2 * kSystemPointerSize));
__ push(r0);
__ cmp(r3, fp); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kPointerSize));
+ __ subi(r3, r3, Operand(kSystemPointerSize));
__ bne(&copy);
// Fill the remaining expected arguments with undefined.
@@ -2499,12 +2533,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// r6: new target (passed through to callee)
__ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
__ sub(r7, fp, r7);
// Adjust for frame.
__ subi(r7, r7,
Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
+ kSystemPointerSize));
Label fill;
__ bind(&fill);
@@ -2520,7 +2554,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4 : function (passed through to callee)
// r6 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ CallCodeObject(r5);
// Store offset of return address for deoptimizer.
@@ -2549,7 +2583,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Remove superfluous parameters from the stack.
__ sub(r7, r3, r5);
__ mr(r3, r5);
- __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r7, r7, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, r7);
__ b(&dont_adapt_arguments);
}
@@ -2572,8 +2606,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ __ RecordComment("-- Call without adapting args --");
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ JumpCodeObject(r5);
__ bind(&stack_overflow);
@@ -2663,9 +2698,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mr(r4, r5);
} else {
// Compute the argv pointer.
- __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2));
__ add(r4, r4, sp);
- __ subi(r4, r4, Operand(kPointerSize));
+ __ subi(r4, r4, Operand(kSystemPointerSize));
}
// Enter the exit frame that transitions from JavaScript to C++.
@@ -2701,7 +2736,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// buffer as implicit first argument.
__ mr(r5, r4);
__ mr(r4, r3);
- __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ addi(r3, sp,
+ Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
isolate_reg = r6;
}
@@ -2713,7 +2749,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
- __ LoadP(r4, MemOperand(r3, kPointerSize));
+ __ LoadP(r4, MemOperand(r3, kSystemPointerSize));
__ LoadP(r3, MemOperand(r3));
}
@@ -2828,7 +2864,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(result_reg, scratch);
// Account for saved regs.
- int argument_offset = 2 * kPointerSize;
+ int argument_offset = 2 * kSystemPointerSize;
// Load double input.
__ lfd(double_scratch, MemOperand(sp, argument_offset));
@@ -2850,7 +2886,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ Push(scratch_high, scratch_low);
// Account for saved regs.
- argument_offset += 2 * kPointerSize;
+ argument_offset += 2 * kSystemPointerSize;
__ lwz(scratch_high,
MemOperand(sp, argument_offset + Register::kExponentOffset));
@@ -2921,7 +2957,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
__ bind(&done);
__ Pop(scratch_high, scratch_low);
// Account for saved regs.
- argument_offset -= 2 * kPointerSize;
+ argument_offset -= 2 * kSystemPointerSize;
__ bind(&fastpath_done);
__ StoreP(result_reg, MemOperand(sp, argument_offset));
@@ -3092,33 +3128,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kPointerSize]: kHolder
- // sp[1 * kPointerSize]: kIsolate
- // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
- // sp[3 * kPointerSize]: undefined (kReturnValue)
- // sp[4 * kPointerSize]: kData
- // sp[5 * kPointerSize]: undefined (kNewTarget)
+ // sp[0 * kSystemPointerSize]: kHolder
+ // sp[1 * kSystemPointerSize]: kIsolate
+ // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
+ // sp[4 * kSystemPointerSize]: kData
+ // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ subi(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+ __ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
// kHolder.
- __ StoreP(holder, MemOperand(sp, 0 * kPointerSize));
+ __ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize));
// kIsolate.
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize));
// kReturnValueDefaultValue and kReturnValue.
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
- __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize));
// kData.
- __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize));
+ __ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize));
// kNewTarget.
- __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize));
// Keep a pointer to kHolder (= implicit_args) in a scratch register.
// We use it below to set up the FunctionCallbackInfo object.
@@ -3140,31 +3176,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
+ kSystemPointerSize));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
- __ addi(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kPointerSizeLog2));
+ __ addi(scratch, scratch,
+ Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
+ __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
+ kSystemPointerSize));
// FunctionCallbackInfo::length_.
- __ stw(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize));
+ __ stw(argc,
+ MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kSystemPointerSize));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
__ mov(scratch,
- Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kPointerSizeLog2));
+ Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
+ __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
- __ StoreP(scratch,
- MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize));
+ __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
+ kSystemPointerSize));
// v8::InvocationCallback's argument.
- __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ __ addi(r3, sp,
+ Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -3172,11 +3211,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// TODO(jgruber): Document what these arguments are.
static constexpr int kStackSlotsAboveFCA = 2;
MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
static constexpr int kUseStackSpaceOperand = 0;
MemOperand stack_space_operand(
- sp, (kStackFrameExtraParamSlot + 4) * kPointerSize);
+ sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3210,14 +3249,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadAnyTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
@@ -3225,20 +3266,20 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mr(r3, sp); // r3 = Handle<Name>
- __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
+ __ addi(r4, r3, Operand(1 * kSystemPointerSize)); // r4 = v8::PCI::args_
-// If ABI passes Handles (pointer-sized struct) in a register:
-//
-// Create 2 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] AccessorInfo&
-//
-// Otherwise:
-//
-// Create 3 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] copy of Handle (first arg)
-// [2] AccessorInfo&
+ // If ABI passes Handles (pointer-sized struct) in a register:
+ //
+ // Create 2 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] AccessorInfo&
+ //
+ // Otherwise:
+ //
+ // Create 3 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1] copy of Handle (first arg)
+ // [2] AccessorInfo&
if (ABI_PASSES_HANDLES_IN_REGS) {
accessorInfoSlot = kStackFrameExtraParamSlot + 1;
apiStackSpace = 2;
@@ -3253,26 +3294,28 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
- __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
- __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, arg0Slot * kSystemPointerSize));
+ __ addi(r3, sp, Operand(arg0Slot * kSystemPointerSize));
}
// Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
- __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
- __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
+ __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
+ __ addi(r4, sp, Operand(accessorInfoSlot * kSystemPointerSize));
// r4 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadP(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ fp,
+ (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,
@@ -3285,16 +3328,17 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
__ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux use a function descriptor;
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize));
+ __ LoadP(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(temp2, kSystemPointerSize));
__ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address
}
__ Call(temp2); // Call the C++ function.
- __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
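
The builtins-ppc.cc hunks above follow a single pattern: loads of tagged, on-heap fields move from the raw LoadP to LoadTaggedPointerField / LoadAnyTaggedField / SmiUntagField, offsets that walk tagged arrays switch to kTaggedSize, and machine-stack arithmetic uses kSystemPointerSize in place of the old kPointerSize. The snippet below is an illustrative sketch only, not part of the patch and not V8 source; it assumes a 64-bit build with pointer compression enabled, which is the configuration in which the two sizes differ.

    // Stand-ins for the constants named in the hunks above (values assumed).
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kSystemPointerSize = sizeof(void*);  // native stack slot width
    constexpr bool kPointerCompression = true;                 // assumption for illustration
    constexpr std::size_t kTaggedSize =
        kPointerCompression ? 4 : kSystemPointerSize;          // on-heap tagged field width

    int main() {
      // Walking a FixedArray of n tagged elements advances by kTaggedSize per
      // element, while pushing the same n values onto the machine stack moves
      // sp by kSystemPointerSize per value -- the split the hunks introduce
      // (LoadTaggedPointerField + kTaggedSize vs. StorePX + kSystemPointerSize).
      constexpr std::size_t n = 3;
      std::printf("heap bytes for %zu tagged slots : %zu\n", n, n * kTaggedSize);
      std::printf("stack bytes for %zu pushed slots: %zu\n", n, n * kSystemPointerSize);
      return 0;
    }

Stack slots (spilled registers, pushed arguments, exit-frame parameters) stay pointer-sized even when compression shrinks tagged fields, which is why a single builtin above can mix kSystemPointerSize for sp arithmetic with kTaggedSize when iterating a FixedArray.
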
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 95ca356a0c..9cf6da102b 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -6,544 +6,534 @@
#include 'src/builtins/builtins-promise-gen.h'
namespace runtime {
- extern transitioning runtime
- RejectPromise(implicit context: Context)(JSPromise, JSAny, Boolean): JSAny;
+extern transitioning runtime
+RejectPromise(implicit context: Context)(JSPromise, JSAny, Boolean): JSAny;
- extern transitioning runtime
- PromiseRevokeReject(implicit context: Context)(JSPromise): JSAny;
+extern transitioning runtime
+PromiseRevokeReject(implicit context: Context)(JSPromise): JSAny;
- extern transitioning runtime
- PromiseRejectAfterResolved(implicit context: Context)(JSPromise, JSAny):
- JSAny;
+extern transitioning runtime
+PromiseRejectAfterResolved(implicit context: Context)(JSPromise, JSAny): JSAny;
- extern transitioning runtime
- PromiseResolveAfterResolved(implicit context: Context)(JSPromise, JSAny):
- JSAny;
+extern transitioning runtime
+PromiseResolveAfterResolved(implicit context: Context)(JSPromise, JSAny): JSAny;
- extern transitioning runtime
- PromiseRejectEventFromStack(implicit context: Context)(JSPromise, JSAny):
- JSAny;
+extern transitioning runtime
+PromiseRejectEventFromStack(implicit context: Context)(JSPromise, JSAny): JSAny;
}
// https://tc39.es/ecma262/#sec-promise-abstract-operations
namespace promise {
- extern macro AllocateFunctionWithMapAndContext(
- Map, SharedFunctionInfo, Context): JSFunction;
-
- extern macro PromiseReactionMapConstant(): Map;
- extern macro PromiseFulfillReactionJobTaskMapConstant(): Map;
- extern macro PromiseRejectReactionJobTaskMapConstant(): Map;
- extern transitioning builtin
- ResolvePromise(Context, JSPromise, JSAny): JSAny;
-
- extern transitioning builtin
- EnqueueMicrotask(Context, Microtask): Undefined;
-
- macro
- ExtractHandlerContextInternal(implicit context: Context)(handler: Callable|
- Undefined):
- Context labels NotFound {
- let iter: JSAny = handler;
- while (true) {
- typeswitch (iter) {
- case (b: JSBoundFunction): {
- iter = b.bound_target_function;
- }
- case (p: JSProxy): {
- iter = p.target;
- }
- case (f: JSFunction): {
- return f.context;
- }
- case (JSAny): {
- break;
- }
+extern macro AllocateFunctionWithMapAndContext(
+ Map, SharedFunctionInfo, Context): JSFunction;
+
+extern macro PromiseReactionMapConstant(): Map;
+extern macro PromiseFulfillReactionJobTaskMapConstant(): Map;
+extern macro PromiseRejectReactionJobTaskMapConstant(): Map;
+extern transitioning builtin
+ResolvePromise(Context, JSPromise, JSAny): JSAny;
+
+extern transitioning builtin
+EnqueueMicrotask(Context, Microtask): Undefined;
+
+macro
+ExtractHandlerContextInternal(implicit context: Context)(
+ handler: Callable|Undefined): Context labels NotFound {
+ let iter: JSAny = handler;
+ while (true) {
+ typeswitch (iter) {
+ case (b: JSBoundFunction): {
+ iter = b.bound_target_function;
+ }
+ case (p: JSProxy): {
+ iter = p.target;
+ }
+ case (f: JSFunction): {
+ return f.context;
+ }
+ case (JSAny): {
+ break;
}
}
- goto NotFound;
}
+ goto NotFound;
+}
- macro
- ExtractHandlerContext(implicit context: Context)(handler: Callable|
- Undefined): Context {
- try {
- return ExtractHandlerContextInternal(handler) otherwise NotFound;
- }
- label NotFound deferred {
- return context;
- }
+macro
+ExtractHandlerContext(implicit context: Context)(handler: Callable|
+ Undefined): Context {
+ try {
+ return ExtractHandlerContextInternal(handler) otherwise NotFound;
+ } label NotFound deferred {
+ return context;
}
+}
- macro
- ExtractHandlerContext(implicit context: Context)(
- primary: Callable|Undefined, secondary: Callable|Undefined): Context {
- try {
- return ExtractHandlerContextInternal(primary) otherwise NotFound;
- }
- label NotFound deferred {
- return ExtractHandlerContextInternal(secondary) otherwise Default;
- }
- label Default deferred {
- return context;
- }
+macro
+ExtractHandlerContext(implicit context: Context)(
+ primary: Callable|Undefined, secondary: Callable|Undefined): Context {
+ try {
+ return ExtractHandlerContextInternal(primary) otherwise NotFound;
+ } label NotFound deferred {
+ return ExtractHandlerContextInternal(secondary) otherwise Default;
+ } label Default deferred {
+ return context;
}
+}
- transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)(
- promiseReaction: PromiseReaction, argument: JSAny,
- reactionType: constexpr PromiseReactionType): void {
- let primaryHandler: Callable|Undefined;
- let secondaryHandler: Callable|Undefined;
- if constexpr (reactionType == kPromiseReactionFulfill) {
- primaryHandler = promiseReaction.fulfill_handler;
- secondaryHandler = promiseReaction.reject_handler;
- } else {
- StaticAssert(reactionType == kPromiseReactionReject);
- primaryHandler = promiseReaction.reject_handler;
- secondaryHandler = promiseReaction.fulfill_handler;
- }
-
- // According to HTML, we use the context of the appropriate handler as the
- // context of the microtask. See step 3 of HTML's EnqueueJob:
- // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
- const handlerContext: Context =
- ExtractHandlerContext(primaryHandler, secondaryHandler);
+transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)(
+ promiseReaction: PromiseReaction, argument: JSAny,
+ reactionType: constexpr PromiseReactionType): void {
+ let primaryHandler: Callable|Undefined;
+ let secondaryHandler: Callable|Undefined;
+ if constexpr (reactionType == kPromiseReactionFulfill) {
+ primaryHandler = promiseReaction.fulfill_handler;
+ secondaryHandler = promiseReaction.reject_handler;
+ } else {
+ StaticAssert(reactionType == kPromiseReactionReject);
+ primaryHandler = promiseReaction.reject_handler;
+ secondaryHandler = promiseReaction.fulfill_handler;
+ }
- // Morph {current} from a PromiseReaction into a PromiseReactionJobTask
- // and schedule that on the microtask queue. We try to minimize the number
- // of stores here to avoid screwing up the store buffer.
+ // According to HTML, we use the context of the appropriate handler as the
+ // context of the microtask. See step 3 of HTML's EnqueueJob:
+ // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
+ const handlerContext: Context =
+ ExtractHandlerContext(primaryHandler, secondaryHandler);
+
+ // Morph {current} from a PromiseReaction into a PromiseReactionJobTask
+ // and schedule that on the microtask queue. We try to minimize the number
+ // of stores here to avoid screwing up the store buffer.
+ StaticAssert(
+ kPromiseReactionSize ==
+ kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks);
+ if constexpr (reactionType == kPromiseReactionFulfill) {
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseFulfillReactionJobTaskMapConstant();
+ const promiseReactionJobTask =
+ UnsafeCast<PromiseFulfillReactionJobTask>(promiseReaction);
+ promiseReactionJobTask.argument = argument;
+ promiseReactionJobTask.context = handlerContext;
+ EnqueueMicrotask(handlerContext, promiseReactionJobTask);
StaticAssert(
- kPromiseReactionSize ==
- kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks);
- if constexpr (reactionType == kPromiseReactionFulfill) {
- * UnsafeConstCast(& promiseReaction.map) =
- PromiseFulfillReactionJobTaskMapConstant();
- const promiseReactionJobTask =
- UnsafeCast<PromiseFulfillReactionJobTask>(promiseReaction);
- promiseReactionJobTask.argument = argument;
- promiseReactionJobTask.context = handlerContext;
- EnqueueMicrotask(handlerContext, promiseReactionJobTask);
- StaticAssert(
- kPromiseReactionFulfillHandlerOffset ==
- kPromiseReactionJobTaskHandlerOffset);
- StaticAssert(
- kPromiseReactionPromiseOrCapabilityOffset ==
- kPromiseReactionJobTaskPromiseOrCapabilityOffset);
- } else {
- StaticAssert(reactionType == kPromiseReactionReject);
- * UnsafeConstCast(& promiseReaction.map) =
- PromiseRejectReactionJobTaskMapConstant();
- const promiseReactionJobTask =
- UnsafeCast<PromiseRejectReactionJobTask>(promiseReaction);
- promiseReactionJobTask.argument = argument;
- promiseReactionJobTask.context = handlerContext;
- promiseReactionJobTask.handler = primaryHandler;
- EnqueueMicrotask(handlerContext, promiseReactionJobTask);
- StaticAssert(
- kPromiseReactionPromiseOrCapabilityOffset ==
- kPromiseReactionJobTaskPromiseOrCapabilityOffset);
- }
+ kPromiseReactionFulfillHandlerOffset ==
+ kPromiseReactionJobTaskHandlerOffset);
+ StaticAssert(
+ kPromiseReactionPromiseOrCapabilityOffset ==
+ kPromiseReactionJobTaskPromiseOrCapabilityOffset);
+ } else {
+ StaticAssert(reactionType == kPromiseReactionReject);
+ * UnsafeConstCast(& promiseReaction.map) =
+ PromiseRejectReactionJobTaskMapConstant();
+ const promiseReactionJobTask =
+ UnsafeCast<PromiseRejectReactionJobTask>(promiseReaction);
+ promiseReactionJobTask.argument = argument;
+ promiseReactionJobTask.context = handlerContext;
+ promiseReactionJobTask.handler = primaryHandler;
+ EnqueueMicrotask(handlerContext, promiseReactionJobTask);
+ StaticAssert(
+ kPromiseReactionPromiseOrCapabilityOffset ==
+ kPromiseReactionJobTaskPromiseOrCapabilityOffset);
}
+}
- // https://tc39.es/ecma262/#sec-triggerpromisereactions
- transitioning macro TriggerPromiseReactions(implicit context: Context)(
- reactions: Zero|PromiseReaction, argument: JSAny,
- reactionType: constexpr PromiseReactionType): void {
- // We need to reverse the {reactions} here, since we record them on the
- // JSPromise in the reverse order.
- let current = reactions;
- let reversed: Zero|PromiseReaction = kZero;
-
- // As an additional safety net against misuse of the V8 Extras API, we
- // sanity check the {reactions} to make sure that they are actually
- // PromiseReaction instances and not actual JavaScript values (which
- // would indicate that we're rejecting or resolving an already settled
- // promise), see https://crbug.com/931640 for details on this.
- while (true) {
- typeswitch (current) {
- case (Zero): {
- break;
- }
- case (currentReaction: PromiseReaction): {
- current = currentReaction.next;
- currentReaction.next = reversed;
- reversed = currentReaction;
- }
+// https://tc39.es/ecma262/#sec-triggerpromisereactions
+transitioning macro TriggerPromiseReactions(implicit context: Context)(
+ reactions: Zero|PromiseReaction, argument: JSAny,
+ reactionType: constexpr PromiseReactionType): void {
+ // We need to reverse the {reactions} here, since we record them on the
+ // JSPromise in the reverse order.
+ let current = reactions;
+ let reversed: Zero|PromiseReaction = kZero;
+
+ // As an additional safety net against misuse of the V8 Extras API, we
+ // sanity check the {reactions} to make sure that they are actually
+ // PromiseReaction instances and not actual JavaScript values (which
+ // would indicate that we're rejecting or resolving an already settled
+ // promise), see https://crbug.com/931640 for details on this.
+ while (true) {
+ typeswitch (current) {
+ case (Zero): {
+ break;
+ }
+ case (currentReaction: PromiseReaction): {
+ current = currentReaction.next;
+ currentReaction.next = reversed;
+ reversed = currentReaction;
}
}
- // Morph the {reactions} into PromiseReactionJobTasks and push them
- // onto the microtask queue.
- current = reversed;
- while (true) {
- typeswitch (current) {
- case (Zero): {
- break;
- }
- case (currentReaction: PromiseReaction): {
- current = currentReaction.next;
- MorphAndEnqueuePromiseReaction(
- currentReaction, argument, reactionType);
- }
+ }
+ // Morph the {reactions} into PromiseReactionJobTasks and push them
+ // onto the microtask queue.
+ current = reversed;
+ while (true) {
+ typeswitch (current) {
+ case (Zero): {
+ break;
+ }
+ case (currentReaction: PromiseReaction): {
+ current = currentReaction.next;
+ MorphAndEnqueuePromiseReaction(currentReaction, argument, reactionType);
}
}
}
+}
- // https://tc39.es/ecma262/#sec-fulfillpromise
- transitioning builtin
- FulfillPromise(implicit context: Context)(promise: JSPromise, value: JSAny):
- Undefined {
- // Assert: The value of promise.[[PromiseState]] is "pending".
- assert(promise.Status() == PromiseState::kPending);
+// https://tc39.es/ecma262/#sec-fulfillpromise
+transitioning builtin
+FulfillPromise(implicit context: Context)(
+ promise: JSPromise, value: JSAny): Undefined {
+ // Assert: The value of promise.[[PromiseState]] is "pending".
+ assert(promise.Status() == PromiseState::kPending);
- // 2. Let reactions be promise.[[PromiseFulfillReactions]].
- const reactions =
- UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ const reactions =
+ UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
- // 3. Set promise.[[PromiseResult]] to value.
- // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
- // 5. Set promise.[[PromiseRejectReactions]] to undefined.
- promise.reactions_or_result = value;
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise.reactions_or_result = value;
- // 6. Set promise.[[PromiseState]] to "fulfilled".
- promise.SetStatus(PromiseState::kFulfilled);
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ promise.SetStatus(PromiseState::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ TriggerPromiseReactions(reactions, value, kPromiseReactionFulfill);
+ return Undefined;
+}
- // 7. Return TriggerPromiseReactions(reactions, value).
- TriggerPromiseReactions(reactions, value, kPromiseReactionFulfill);
- return Undefined;
+extern macro PromiseBuiltinsAssembler::
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
+
+// https://tc39.es/ecma262/#sec-rejectpromise
+transitioning builtin
+RejectPromise(implicit context: Context)(
+ promise: JSPromise, reason: JSAny, debugEvent: Boolean): JSAny {
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+ // the complexity here and also avoids a couple of back and
+ // forth between JavaScript and C++ land.
+ if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ !promise.HasHandler()) {
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+ // We don't try to handle rejecting {promise} without handler
+ // here, but we let the C++ code take care of this completely.
+ return runtime::RejectPromise(promise, reason, debugEvent);
}
- extern macro PromiseBuiltinsAssembler::
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
-
- // https://tc39.es/ecma262/#sec-rejectpromise
- transitioning builtin
- RejectPromise(implicit context: Context)(
- promise: JSPromise, reason: JSAny, debugEvent: Boolean): JSAny {
- // If promise hook is enabled or the debugger is active, let
- // the runtime handle this operation, which greatly reduces
- // the complexity here and also avoids a couple of back and
- // forth between JavaScript and C++ land.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
- !promise.HasHandler()) {
- // 7. If promise.[[PromiseIsHandled]] is false, perform
- // HostPromiseRejectionTracker(promise, "reject").
- // We don't try to handle rejecting {promise} without handler
- // here, but we let the C++ code take care of this completely.
- return runtime::RejectPromise(promise, reason, debugEvent);
- }
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ const reactions =
+ UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
- // 2. Let reactions be promise.[[PromiseRejectReactions]].
- const reactions =
- UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise.reactions_or_result = reason;
- // 3. Set promise.[[PromiseResult]] to reason.
- // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
- // 5. Set promise.[[PromiseRejectReactions]] to undefined.
- promise.reactions_or_result = reason;
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ promise.SetStatus(PromiseState::kRejected);
- // 6. Set promise.[[PromiseState]] to "rejected".
- promise.SetStatus(PromiseState::kRejected);
+ // 8. Return TriggerPromiseReactions(reactions, reason).
+ TriggerPromiseReactions(reactions, reason, kPromiseReactionReject);
+ return Undefined;
+}
- // 8. Return TriggerPromiseReactions(reactions, reason).
- TriggerPromiseReactions(reactions, reason, kPromiseReactionReject);
- return Undefined;
- }
+const kPromiseCapabilitySize:
+ constexpr int31 generates 'PromiseCapability::kSize';
+const kPromiseBuiltinsCapabilitiesContextLength: constexpr int31
+ generates 'PromiseBuiltins::kCapabilitiesContextLength';
+const kPromiseBuiltinsCapabilitySlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kCapabilitySlot';
+const kPromiseBuiltinsPromiseSlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kPromiseSlot';
+const kPromiseBuiltinsAlreadyResolvedSlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kAlreadyResolvedSlot';
+const kPromiseBuiltinsDebugEventSlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kDebugEventSlot';
+
+@export
+macro CreatePromiseCapabilitiesExecutorContext(
+ nativeContext: NativeContext, capability: PromiseCapability): Context {
+ const executorContext = AllocateSyntheticFunctionContext(
+ nativeContext, kPromiseBuiltinsCapabilitiesContextLength);
+
+ executorContext[kPromiseBuiltinsCapabilitySlot] = capability;
+ return executorContext;
+}
- const kPromiseCapabilitySize:
- constexpr int31 generates 'PromiseCapability::kSize';
- const kPromiseBuiltinsCapabilitiesContextLength: constexpr int31
- generates 'PromiseBuiltins::kCapabilitiesContextLength';
- const kPromiseBuiltinsCapabilitySlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kCapabilitySlot';
- const kPromiseBuiltinsPromiseSlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kPromiseSlot';
- const kPromiseBuiltinsAlreadyResolvedSlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kAlreadyResolvedSlot';
- const kPromiseBuiltinsDebugEventSlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kDebugEventSlot';
-
- @export
- macro CreatePromiseCapabilitiesExecutorContext(
- nativeContext: NativeContext, capability: PromiseCapability): Context {
- const executorContext = AllocateSyntheticFunctionContext(
- nativeContext, kPromiseBuiltinsCapabilitiesContextLength);
-
- executorContext[kPromiseBuiltinsCapabilitySlot] = capability;
- return executorContext;
- }
+@export
+macro CreatePromiseCapability(
+ promise: JSReceiver|Undefined, resolve: JSFunction|Undefined,
+ reject: JSFunction|Undefined): PromiseCapability {
+ return new PromiseCapability{
+ map: kPromiseCapabilityMap,
+ promise: promise,
+ resolve: resolve,
+ reject: reject
+ };
+}
- @export
- macro CreatePromiseCapability(
- promise: JSReceiver|Undefined, resolve: JSFunction|Undefined,
- reject: JSFunction|Undefined): PromiseCapability {
- return new PromiseCapability{
- map: kPromiseCapabilityMap,
- promise: promise,
- resolve: resolve,
- reject: reject
- };
- }
+@export
+struct PromiseResolvingFunctions {
+ resolve: JSFunction;
+ reject: JSFunction;
+}
- @export
- struct PromiseResolvingFunctions {
- resolve: JSFunction;
- reject: JSFunction;
- }
+@export
+macro CreatePromiseResolvingFunctions(implicit context: Context)(
+ promise: JSPromise, debugEvent: Object, nativeContext: NativeContext):
+ PromiseResolvingFunctions {
+ const promiseContext = CreatePromiseResolvingFunctionsContext(
+ promise, debugEvent, nativeContext);
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const resolveInfo = PromiseCapabilityDefaultResolveSharedFunConstant();
+
+ const resolve: JSFunction =
+ AllocateFunctionWithMapAndContext(map, resolveInfo, promiseContext);
+ const rejectInfo = PromiseCapabilityDefaultRejectSharedFunConstant();
+ const reject: JSFunction =
+ AllocateFunctionWithMapAndContext(map, rejectInfo, promiseContext);
+ return PromiseResolvingFunctions{resolve: resolve, reject: reject};
+}
- @export
- macro CreatePromiseResolvingFunctions(implicit context: Context)(
- promise: JSPromise, debugEvent: Object, nativeContext: NativeContext):
- PromiseResolvingFunctions {
- const promiseContext = CreatePromiseResolvingFunctionsContext(
- promise, debugEvent, nativeContext);
- const map = UnsafeCast<Map>(
+transitioning macro
+InnerNewPromiseCapability(implicit context: Context)(
+ constructor: HeapObject, debugEvent: Object): PromiseCapability {
+ const nativeContext = LoadNativeContext(context);
+ if (TaggedEqual(
+ constructor,
+ nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX])) {
+ const promise = NewJSPromise();
+
+ const pair =
+ CreatePromiseResolvingFunctions(promise, debugEvent, nativeContext);
+
+ return CreatePromiseCapability(promise, pair.resolve, pair.reject);
+ } else {
+ // We have to create the capability before the associated promise
+ // because the builtin PromiseConstructor uses the executor.
+ const capability = CreatePromiseCapability(Undefined, Undefined, Undefined);
+ const executorContext =
+ CreatePromiseCapabilitiesExecutorContext(nativeContext, capability);
+
+ const executorInfo = PromiseGetCapabilitiesExecutorSharedFunConstant();
+ const functionMap = UnsafeCast<Map>(
nativeContext
[NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const resolveInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::
- PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX]);
- const resolve: JSFunction =
- AllocateFunctionWithMapAndContext(map, resolveInfo, promiseContext);
- const rejectInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::
- PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX]);
- const reject: JSFunction =
- AllocateFunctionWithMapAndContext(map, rejectInfo, promiseContext);
- return PromiseResolvingFunctions{resolve: resolve, reject: reject};
- }
+ const executor = AllocateFunctionWithMapAndContext(
+ functionMap, executorInfo, executorContext);
- transitioning macro
- InnerNewPromiseCapability(implicit context: Context)(
- constructor: HeapObject, debugEvent: Object): PromiseCapability {
- const nativeContext = LoadNativeContext(context);
- if (TaggedEqual(
- constructor,
- nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX])) {
- const promise = NewJSPromise();
-
- const pair =
- CreatePromiseResolvingFunctions(promise, debugEvent, nativeContext);
-
- return CreatePromiseCapability(promise, pair.resolve, pair.reject);
- } else {
- // We have to create the capability before the associated promise
- // because the builtin PromiseConstructor uses the executor.
- const capability =
- CreatePromiseCapability(Undefined, Undefined, Undefined);
- const executorContext =
- CreatePromiseCapabilitiesExecutorContext(nativeContext, capability);
-
- const executorInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::
- PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN]);
- const functionMap = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const executor = AllocateFunctionWithMapAndContext(
- functionMap, executorInfo, executorContext);
-
- const promiseConstructor = UnsafeCast<Constructor>(constructor);
- const promise = Construct(promiseConstructor, executor);
- capability.promise = promise;
-
- if (!TaggedIsCallable(capability.resolve) ||
- !TaggedIsCallable(capability.reject)) {
- ThrowTypeError(MessageTemplate::kPromiseNonCallable);
- }
- return capability;
+ const promiseConstructor = UnsafeCast<Constructor>(constructor);
+ const promise = Construct(promiseConstructor, executor);
+ capability.promise = promise;
+
+ if (!Is<Callable>(capability.resolve) || !Is<Callable>(capability.reject)) {
+ ThrowTypeError(MessageTemplate::kPromiseNonCallable);
}
+ return capability;
}
+}
- // https://tc39.es/ecma262/#sec-newpromisecapability
- transitioning builtin
- NewPromiseCapability(implicit context: Context)(
- maybeConstructor: Object, debugEvent: Object): PromiseCapability {
- typeswitch (maybeConstructor) {
- case (Smi): {
+// https://tc39.es/ecma262/#sec-newpromisecapability
+transitioning builtin
+NewPromiseCapability(implicit context: Context)(
+ maybeConstructor: Object, debugEvent: Object): PromiseCapability {
+ typeswitch (maybeConstructor) {
+ case (Smi): {
+ ThrowTypeError(MessageTemplate::kNotConstructor, maybeConstructor);
+ }
+ case (constructor: HeapObject): {
+ if (!IsConstructor(constructor)) {
ThrowTypeError(MessageTemplate::kNotConstructor, maybeConstructor);
}
- case (constructor: HeapObject): {
- if (!IsConstructor(constructor)) {
- ThrowTypeError(MessageTemplate::kNotConstructor, maybeConstructor);
- }
- return InnerNewPromiseCapability(constructor, debugEvent);
- }
+ return InnerNewPromiseCapability(constructor, debugEvent);
}
}
+}
- // https://tc39.es/ecma262/#sec-promise-reject-functions
- transitioning javascript builtin
- PromiseCapabilityDefaultReject(
- js-implicit context: NativeContext,
- receiver: JSAny)(reason: JSAny): JSAny {
- // 2. Let promise be F.[[Promise]].
- const promise = UnsafeCast<JSPromise>(context[kPromiseBuiltinsPromiseSlot]);
-
- // 3. Let alreadyResolved be F.[[AlreadyResolved]].
- const alreadyResolved =
- UnsafeCast<Boolean>(context[kPromiseBuiltinsAlreadyResolvedSlot]);
-
- // 4. If alreadyResolved.[[Value]] is true, return undefined.
- if (alreadyResolved == True) {
- return runtime::PromiseRejectAfterResolved(promise, reason);
- }
+// https://tc39.es/ecma262/#sec-promise-reject-functions
+transitioning javascript builtin
+PromiseCapabilityDefaultReject(
+ js-implicit context: NativeContext, receiver: JSAny)(reason: JSAny): JSAny {
+ // 2. Let promise be F.[[Promise]].
+ const promise = UnsafeCast<JSPromise>(context[kPromiseBuiltinsPromiseSlot]);
- // 5. Set alreadyResolved.[[Value]] to true.
- context[kPromiseBuiltinsAlreadyResolvedSlot] = True;
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ const alreadyResolved =
+ UnsafeCast<Boolean>(context[kPromiseBuiltinsAlreadyResolvedSlot]);
- // 6. Return RejectPromise(promise, reason).
- const debugEvent =
- UnsafeCast<Boolean>(context[kPromiseBuiltinsDebugEventSlot]);
- return RejectPromise(promise, reason, debugEvent);
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ if (alreadyResolved == True) {
+ return runtime::PromiseRejectAfterResolved(promise, reason);
}
- // https://tc39.es/ecma262/#sec-promise-resolve-functions
- transitioning javascript builtin
- PromiseCapabilityDefaultResolve(
- js-implicit context: NativeContext,
- receiver: JSAny)(resolution: JSAny): JSAny {
- // 2. Let promise be F.[[Promise]].
- const promise = UnsafeCast<JSPromise>(context[kPromiseBuiltinsPromiseSlot]);
-
- // 3. Let alreadyResolved be F.[[AlreadyResolved]].
- const alreadyResolved =
- UnsafeCast<Boolean>(context[kPromiseBuiltinsAlreadyResolvedSlot]);
-
- // 4. If alreadyResolved.[[Value]] is true, return undefined.
- if (alreadyResolved == True) {
- return runtime::PromiseResolveAfterResolved(promise, resolution);
- }
+ // 5. Set alreadyResolved.[[Value]] to true.
+ context[kPromiseBuiltinsAlreadyResolvedSlot] = True;
- // 5. Set alreadyResolved.[[Value]] to true.
- context[kPromiseBuiltinsAlreadyResolvedSlot] = True;
+ // 6. Return RejectPromise(promise, reason).
+ const debugEvent =
+ UnsafeCast<Boolean>(context[kPromiseBuiltinsDebugEventSlot]);
+ return RejectPromise(promise, reason, debugEvent);
+}
- // The rest of the logic (and the catch prediction) is
- // encapsulated in the dedicated ResolvePromise builtin.
- return ResolvePromise(context, promise, resolution);
+// https://tc39.es/ecma262/#sec-promise-resolve-functions
+transitioning javascript builtin
+PromiseCapabilityDefaultResolve(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(resolution: JSAny): JSAny {
+ // 2. Let promise be F.[[Promise]].
+ const promise = UnsafeCast<JSPromise>(context[kPromiseBuiltinsPromiseSlot]);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ const alreadyResolved =
+ UnsafeCast<Boolean>(context[kPromiseBuiltinsAlreadyResolvedSlot]);
+
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ if (alreadyResolved == True) {
+ return runtime::PromiseResolveAfterResolved(promise, resolution);
}
- @export
- transitioning macro PerformPromiseThenImpl(implicit context: Context)(
- promise: JSPromise, onFulfilled: Callable|Undefined,
- onRejected: Callable|Undefined,
- resultPromiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
- if (promise.Status() == PromiseState::kPending) {
- // The {promise} is still in "Pending" state, so we just record a new
- // PromiseReaction holding both the onFulfilled and onRejected callbacks.
- // Once the {promise} is resolved we decide on the concrete handler to
- // push onto the microtask queue.
- const handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
- const promiseReactions =
- UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
- const reaction = NewPromiseReaction(
- handlerContext, promiseReactions, resultPromiseOrCapability,
- onFulfilled, onRejected);
- promise.reactions_or_result = reaction;
- } else {
- const reactionsOrResult = promise.reactions_or_result;
- let microtask: PromiseReactionJobTask;
- let handlerContext: Context;
- if (promise.Status() == PromiseState::kFulfilled) {
- handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
- microtask = NewPromiseFulfillReactionJobTask(
- handlerContext, reactionsOrResult, onFulfilled,
+ // 5. Set alreadyResolved.[[Value]] to true.
+ context[kPromiseBuiltinsAlreadyResolvedSlot] = True;
+
+ // The rest of the logic (and the catch prediction) is
+ // encapsulated in the dedicated ResolvePromise builtin.
+ return ResolvePromise(context, promise, resolution);
+}
+
+@export
+transitioning macro PerformPromiseThenImpl(implicit context: Context)(
+ promise: JSPromise, onFulfilled: Callable|Undefined,
+ onRejected: Callable|Undefined,
+ resultPromiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
+ if (promise.Status() == PromiseState::kPending) {
+ // The {promise} is still in "Pending" state, so we just record a new
+ // PromiseReaction holding both the onFulfilled and onRejected callbacks.
+ // Once the {promise} is resolved we decide on the concrete handler to
+ // push onto the microtask queue.
+ const handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
+ const promiseReactions =
+ UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
+ const reaction = NewPromiseReaction(
+ handlerContext, promiseReactions, resultPromiseOrCapability,
+ onFulfilled, onRejected);
+ promise.reactions_or_result = reaction;
+ } else {
+ const reactionsOrResult = promise.reactions_or_result;
+ let microtask: PromiseReactionJobTask;
+ let handlerContext: Context;
+ if (promise.Status() == PromiseState::kFulfilled) {
+ handlerContext = ExtractHandlerContext(onFulfilled, onRejected);
+ microtask = NewPromiseFulfillReactionJobTask(
+ handlerContext, reactionsOrResult, onFulfilled,
+ resultPromiseOrCapability);
+ } else
+ deferred {
+ assert(promise.Status() == PromiseState::kRejected);
+ handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
+ microtask = NewPromiseRejectReactionJobTask(
+ handlerContext, reactionsOrResult, onRejected,
resultPromiseOrCapability);
- } else
- deferred {
- assert(promise.Status() == PromiseState::kRejected);
- handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
- microtask = NewPromiseRejectReactionJobTask(
- handlerContext, reactionsOrResult, onRejected,
- resultPromiseOrCapability);
- if (!promise.HasHandler()) {
- runtime::PromiseRevokeReject(promise);
- }
+ if (!promise.HasHandler()) {
+ runtime::PromiseRevokeReject(promise);
}
- EnqueueMicrotask(handlerContext, microtask);
- }
- promise.SetHasHandler();
+ }
+ EnqueueMicrotask(handlerContext, microtask);
}
+ promise.SetHasHandler();
+}
- // https://tc39.es/ecma262/#sec-performpromisethen
- transitioning builtin
- PerformPromiseThen(implicit context: Context)(
- promise: JSPromise, onFulfilled: Callable|Undefined,
- onRejected: Callable|Undefined,
- resultPromise: JSPromise|Undefined): JSAny {
- PerformPromiseThenImpl(promise, onFulfilled, onRejected, resultPromise);
- return resultPromise;
- }
+// https://tc39.es/ecma262/#sec-performpromisethen
+transitioning builtin
+PerformPromiseThen(implicit context: Context)(
+ promise: JSPromise, onFulfilled: Callable|Undefined,
+ onRejected: Callable|Undefined, resultPromise: JSPromise|Undefined): JSAny {
+ PerformPromiseThenImpl(promise, onFulfilled, onRejected, resultPromise);
+ return resultPromise;
+}
- // https://tc39.es/ecma262/#sec-promise-reject-functions
- transitioning javascript builtin
- PromiseReject(js-implicit context: NativeContext, receiver: JSAny)(
- reason: JSAny): JSAny {
- // 1. Let C be the this value.
- // 2. If Type(C) is not Object, throw a TypeError exception.
- const receiver = Cast<JSReceiver>(receiver) otherwise
- ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'PromiseReject');
-
- const promiseFun = context[NativeContextSlot::PROMISE_FUNCTION_INDEX];
- if (promiseFun == receiver) {
- const promise = NewJSPromise(PromiseState::kRejected, reason);
- runtime::PromiseRejectEventFromStack(promise, reason);
- return promise;
- } else {
- // 3. Let promiseCapability be ? NewPromiseCapability(C).
- const capability = NewPromiseCapability(receiver, True);
-
-      // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
- const reject = UnsafeCast<Callable>(capability.reject);
- Call(context, reject, Undefined, reason);
-
- // 5. Return promiseCapability.[[Promise]].
- return capability.promise;
- }
- }
+// https://tc39.es/ecma262/#sec-promise-reject-functions
+transitioning javascript builtin
+PromiseReject(
+ js-implicit context: NativeContext, receiver: JSAny)(reason: JSAny): JSAny {
+ // 1. Let C be the this value.
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ const receiver = Cast<JSReceiver>(receiver) otherwise
+ ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'PromiseReject');
+
+ const promiseFun = context[NativeContextSlot::PROMISE_FUNCTION_INDEX];
+ if (promiseFun == receiver) {
+ const promise = NewJSPromise(PromiseState::kRejected, reason);
+ runtime::PromiseRejectEventFromStack(promise, reason);
+ return promise;
+ } else {
+ // 3. Let promiseCapability be ? NewPromiseCapability(C).
+ const capability = NewPromiseCapability(receiver, True);
- const kPromiseExecutorAlreadyInvoked: constexpr MessageTemplate
- generates 'MessageTemplate::kPromiseExecutorAlreadyInvoked';
-
- // https://tc39.es/ecma262/#sec-getcapabilitiesexecutor-functions
- transitioning javascript builtin
- PromiseGetCapabilitiesExecutor(
- js-implicit context: NativeContext,
- receiver: JSAny)(resolve: JSAny, reject: JSAny): JSAny {
- const capability =
- UnsafeCast<PromiseCapability>(context[kPromiseBuiltinsCapabilitySlot]);
- if (capability.resolve != Undefined || capability.reject != Undefined)
- deferred {
- ThrowTypeError(kPromiseExecutorAlreadyInvoked);
- }
+    // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
+ const reject = UnsafeCast<Callable>(capability.reject);
+ Call(context, reject, Undefined, reason);
- capability.resolve = resolve;
- capability.reject = reject;
- return Undefined;
+ // 5. Return promiseCapability.[[Promise]].
+ return capability.promise;
}
+}
- transitioning macro CallResolve(implicit context: Context)(
- constructor: Constructor, resolve: JSAny, value: JSAny): JSAny {
- // Undefined can never be a valid value for the resolve function,
- // instead it is used as a special marker for the fast path.
- if (resolve == Undefined) {
- return PromiseResolve(constructor, value);
- } else
- deferred {
- return Call(context, UnsafeCast<Callable>(resolve), constructor, value);
- }
- }
+const kPromiseExecutorAlreadyInvoked: constexpr MessageTemplate
+ generates 'MessageTemplate::kPromiseExecutorAlreadyInvoked';
+
+// https://tc39.es/ecma262/#sec-getcapabilitiesexecutor-functions
+transitioning javascript builtin
+PromiseGetCapabilitiesExecutor(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ resolve: JSAny, reject: JSAny): JSAny {
+ const capability =
+ UnsafeCast<PromiseCapability>(context[kPromiseBuiltinsCapabilitySlot]);
+ if (capability.resolve != Undefined || capability.reject != Undefined)
+ deferred {
+ ThrowTypeError(kPromiseExecutorAlreadyInvoked);
+ }
- transitioning javascript builtin
- PromiseConstructorLazyDeoptContinuation(
- js-implicit context: NativeContext, receiver: JSAny)(
- promise: JSAny, reject: JSAny, exception: JSAny|TheHole,
- _result: JSAny): JSAny {
- typeswitch (exception) {
- case (TheHole): {
- }
- case (e: JSAny): {
- Call(context, reject, Undefined, e);
- }
+ capability.resolve = resolve;
+ capability.reject = reject;
+ return Undefined;
+}
+
+transitioning macro CallResolve(implicit context: Context)(
+ constructor: Constructor, resolve: JSAny, value: JSAny): JSAny {
+ // Undefined can never be a valid value for the resolve function,
+ // instead it is used as a special marker for the fast path.
+ if (resolve == Undefined) {
+ return PromiseResolve(constructor, value);
+ } else
+ deferred {
+ return Call(context, UnsafeCast<Callable>(resolve), constructor, value);
+ }
+}
+
+transitioning javascript builtin
+PromiseConstructorLazyDeoptContinuation(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ promise: JSAny, reject: JSAny, exception: JSAny|TheHole,
+ _result: JSAny): JSAny {
+ typeswitch (exception) {
+ case (TheHole): {
+ }
+ case (e: JSAny): {
+ Call(context, reject, Undefined, e);
}
- return promise;
}
+ return promise;
+}
+
+extern macro PromiseCapabilityDefaultRejectSharedFunConstant():
+ SharedFunctionInfo;
+extern macro PromiseCapabilityDefaultResolveSharedFunConstant():
+ SharedFunctionInfo;
+extern macro PromiseGetCapabilitiesExecutorSharedFunConstant():
+ SharedFunctionInfo;
}
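
Note: the builtin above implements the spec's NewPromiseCapability(C). When C is not the intrinsic %Promise%, a capability record is created first, a GetCapabilitiesExecutor closure captures the resolve/reject arguments, and the result of `new C(executor)` becomes the capability's promise. A minimal TypeScript sketch of that generic path follows; the names are illustrative only (not V8 internals), and the %Promise% fast path and synthetic-context bookkeeping are omitted.

    type Capability<T> = {
      promise: Promise<T>;
      resolve: (value: T | PromiseLike<T>) => void;
      reject: (reason?: unknown) => void;
    };

    function newPromiseCapability<T>(C: PromiseConstructor): Capability<T> {
      let resolve: Capability<T>['resolve'] | undefined;
      let reject: Capability<T>['reject'] | undefined;
      // The capability record exists before the promise: the executor handed
      // to the constructor is what captures resolve and reject.
      const promise = new C<T>((res, rej) => {
        if (resolve !== undefined || reject !== undefined) {
          // Corresponds to kPromiseExecutorAlreadyInvoked above (message text
          // here is illustrative).
          throw new TypeError('Promise executor has already been invoked');
        }
        resolve = res;
        reject = rej;
      });
      if (typeof resolve !== 'function' || typeof reject !== 'function') {
        // Corresponds to MessageTemplate::kPromiseNonCallable above.
        throw new TypeError('Promise resolve or reject callback is not callable');
      }
      return { promise, resolve, reject };
    }

Usage would look like `const { promise, resolve } = newPromiseCapability<number>(Promise); resolve(1);`.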
diff --git a/deps/v8/src/builtins/promise-all-element-closure.tq b/deps/v8/src/builtins/promise-all-element-closure.tq
index c320b24f03..0b870ea3b1 100644
--- a/deps/v8/src/builtins/promise-all-element-closure.tq
+++ b/deps/v8/src/builtins/promise-all-element-closure.tq
@@ -8,182 +8,180 @@
namespace promise {
- struct PromiseAllWrapResultAsFulfilledFunctor {
- macro Call(_nativeContext: NativeContext, value: JSAny): JSAny {
- return value;
- }
+struct PromiseAllWrapResultAsFulfilledFunctor {
+ macro Call(_nativeContext: NativeContext, value: JSAny): JSAny {
+ return value;
}
+}
- struct PromiseAllSettledWrapResultAsFulfilledFunctor {
- transitioning
- macro Call(implicit context: Context)(
- nativeContext: NativeContext, value: JSAny): JSAny {
- // TODO(gsathya): Optimize the creation using a cached map to
- // prevent transitions here.
- // 9. Let obj be ! ObjectCreate(%ObjectPrototype%).
- const objectFunction = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
- const objectFunctionMap =
- UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
- const obj = AllocateJSObjectFromMap(objectFunctionMap);
-
- // 10. Perform ! CreateDataProperty(obj, "status", "fulfilled").
- FastCreateDataProperty(
- obj, StringConstant('status'), StringConstant('fulfilled'));
-
- // 11. Perform ! CreateDataProperty(obj, "value", x).
- FastCreateDataProperty(obj, StringConstant('value'), value);
- return obj;
- }
+struct PromiseAllSettledWrapResultAsFulfilledFunctor {
+ transitioning
+ macro Call(implicit context: Context)(
+ nativeContext: NativeContext, value: JSAny): JSAny {
+ // TODO(gsathya): Optimize the creation using a cached map to
+ // prevent transitions here.
+ // 9. Let obj be ! ObjectCreate(%ObjectPrototype%).
+ const objectFunction = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
+ const objectFunctionMap =
+ UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
+ const obj = AllocateJSObjectFromMap(objectFunctionMap);
+
+ // 10. Perform ! CreateDataProperty(obj, "status", "fulfilled").
+ FastCreateDataProperty(
+ obj, StringConstant('status'), StringConstant('fulfilled'));
+
+ // 11. Perform ! CreateDataProperty(obj, "value", x).
+ FastCreateDataProperty(obj, StringConstant('value'), value);
+ return obj;
}
+}
- struct PromiseAllSettledWrapResultAsRejectedFunctor {
- transitioning
- macro Call(implicit context: Context)(
- nativeContext: NativeContext, value: JSAny): JSAny {
- // TODO(gsathya): Optimize the creation using a cached map to
- // prevent transitions here.
- // 9. Let obj be ! ObjectCreate(%ObjectPrototype%).
- const objectFunction = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
- const objectFunctionMap =
- UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
- const obj = AllocateJSObjectFromMap(objectFunctionMap);
-
- // 10. Perform ! CreateDataProperty(obj, "status", "rejected").
- FastCreateDataProperty(
- obj, StringConstant('status'), StringConstant('rejected'));
-
- // 11. Perform ! CreateDataProperty(obj, "reason", x).
- FastCreateDataProperty(obj, StringConstant('reason'), value);
- return obj;
- }
+struct PromiseAllSettledWrapResultAsRejectedFunctor {
+ transitioning
+ macro Call(implicit context: Context)(
+ nativeContext: NativeContext, value: JSAny): JSAny {
+ // TODO(gsathya): Optimize the creation using a cached map to
+ // prevent transitions here.
+ // 9. Let obj be ! ObjectCreate(%ObjectPrototype%).
+ const objectFunction = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]);
+ const objectFunctionMap =
+ UnsafeCast<Map>(objectFunction.prototype_or_initial_map);
+ const obj = AllocateJSObjectFromMap(objectFunctionMap);
+
+ // 10. Perform ! CreateDataProperty(obj, "status", "rejected").
+ FastCreateDataProperty(
+ obj, StringConstant('status'), StringConstant('rejected'));
+
+ // 11. Perform ! CreateDataProperty(obj, "reason", x).
+ FastCreateDataProperty(obj, StringConstant('reason'), value);
+ return obj;
}
+}
- extern macro LoadJSReceiverIdentityHash(Object): intptr labels IfNoHash;
+extern macro LoadJSReceiverIdentityHash(Object): intptr labels IfNoHash;
- extern enum PromiseAllResolveElementContextSlots extends int31
- constexpr 'PromiseBuiltins::PromiseAllResolveElementContextSlots' {
- kPromiseAllResolveElementRemainingSlot,
- kPromiseAllResolveElementCapabilitySlot,
- kPromiseAllResolveElementValuesArraySlot,
- kPromiseAllResolveElementLength
- }
- extern operator '[]=' macro StoreContextElement(
- Context, constexpr PromiseAllResolveElementContextSlots, Object): void;
- extern operator '[]' macro LoadContextElement(
- Context, constexpr PromiseAllResolveElementContextSlots): Object;
-
- const kPropertyArrayNoHashSentinel: constexpr int31
- generates 'PropertyArray::kNoHashSentinel';
-
- const kPropertyArrayHashFieldMax: constexpr int31
- generates 'PropertyArray::HashField::kMax';
-
- transitioning macro PromiseAllResolveElementClosure<F: type>(
- implicit context:
- Context)(value: JSAny, function: JSFunction, wrapResultFunctor: F):
- JSAny {
- // We use the {function}s context as the marker to remember whether this
- // resolve element closure was already called. It points to the resolve
- // element context (which is a FunctionContext) until it was called the
- // first time, in which case we make it point to the native context here
- // to mark this resolve element closure as done.
- if (IsNativeContext(context)) deferred {
- return Undefined;
- }
+extern enum PromiseAllResolveElementContextSlots extends int31
+constexpr 'PromiseBuiltins::PromiseAllResolveElementContextSlots' {
+ kPromiseAllResolveElementRemainingSlot,
+ kPromiseAllResolveElementCapabilitySlot,
+ kPromiseAllResolveElementValuesArraySlot,
+ kPromiseAllResolveElementLength
+}
+extern operator '[]=' macro StoreContextElement(
+ Context, constexpr PromiseAllResolveElementContextSlots, Object): void;
+extern operator '[]' macro LoadContextElement(
+ Context, constexpr PromiseAllResolveElementContextSlots): Object;
+
+const kPropertyArrayNoHashSentinel: constexpr int31
+ generates 'PropertyArray::kNoHashSentinel';
+
+const kPropertyArrayHashFieldMax: constexpr int31
+ generates 'PropertyArray::HashField::kMax';
+
+transitioning macro PromiseAllResolveElementClosure<F: type>(
+ implicit context: Context)(
+ value: JSAny, function: JSFunction, wrapResultFunctor: F): JSAny {
+ // We use the {function}s context as the marker to remember whether this
+ // resolve element closure was already called. It points to the resolve
+ // element context (which is a FunctionContext) until it was called the
+ // first time, in which case we make it point to the native context here
+ // to mark this resolve element closure as done.
+ if (IsNativeContext(context)) deferred {
+ return Undefined;
+ }
- assert(
- context.length ==
- PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength);
- const nativeContext = LoadNativeContext(context);
- function.context = nativeContext;
-
- // Update the value depending on whether Promise.all or
- // Promise.allSettled is called.
- const updatedValue = wrapResultFunctor.Call(nativeContext, value);
-
- // Determine the index from the {function}.
- assert(kPropertyArrayNoHashSentinel == 0);
- const identityHash =
- LoadJSReceiverIdentityHash(function) otherwise unreachable;
- assert(identityHash > 0);
- const index = identityHash - 1;
-
- // Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
- const valuesArray = UnsafeCast<JSArray>(
- context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot]);
- const elements = UnsafeCast<FixedArray>(valuesArray.elements);
- const valuesLength = Convert<intptr>(valuesArray.length);
- if (index < valuesLength) {
- // The {index} is in bounds of the {values_array},
- // just store the {value} and continue.
+ assert(
+ context.length ==
+ PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength);
+ const nativeContext = LoadNativeContext(context);
+ function.context = nativeContext;
+
+ // Update the value depending on whether Promise.all or
+ // Promise.allSettled is called.
+ const updatedValue = wrapResultFunctor.Call(nativeContext, value);
+
+ // Determine the index from the {function}.
+ assert(kPropertyArrayNoHashSentinel == 0);
+ const identityHash =
+ LoadJSReceiverIdentityHash(function) otherwise unreachable;
+ assert(identityHash > 0);
+ const index = identityHash - 1;
+
+ // Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
+ const valuesArray = UnsafeCast<JSArray>(
+ context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementValuesArraySlot]);
+ const elements = UnsafeCast<FixedArray>(valuesArray.elements);
+ const valuesLength = Convert<intptr>(valuesArray.length);
+ if (index < valuesLength) {
+ // The {index} is in bounds of the {values_array},
+ // just store the {value} and continue.
+ elements.objects[index] = updatedValue;
+ } else {
+ // Check if we need to grow the backing store.
+ const newLength = index + 1;
+ const elementsLength = elements.length_intptr;
+ if (index < elementsLength) {
+ // The {index} is within bounds of the {elements} backing store, so
+ // just store the {value} and update the "length" of the {values_array}.
+ valuesArray.length = Convert<Smi>(newLength);
elements.objects[index] = updatedValue;
- } else {
- // Check if we need to grow the backing store.
- const newLength = index + 1;
- const elementsLength = elements.length_intptr;
- if (index < elementsLength) {
- // The {index} is within bounds of the {elements} backing store, so
- // just store the {value} and update the "length" of the {values_array}.
+ } else
+ deferred {
+ // We need to grow the backing store to fit the {index} as well.
+ const newElementsLength = IntPtrMin(
+ CalculateNewElementsCapacity(newLength),
+ kPropertyArrayHashFieldMax + 1);
+ assert(index < newElementsLength);
+ assert(elementsLength < newElementsLength);
+ const newElements =
+ ExtractFixedArray(elements, 0, elementsLength, newElementsLength);
+ newElements.objects[index] = updatedValue;
+
+ // Update backing store and "length" on {values_array}.
+ valuesArray.elements = newElements;
valuesArray.length = Convert<Smi>(newLength);
- elements.objects[index] = updatedValue;
- } else
- deferred {
- // We need to grow the backing store to fit the {index} as well.
- const newElementsLength = IntPtrMin(
- CalculateNewElementsCapacity(newLength),
- kPropertyArrayHashFieldMax + 1);
- assert(index < newElementsLength);
- assert(elementsLength < newElementsLength);
- const newElements =
- ExtractFixedArray(elements, 0, elementsLength, newElementsLength);
- newElements.objects[index] = updatedValue;
-
- // Update backing store and "length" on {values_array}.
- valuesArray.elements = newElements;
- valuesArray.length = Convert<Smi>(newLength);
- }
- }
- let remainingElementsCount =
- UnsafeCast<Smi>(context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot]);
- remainingElementsCount = remainingElementsCount - 1;
- context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot] =
- remainingElementsCount;
- if (remainingElementsCount == 0) {
- const capability = UnsafeCast<PromiseCapability>(
- context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementCapabilitySlot]);
- const resolve = UnsafeCast<JSAny>(capability.resolve);
- Call(context, resolve, Undefined, valuesArray);
- }
- return Undefined;
+ }
}
-
- transitioning javascript builtin
- PromiseAllResolveElementClosure(
- js-implicit context: Context, receiver: JSAny,
- target: JSFunction)(value: JSAny): JSAny {
- return PromiseAllResolveElementClosure(
- value, target, PromiseAllWrapResultAsFulfilledFunctor{});
+ let remainingElementsCount =
+ UnsafeCast<Smi>(context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot]);
+ remainingElementsCount = remainingElementsCount - 1;
+ context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot] = remainingElementsCount;
+ if (remainingElementsCount == 0) {
+ const capability = UnsafeCast<PromiseCapability>(
+ context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementCapabilitySlot]);
+ const resolve = UnsafeCast<JSAny>(capability.resolve);
+ Call(context, resolve, Undefined, valuesArray);
}
+ return Undefined;
+}
- transitioning javascript builtin
- PromiseAllSettledResolveElementClosure(
- js-implicit context: Context, receiver: JSAny,
- target: JSFunction)(value: JSAny): JSAny {
- return PromiseAllResolveElementClosure(
- value, target, PromiseAllSettledWrapResultAsFulfilledFunctor{});
- }
+transitioning javascript builtin
+PromiseAllResolveElementClosure(
+ js-implicit context: Context, receiver: JSAny,
+ target: JSFunction)(value: JSAny): JSAny {
+ return PromiseAllResolveElementClosure(
+ value, target, PromiseAllWrapResultAsFulfilledFunctor{});
+}
- transitioning javascript builtin
- PromiseAllSettledRejectElementClosure(
- js-implicit context: Context, receiver: JSAny,
- target: JSFunction)(value: JSAny): JSAny {
- return PromiseAllResolveElementClosure(
- value, target, PromiseAllSettledWrapResultAsRejectedFunctor{});
- }
+transitioning javascript builtin
+PromiseAllSettledResolveElementClosure(
+ js-implicit context: Context, receiver: JSAny,
+ target: JSFunction)(value: JSAny): JSAny {
+ return PromiseAllResolveElementClosure(
+ value, target, PromiseAllSettledWrapResultAsFulfilledFunctor{});
+}
+
+transitioning javascript builtin
+PromiseAllSettledRejectElementClosure(
+ js-implicit context: Context, receiver: JSAny,
+ target: JSFunction)(value: JSAny): JSAny {
+ return PromiseAllResolveElementClosure(
+ value, target, PromiseAllSettledWrapResultAsRejectedFunctor{});
+}
}
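
Note: the three functors at the top of this file differ only in how a settled value is wrapped before the shared element closure stores it into the values array: Promise.all keeps the value as-is, while Promise.allSettled produces the { status, value } / { status, reason } records required by the spec steps quoted in the comments. The closure itself recovers its element index from the function's identity hash (minus one, since 0 is the no-hash sentinel) and marks itself as already called by swapping its context for the native context. A rough TypeScript equivalent of the wrappers, with illustrative names only:

    type SettledResult<T> =
      | { status: 'fulfilled'; value: T }
      | { status: 'rejected'; reason: unknown };

    // Promise.all: store the resolution value unchanged.
    const wrapResultAsIs = <T>(value: T): T => value;

    // Promise.allSettled, fulfillment path.
    const wrapResultAsFulfilled = <T>(value: T): SettledResult<T> =>
      ({ status: 'fulfilled', value });

    // Promise.allSettled, rejection path.
    const wrapResultAsRejected = (reason: unknown): SettledResult<never> =>
      ({ status: 'rejected', reason });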
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index 19a16d8da8..b7fad88f6f 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -6,165 +6,160 @@
#include 'src/builtins/builtins-promise-gen.h'
namespace promise {
- const kPromiseBuiltinsPromiseContextLength: constexpr int31
- generates 'PromiseBuiltins::kPromiseContextLength';
-
- // Creates the context used by all Promise.all resolve element closures,
- // together with the values array. Since all closures for a single Promise.all
- // call use the same context, we need to store the indices for the individual
- // closures somewhere else (we put them into the identity hash field of the
- // closures), and we also need to have a separate marker for when the closure
- // was called already (we slap the native context onto the closure in that
- // case to mark it's done).
- macro CreatePromiseAllResolveElementContext(implicit context: Context)(
- capability: PromiseCapability, nativeContext: NativeContext): Context {
- // TODO(bmeurer): Manually fold this into a single allocation.
- const arrayMap = UnsafeCast<Map>(
- nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
- const valuesArray = AllocateJSArray(
- ElementsKind::PACKED_ELEMENTS, arrayMap, IntPtrConstant(0),
- SmiConstant(0));
- const resolveContext = AllocateSyntheticFunctionContext(
- nativeContext,
- PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength);
- resolveContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot] = SmiConstant(1);
- resolveContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementCapabilitySlot] = capability;
- resolveContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot] = valuesArray;
- return resolveContext;
- }
+const kPromiseBuiltinsPromiseContextLength: constexpr int31
+ generates 'PromiseBuiltins::kPromiseContextLength';
+
+// Creates the context used by all Promise.all resolve element closures,
+// together with the values array. Since all closures for a single Promise.all
+// call use the same context, we need to store the indices for the individual
+// closures somewhere else (we put them into the identity hash field of the
+// closures), and we also need to have a separate marker for when the closure
+// was called already (we slap the native context onto the closure in that
+// case to mark it's done).
+macro CreatePromiseAllResolveElementContext(implicit context: Context)(
+ capability: PromiseCapability, nativeContext: NativeContext): Context {
+ // TODO(bmeurer): Manually fold this into a single allocation.
+ const arrayMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
+ const valuesArray = AllocateJSArray(
+ ElementsKind::PACKED_ELEMENTS, arrayMap, IntPtrConstant(0),
+ SmiConstant(0));
+ const resolveContext = AllocateSyntheticFunctionContext(
+ nativeContext,
+ PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength);
+ resolveContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot] = SmiConstant(1);
+ resolveContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementCapabilitySlot] = capability;
+ resolveContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementValuesArraySlot] = valuesArray;
+ return resolveContext;
+}
- macro CreatePromiseAllResolveElementFunction(implicit context: Context)(
- resolveElementContext: Context, index: Smi, nativeContext: NativeContext,
- slotIndex: constexpr NativeContextSlot): JSFunction {
- assert(index > 0);
- assert(index < kPropertyArrayHashFieldMax);
-
- const map = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const resolveInfo =
- UnsafeCast<SharedFunctionInfo>(nativeContext[slotIndex]);
- const resolve = AllocateFunctionWithMapAndContext(
- map, resolveInfo, resolveElementContext);
-
- assert(kPropertyArrayNoHashSentinel == 0);
- resolve.properties_or_hash = index;
- return resolve;
- }
+macro CreatePromiseAllResolveElementFunction(implicit context: Context)(
+ resolveElementContext: Context, index: Smi, nativeContext: NativeContext,
+ resolveFunction: SharedFunctionInfo): JSFunction {
+ assert(index > 0);
+ assert(index < kPropertyArrayHashFieldMax);
+
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const resolve = AllocateFunctionWithMapAndContext(
+ map, resolveFunction, resolveElementContext);
+
+ assert(kPropertyArrayNoHashSentinel == 0);
+ resolve.properties_or_hash = index;
+ return resolve;
+}
- @export
- macro CreatePromiseResolvingFunctionsContext(implicit context: Context)(
- promise: JSPromise, debugEvent: Object, nativeContext: NativeContext):
- Context {
- const resolveContext = AllocateSyntheticFunctionContext(
- nativeContext, kPromiseBuiltinsPromiseContextLength);
- resolveContext[kPromiseBuiltinsPromiseSlot] = promise;
- resolveContext[kPromiseBuiltinsAlreadyResolvedSlot] = False;
- resolveContext[kPromiseBuiltinsDebugEventSlot] = debugEvent;
- return resolveContext;
- }
+@export
+macro CreatePromiseResolvingFunctionsContext(implicit context: Context)(
+ promise: JSPromise, debugEvent: Object, nativeContext: NativeContext):
+ Context {
+ const resolveContext = AllocateSyntheticFunctionContext(
+ nativeContext, kPromiseBuiltinsPromiseContextLength);
+ resolveContext[kPromiseBuiltinsPromiseSlot] = promise;
+ resolveContext[kPromiseBuiltinsAlreadyResolvedSlot] = False;
+ resolveContext[kPromiseBuiltinsDebugEventSlot] = debugEvent;
+ return resolveContext;
+}
- macro IsPromiseThenLookupChainIntact(implicit context: Context)(
- nativeContext: NativeContext, receiverMap: Map): bool {
- if (IsForceSlowPath()) return false;
- if (!IsJSPromiseMap(receiverMap)) return false;
- if (receiverMap.prototype !=
- nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX])
- return false;
- return !IsPromiseThenProtectorCellInvalid();
- }
+macro IsPromiseThenLookupChainIntact(implicit context: Context)(
+ nativeContext: NativeContext, receiverMap: Map): bool {
+ if (IsForceSlowPath()) return false;
+ if (!IsJSPromiseMap(receiverMap)) return false;
+ if (receiverMap.prototype !=
+ nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX])
+ return false;
+ return !IsPromiseThenProtectorCellInvalid();
+}
- struct PromiseAllResolveElementFunctor {
- macro Call(implicit context: Context)(
- resolveElementContext: Context, nativeContext: NativeContext,
- index: Smi, _capability: PromiseCapability): Callable {
- return CreatePromiseAllResolveElementFunction(
- resolveElementContext, index, nativeContext,
- NativeContextSlot::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
- }
+struct PromiseAllResolveElementFunctor {
+ macro Call(implicit context: Context)(
+ resolveElementContext: Context, nativeContext: NativeContext, index: Smi,
+ _capability: PromiseCapability): Callable {
+ return CreatePromiseAllResolveElementFunction(
+ resolveElementContext, index, nativeContext,
+ PromiseAllResolveElementSharedFunConstant());
}
+}
- struct PromiseAllRejectElementFunctor {
- macro Call(implicit context: Context)(
- _resolveElementContext: Context, _nativeContext: NativeContext,
- _index: Smi, capability: PromiseCapability): Callable {
- return UnsafeCast<Callable>(capability.reject);
- }
+struct PromiseAllRejectElementFunctor {
+ macro Call(implicit context: Context)(
+ _resolveElementContext: Context, _nativeContext: NativeContext,
+ _index: Smi, capability: PromiseCapability): Callable {
+ return UnsafeCast<Callable>(capability.reject);
}
+}
- struct PromiseAllSettledResolveElementFunctor {
- macro Call(implicit context: Context)(
- resolveElementContext: Context, nativeContext: NativeContext,
- index: Smi, _capability: PromiseCapability): Callable {
- return CreatePromiseAllResolveElementFunction(
- resolveElementContext, index, nativeContext,
- NativeContextSlot::PROMISE_ALL_SETTLED_RESOLVE_ELEMENT_SHARED_FUN);
- }
+struct PromiseAllSettledResolveElementFunctor {
+ macro Call(implicit context: Context)(
+ resolveElementContext: Context, nativeContext: NativeContext, index: Smi,
+ _capability: PromiseCapability): Callable {
+ return CreatePromiseAllResolveElementFunction(
+ resolveElementContext, index, nativeContext,
+ PromiseAllSettledResolveElementSharedFunConstant());
}
+}
- struct PromiseAllSettledRejectElementFunctor {
- macro Call(implicit context: Context)(
- resolveElementContext: Context, nativeContext: NativeContext,
- index: Smi, _capability: PromiseCapability): Callable {
- return CreatePromiseAllResolveElementFunction(
- resolveElementContext, index, nativeContext,
- NativeContextSlot::PROMISE_ALL_SETTLED_REJECT_ELEMENT_SHARED_FUN);
- }
+struct PromiseAllSettledRejectElementFunctor {
+ macro Call(implicit context: Context)(
+ resolveElementContext: Context, nativeContext: NativeContext, index: Smi,
+ _capability: PromiseCapability): Callable {
+ return CreatePromiseAllResolveElementFunction(
+ resolveElementContext, index, nativeContext,
+ PromiseAllSettledRejectElementSharedFunConstant());
}
+}
- transitioning macro PerformPromiseAll<F1: type, F2: type>(implicit context:
- Context)(
- constructor: JSReceiver, capability: PromiseCapability,
- iter: iterator::IteratorRecord, createResolveElementFunctor: F1,
- createRejectElementFunctor: F2): JSAny labels Reject(Object) {
- const nativeContext = LoadNativeContext(context);
- const promise = capability.promise;
- const resolve = capability.resolve;
- const reject = capability.reject;
-
- // For catch prediction, don't treat the .then calls as handling it;
- // instead, recurse outwards.
- if (IsDebugActive()) deferred {
- SetPropertyStrict(
- context, reject, kPromiseForwardingHandlerSymbol, True);
- }
+transitioning macro PerformPromiseAll<F1: type, F2: type>(
+ implicit context: Context)(
+ constructor: JSReceiver, capability: PromiseCapability,
+ iter: iterator::IteratorRecord, createResolveElementFunctor: F1,
+ createRejectElementFunctor: F2): JSAny labels
+Reject(Object) {
+ const nativeContext = LoadNativeContext(context);
+ const promise = capability.promise;
+ const resolve = capability.resolve;
+ const reject = capability.reject;
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ if (IsDebugActive()) deferred {
+ SetPropertyStrict(context, reject, kPromiseForwardingHandlerSymbol, True);
+ }
- const resolveElementContext =
- CreatePromiseAllResolveElementContext(capability, nativeContext);
+ const resolveElementContext =
+ CreatePromiseAllResolveElementContext(capability, nativeContext);
- let index: Smi = 1;
+ let index: Smi = 1;
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- let promiseResolveFunction: JSAny = Undefined;
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ let promiseResolveFunction: JSAny = Undefined;
+ try {
try {
- try {
- if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) {
- // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
- let promiseResolve: JSAny;
- try {
- promiseResolve = GetProperty(constructor, kResolveString);
- } catch (e) deferred {
- iterator::IteratorCloseOnException(iter, e) otherwise Reject;
- }
-
- // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
- // exception.
- promiseResolveFunction = Cast<Callable>(promiseResolve)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, 'resolve');
- }
+ if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) {
+ let promiseResolve: JSAny;
+
+ // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
+ promiseResolve = GetProperty(constructor, kResolveString);
- const fastIteratorResultMap = UnsafeCast<Map>(
- nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
- while (true) {
- let nextValue: JSAny;
+ // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
+ // exception.
+ promiseResolveFunction =
+ Cast<Callable>(promiseResolve) otherwise ThrowTypeError(
+ MessageTemplate::kCalledNonCallable, 'resolve');
+ }
+ const fastIteratorResultMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
+ while (true) {
+ let nextValue: JSAny;
+ try {
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
// If next is an abrupt completion, set iteratorRecord.[[Done]] to
// true. ReturnIfAbrupt(next).
@@ -176,209 +171,207 @@ namespace promise {
// to true.
// ReturnIfAbrupt(nextValue).
nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
+ } catch (e) {
+ goto Reject(e);
+ }
- // Check if we reached the limit.
- if (index == kPropertyArrayHashFieldMax) {
- // If there are too many elements (currently more than 2**21-1),
- // raise a RangeError here (which is caught directly and turned into
- // a rejection) of the resulting promise. We could gracefully handle
- // this case as well and support more than this number of elements
- // by going to a separate function and pass the larger indices via a
- // separate context, but it doesn't seem likely that we need this,
- // and it's unclear how the rest of the system deals with 2**21 live
- // Promises anyways.
- try {
- ThrowRangeError(MessageTemplate::kTooManyElementsInPromiseAll);
- } catch (e) deferred {
- iterator::IteratorCloseOnException(iter, e) otherwise Reject;
- }
- }
-
- // Set remainingElementsCount.[[Value]] to
- // remainingElementsCount.[[Value]] + 1.
- const remainingElementsCount =
- UnsafeCast<Smi>(resolveElementContext
- [PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot]);
- resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot] =
- remainingElementsCount + 1;
-
- // Let resolveElement be CreateBuiltinFunction(steps,
-          //                         « [[AlreadyCalled]],
- // [[Index]],
- // [[Values]],
- // [[Capability]],
- // [[RemainingElements]]
-          //                         »).
- // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false
- // }. Set resolveElement.[[Index]] to index. Set
- // resolveElement.[[Values]] to values. Set
- // resolveElement.[[Capability]] to resultCapability. Set
- // resolveElement.[[RemainingElements]] to remainingElementsCount.
- const resolveElementFun = createResolveElementFunctor.Call(
- resolveElementContext, nativeContext, index, capability);
- const rejectElementFun = createRejectElementFunctor.Call(
- resolveElementContext, nativeContext, index, capability);
-
- // We can skip the "resolve" lookup on the {constructor} as well as
- // the "then" lookup on the result of the "resolve" call, and
- // immediately chain continuation onto the {next_value} if:
- //
- // (a) The {constructor} is the intrinsic %Promise% function, and
- // looking up "resolve" on {constructor} yields the initial
- // Promise.resolve() builtin, and
- // (b) the promise @@species protector cell is valid, meaning that
- // no one messed with the Symbol.species property on any
- // intrinsic promise or on the Promise.prototype, and
- // (c) the {next_value} is a JSPromise whose [[Prototype]] field
- // contains the intrinsic %PromisePrototype%, and
- // (d) we're not running with async_hooks or DevTools enabled.
- //
- // In that case we also don't need to allocate a chained promise for
- // the PromiseReaction (aka we can pass undefined to
- // PerformPromiseThen), since this is only necessary for DevTools and
- // PromiseHooks.
- if (promiseResolveFunction != Undefined ||
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
- IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
- !IsPromiseThenLookupChainIntact(
- nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
- try {
-              // Let nextPromise be ? Call(constructor, _promiseResolve_, «
-              // nextValue »).
- const nextPromise = CallResolve(
- UnsafeCast<Constructor>(constructor), promiseResolveFunction,
- nextValue);
-
-              // Perform ? Invoke(nextPromise, "then", « resolveElement,
-              // resultCapability.[[Reject]] »).
- const then = GetProperty(nextPromise, kThenString);
- const thenResult = Call(
- nativeContext, then, nextPromise, resolveElementFun,
- rejectElementFun);
-
- // For catch prediction, mark that rejections here are
- // semantically handled by the combined Promise.
- if (IsDebugActive() && Is<JSPromise>(thenResult)) deferred {
- SetPropertyStrict(
- context, thenResult, kPromiseHandledBySymbol, promise);
- }
- } catch (e) deferred {
- iterator::IteratorCloseOnException(iter, e) otherwise Reject;
+ // Check if we reached the limit.
+ if (index == kPropertyArrayHashFieldMax) {
+ // If there are too many elements (currently more than 2**21-1),
+ // raise a RangeError here (which is caught below and turned into
+ // a rejection of the resulting promise). We could gracefully handle
+ // this case as well and support more than this number of elements
+ // by going to a separate function and pass the larger indices via a
+ // separate context, but it doesn't seem likely that we need this,
+ // and it's unclear how the rest of the system deals with 2**21 live
+ // Promises anyway.
+ ThrowRangeError(
+ MessageTemplate::kTooManyElementsInPromiseCombinator, 'all');
+ }
+
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] + 1.
+ const remainingElementsCount = UnsafeCast<Smi>(
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot]);
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot] =
+ remainingElementsCount + 1;
+
+ // Let resolveElement be CreateBuiltinFunction(steps,
+      //                         « [[AlreadyCalled]],
+ // [[Index]],
+ // [[Values]],
+ // [[Capability]],
+ // [[RemainingElements]]
+      //                         »).
+ // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false
+ // }. Set resolveElement.[[Index]] to index. Set
+ // resolveElement.[[Values]] to values. Set
+ // resolveElement.[[Capability]] to resultCapability. Set
+ // resolveElement.[[RemainingElements]] to remainingElementsCount.
+ const resolveElementFun = createResolveElementFunctor.Call(
+ resolveElementContext, nativeContext, index, capability);
+ const rejectElementFun = createRejectElementFunctor.Call(
+ resolveElementContext, nativeContext, index, capability);
+
+ // We can skip the "resolve" lookup on the {constructor} as well as
+ // the "then" lookup on the result of the "resolve" call, and
+ // immediately chain continuation onto the {next_value} if:
+ //
+ // (a) The {constructor} is the intrinsic %Promise% function, and
+ // looking up "resolve" on {constructor} yields the initial
+ // Promise.resolve() builtin, and
+ // (b) the promise @@species protector cell is valid, meaning that
+ // no one messed with the Symbol.species property on any
+ // intrinsic promise or on the Promise.prototype, and
+ // (c) the {next_value} is a JSPromise whose [[Prototype]] field
+ // contains the intrinsic %PromisePrototype%, and
+ // (d) we're not running with async_hooks or DevTools enabled.
+ //
+ // In that case we also don't need to allocate a chained promise for
+ // the PromiseReaction (aka we can pass undefined to
+ // PerformPromiseThen), since this is only necessary for DevTools and
+ // PromiseHooks.
+ if (promiseResolveFunction != Undefined ||
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
+ !IsPromiseThenLookupChainIntact(
+ nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
+        // Let nextPromise be ? Call(constructor, _promiseResolve_, «
+        // nextValue »).
+ const nextPromise = CallResolve(
+ UnsafeCast<Constructor>(constructor), promiseResolveFunction,
+ nextValue);
+
+        // Perform ? Invoke(nextPromise, "then", « resolveElement,
+        // resultCapability.[[Reject]] »).
+ const then = GetProperty(nextPromise, kThenString);
+ const thenResult = Call(
+ nativeContext, then, nextPromise, resolveElementFun,
+ rejectElementFun);
+
+ // For catch prediction, mark that rejections here are
+ // semantically handled by the combined Promise.
+ if (IsDebugActive() && Is<JSPromise>(thenResult)) deferred {
+ SetPropertyStrict(
+ context, thenResult, kPromiseHandledBySymbol, promise);
}
- } else {
- PerformPromiseThenImpl(
- UnsafeCast<JSPromise>(nextValue), resolveElementFun,
- rejectElementFun, Undefined);
- }
-
- // Set index to index + 1.
- index += 1;
+ } else {
+ PerformPromiseThenImpl(
+ UnsafeCast<JSPromise>(nextValue), resolveElementFun,
+ rejectElementFun, Undefined);
}
+
+ // Set index to index + 1.
+ index += 1;
}
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(iter);
+ goto Reject(e);
}
- label Done {}
-
- // Set iteratorRecord.[[Done]] to true.
- // Set remainingElementsCount.[[Value]] to
- // remainingElementsCount.[[Value]] - 1.
- let remainingElementsCount = UnsafeCast<Smi>(
+ } label Done {}
+
+ // Set iteratorRecord.[[Done]] to true.
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] - 1.
+ let remainingElementsCount = UnsafeCast<Smi>(
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot]);
+ remainingElementsCount -= 1;
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot] =
+ remainingElementsCount;
+ if (remainingElementsCount > 0) {
+ // Pre-allocate the backing store for the {values_array} to the desired
+ // capacity here. We may already have elements here in case of some
+ // fancy Thenable that calls the resolve callback immediately, so we need
+ // to handle that correctly here.
+ const valuesArray = UnsafeCast<JSArray>(
resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot]);
- remainingElementsCount -= 1;
- resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot] =
- remainingElementsCount;
- if (remainingElementsCount > 0) {
- // Pre-allocate the backing store for the {values_array} to the desired
- // capacity here. We may already have elements here in case of some
- // fancy Thenable that calls the resolve callback immediately, so we need
- // to handle that correctly here.
- const valuesArray = UnsafeCast<JSArray>(
+ kPromiseAllResolveElementValuesArraySlot]);
+ const oldElements = UnsafeCast<FixedArray>(valuesArray.elements);
+ const oldCapacity = oldElements.length_intptr;
+ const newCapacity = SmiUntag(index);
+ if (oldCapacity < newCapacity) {
+ valuesArray.elements =
+ ExtractFixedArray(oldElements, 0, oldCapacity, newCapacity);
+ }
+ } else
+ deferred {
+ // If remainingElementsCount.[[Value]] is 0, then
+ // Let valuesArray be CreateArrayFromList(values).
+ // Perform ? Call(resultCapability.[[Resolve]], undefined,
+      // « valuesArray »).
+ assert(remainingElementsCount == 0);
+ const valuesArray = UnsafeCast<JSAny>(
resolveElementContext[PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementValuesArraySlot]);
- const oldElements = UnsafeCast<FixedArray>(valuesArray.elements);
- const oldCapacity = oldElements.length_intptr;
- const newCapacity = SmiUntag(index);
- if (oldCapacity < newCapacity) {
- valuesArray.elements =
- ExtractFixedArray(oldElements, 0, oldCapacity, newCapacity);
- }
- } else
- deferred {
- // If remainingElementsCount.[[Value]] is 0, then
- // Let valuesArray be CreateArrayFromList(values).
- // Perform ? Call(resultCapability.[[Resolve]], undefined,
-        // « valuesArray »).
- assert(remainingElementsCount == 0);
- const valuesArray = UnsafeCast<JSAny>(
- resolveElementContext
- [PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot]);
- Call(nativeContext, UnsafeCast<JSAny>(resolve), Undefined, valuesArray);
- }
-
- // Return resultCapability.[[Promise]].
- return promise;
- }
+ Call(nativeContext, UnsafeCast<JSAny>(resolve), Undefined, valuesArray);
+ }
- transitioning macro GeneratePromiseAll<F1: type, F2: type>(implicit context:
- Context)(
- receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1,
- createRejectElementFunctor: F2): JSAny {
- // Let C be the this value.
- // If Type(C) is not Object, throw a TypeError exception.
- const receiver = Cast<JSReceiver>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Promise.all');
-
- // Let promiseCapability be ? NewPromiseCapability(C).
- // Don't fire debugEvent so that forwarding the rejection through all does
- // not trigger redundant ExceptionEvents
- const capability = NewPromiseCapability(receiver, False);
+ // Return resultCapability.[[Promise]].
+ return promise;
+}
- try {
- try {
- // Let iterator be GetIterator(iterable).
- // IfAbruptRejectPromise(iterator, promiseCapability).
- let i = iterator::GetIterator(iterable);
-
- // Let result be PerformPromiseAll(iteratorRecord, C,
- // promiseCapability). If result is an abrupt completion, then
- // If iteratorRecord.[[Done]] is false, let result be
- // IteratorClose(iterator, result).
- // IfAbruptRejectPromise(result, promiseCapability).
- return PerformPromiseAll(
- receiver, capability, i, createResolveElementFunctor,
- createRejectElementFunctor) otherwise Reject;
- } catch (e) deferred {
- goto Reject(e);
- }
- }
- label Reject(e: Object) deferred {
- // Exception must be bound to a JS value.
- const e = UnsafeCast<JSAny>(e);
- const reject = UnsafeCast<JSAny>(capability.reject);
- Call(context, reject, Undefined, e);
- return capability.promise;
- }
+transitioning macro GeneratePromiseAll<F1: type, F2: type>(
+ implicit context: Context)(
+ receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1,
+ createRejectElementFunctor: F2): JSAny {
+ // Let C be the this value.
+ // If Type(C) is not Object, throw a TypeError exception.
+ const receiver = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.all');
+
+ // Let promiseCapability be ? NewPromiseCapability(C).
+ // Don't fire debugEvent so that forwarding the rejection through all does
+ // not trigger redundant ExceptionEvents
+ const capability = NewPromiseCapability(receiver, False);
+
+ try {
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ let i = iterator::GetIterator(iterable);
+
+ // Let result be PerformPromiseAll(iteratorRecord, C,
+ // promiseCapability). If result is an abrupt completion, then
+ // If iteratorRecord.[[Done]] is false, let result be
+ // IteratorClose(iterator, result).
+ // IfAbruptRejectPromise(result, promiseCapability).
+ return PerformPromiseAll(
+ receiver, capability, i, createResolveElementFunctor,
+ createRejectElementFunctor) otherwise Reject;
+ } catch (e) deferred {
+ goto Reject(e);
+ } label Reject(e: Object) deferred {
+ // Exception must be bound to a JS value.
+ const e = UnsafeCast<JSAny>(e);
+ const reject = UnsafeCast<JSAny>(capability.reject);
+ Call(context, reject, Undefined, e);
+ return capability.promise;
}
+}
- // ES#sec-promise.all
- transitioning javascript builtin PromiseAll(
- js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
- return GeneratePromiseAll(
- receiver, iterable, PromiseAllResolveElementFunctor{},
- PromiseAllRejectElementFunctor{});
- }
+// ES#sec-promise.all
+transitioning javascript builtin PromiseAll(
+ js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
+ return GeneratePromiseAll(
+ receiver, iterable, PromiseAllResolveElementFunctor{},
+ PromiseAllRejectElementFunctor{});
+}
- // ES#sec-promise.allsettled
- // Promise.allSettled ( iterable )
- transitioning javascript builtin PromiseAllSettled(
- js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
- return GeneratePromiseAll(
- receiver, iterable, PromiseAllSettledResolveElementFunctor{},
- PromiseAllSettledRejectElementFunctor{});
- }
+// ES#sec-promise.allsettled
+// Promise.allSettled ( iterable )
+transitioning javascript builtin PromiseAllSettled(
+ js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
+ return GeneratePromiseAll(
+ receiver, iterable, PromiseAllSettledResolveElementFunctor{},
+ PromiseAllSettledRejectElementFunctor{});
+}
+
+extern macro PromiseAllResolveElementSharedFunConstant(): SharedFunctionInfo;
+extern macro PromiseAllSettledRejectElementSharedFunConstant():
+ SharedFunctionInfo;
+extern macro PromiseAllSettledResolveElementSharedFunConstant():
+ SharedFunctionInfo;
}
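PromiseAll and PromiseAllSettled above differ only in the element functors they hand to GeneratePromiseAll. As an informal illustration of the resulting observable difference (plain TypeScript, not the Torque implementation):

// Illustration only: Promise.all rejects on the first rejection, while
// Promise.allSettled always fulfills with per-element status records.
async function demo(): Promise<void> {
  const inputs = [Promise.resolve(1), Promise.reject(new Error("boom")), 3];

  try {
    await Promise.all(inputs);
  } catch (e) {
    console.log("all rejected with:", (e as Error).message); // "boom"
  }

  const settled = await Promise.allSettled(inputs);
  console.log(settled.map((r) => r.status)); // ["fulfilled", "rejected", "fulfilled"]
}

demo();
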
diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq
new file mode 100644
index 0000000000..1046ed0a89
--- /dev/null
+++ b/deps/v8/src/builtins/promise-any.tq
@@ -0,0 +1,372 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-promise-gen.h'
+
+namespace promise {
+extern enum PromiseAnyRejectElementContextSlots extends int31
+constexpr 'PromiseBuiltins::PromiseAnyRejectElementContextSlots' {
+ kPromiseAnyRejectElementRemainingSlot,
+ kPromiseAnyRejectElementCapabilitySlot,
+ kPromiseAnyRejectElementErrorsArraySlot,
+ kPromiseAnyRejectElementLength
+}
+
+extern operator '[]=' macro StoreContextElement(
+ Context, constexpr PromiseAnyRejectElementContextSlots, Object): void;
+extern operator '[]' macro LoadContextElement(
+ Context, constexpr PromiseAnyRejectElementContextSlots): Object;
+
+// Creates the context used by all Promise.any reject element closures,
+// together with the errors array. Since all closures for a single Promise.any
+// call use the same context, we need to store the indices for the individual
+// closures somewhere else (we put them into the identity hash field of the
+// closures), and we also need a separate marker for when the closure has
+// already been called (we slap the native context onto the closure in that
+// case to mark it as done). See Promise.all, which uses the same approach.
+transitioning macro CreatePromiseAnyRejectElementContext(
+ implicit context: Context)(
+ capability: PromiseCapability, nativeContext: NativeContext): Context {
+ const rejectContext = AllocateSyntheticFunctionContext(
+ nativeContext,
+ PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength);
+ rejectContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot] = SmiConstant(1);
+ rejectContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementCapabilitySlot] = capability;
+ // Will be set later.
+ rejectContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsArraySlot] = Undefined;
+ return rejectContext;
+}
+
+macro CreatePromiseAnyRejectElementFunction(implicit context: Context)(
+ rejectElementContext: Context, index: Smi,
+ nativeContext: NativeContext): JSFunction {
+ assert(index > 0);
+ assert(index < kPropertyArrayHashFieldMax);
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const rejectInfo = PromiseAnyRejectElementSharedFunConstant();
+ const reject =
+ AllocateFunctionWithMapAndContext(map, rejectInfo, rejectElementContext);
+ assert(kPropertyArrayNoHashSentinel == 0);
+ reject.properties_or_hash = index;
+ return reject;
+}
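The comments above describe the key design: all reject element closures of one Promise.any call share a single context holding the remaining count, the capability and the errors array, while each closure's index is stashed in its identity hash field. A rough TypeScript analogue of that shared-state layout, using an ordinary captured index and an explicit alreadyCalled flag instead of the identity-hash and context-swapping tricks (names are illustrative; AggregateError assumes an ES2021 target):

// Hypothetical sketch of the shared-state layout; the slot names in the
// comments refer to the context slots defined above.
interface RejectElementState {
  remaining: number;                   // kPromiseAnyRejectElementRemainingSlot
  errors: unknown[];                   // kPromiseAnyRejectElementErrorsArraySlot
  reject: (reason: unknown) => void;   // capability.[[Reject]]
}

function makeRejectElement(state: RejectElementState, index: number) {
  let alreadyCalled = false;  // V8 marks this by swapping the closure's context
  return (reason: unknown): void => {
    if (alreadyCalled) return;
    alreadyCalled = true;
    state.errors[index] = reason;      // 9. Set errors[index] to x.
    state.remaining -= 1;
    if (state.remaining === 0) {
      state.reject(new AggregateError(state.errors, "All promises were rejected"));
    }
  };
}
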
+
+// https://tc39.es/proposal-promise-any/#sec-promise.any-reject-element-functions
+transitioning javascript builtin
+PromiseAnyRejectElementClosure(
+ js-implicit context: Context, receiver: JSAny,
+ target: JSFunction)(value: JSAny): JSAny {
+ // 1. Let F be the active function object.
+
+ // 2. Let alreadyCalled be F.[[AlreadyCalled]].
+
+ // 3. If alreadyCalled.[[Value]] is true, return undefined.
+
+ // We use the function's context as the marker to remember whether this
+ // reject element closure was already called. It points to the reject
+ // element context (which is a FunctionContext) until it is called the
+ // first time, at which point we make it point to the native context here
+ // to mark this reject element closure as done.
+ if (IsNativeContext(context)) deferred {
+ return Undefined;
+ }
+
+ assert(
+ context.length ==
+ PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength);
+
+ // 4. Set alreadyCalled.[[Value]] to true.
+ const nativeContext = LoadNativeContext(context);
+ target.context = nativeContext;
+
+ // 5. Let index be F.[[Index]].
+ assert(kPropertyArrayNoHashSentinel == 0);
+ const identityHash = LoadJSReceiverIdentityHash(target) otherwise unreachable;
+ assert(identityHash > 0);
+ const index = identityHash - 1;
+
+ // 6. Let errors be F.[[Errors]].
+ if (context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsArraySlot] == Undefined) {
+ // We're going to reject the Promise with a more fundamental error (e.g.,
+ // something went wrong with iterating the Promises). We don't need to
+ // construct the "errors" array.
+ return Undefined;
+ }
+
+ const errorsArray = UnsafeCast<FixedArray>(
+ context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsArraySlot]);
+
+ // 7. Let promiseCapability be F.[[Capability]].
+
+ // 8. Let remainingElementsCount be F.[[RemainingElements]].
+ let remainingElementsCount =
+ UnsafeCast<Smi>(context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot]);
+ // 9. Set errors[index] to x.
+ errorsArray.objects[index] = value;
+
+ // 10. Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] - 1.
+ remainingElementsCount = remainingElementsCount - 1;
+ context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot] = remainingElementsCount;
+
+ // 11. If remainingElementsCount.[[Value]] is 0, then
+ if (remainingElementsCount == 0) {
+ // a. Let error be a newly created AggregateError object.
+
+ // b. Set error.[[AggregateErrors]] to errors.
+ const error = ConstructAggregateError(errorsArray);
+ // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »).
+ const capability = UnsafeCast<PromiseCapability>(
+ context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementCapabilitySlot]);
+ Call(context, UnsafeCast<Callable>(capability.reject), Undefined, error);
+ }
+
+ // 12. Return undefined.
+ return Undefined;
+}
+
+transitioning macro PerformPromiseAny(implicit context: Context)(
+ iteratorRecord: iterator::IteratorRecord, constructor: Constructor,
+ resultCapability: PromiseCapability): JSAny labels
+Reject(Object) {
+ // 1. Assert: ! IsConstructor(constructor) is true.
+ // 2. Assert: resultCapability is a PromiseCapability Record.
+
+ const nativeContext = LoadNativeContext(context);
+
+ // 3. Let errors be a new empty List.
+ let growableErrorsArray = growable_fixed_array::NewGrowableFixedArray();
+
+ // 4. Let remainingElementsCount be a new Record { [[Value]]: 1 }.
+ const rejectElementContext =
+ CreatePromiseAnyRejectElementContext(resultCapability, nativeContext);
+
+ // 5. Let index be 0.
+ // (We subtract 1 in the PromiseAnyRejectElementClosure).
+ let index: Smi = 1;
+
+ try {
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ let promiseResolveFunction: JSAny = Undefined;
+ if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor))
+ deferred {
+ // 6. Let promiseResolve be ? Get(constructor, `"resolve"`).
+ const promiseResolve = GetProperty(constructor, kResolveString);
+ // 7. If IsCallable(promiseResolve) is false, throw a
+ // TypeError exception.
+ promiseResolveFunction = Cast<Callable>(promiseResolve)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledNonCallable, 'resolve');
+ }
+ const fastIteratorResultMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
+ // 8. Repeat,
+ while (true) {
+ let nextValue: JSAny;
+ try {
+ // a. Let next be IteratorStep(iteratorRecord).
+
+ // b. If next is an abrupt completion, set
+ // iteratorRecord.[[Done]] to true.
+
+ // c. ReturnIfAbrupt(next).
+
+ // d. if next is false, then [continues below in "Done"]
+ const next: JSReceiver = iterator::IteratorStep(
+ iteratorRecord, fastIteratorResultMap) otherwise goto Done;
+ // e. Let nextValue be IteratorValue(next).
+
+ // f. If nextValue is an abrupt completion, set
+ // iteratorRecord.[[Done]] to true.
+
+ // g. ReturnIfAbrupt(nextValue).
+ nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
+ } catch (e) {
+ goto Reject(e);
+ }
+
+ // We store the indices as identity hash on the reject element
+ // closures. Thus, we need this limit.
+ if (index == kPropertyArrayHashFieldMax) {
+ // If there are too many elements (currently more than
+ // 2**21-1), raise a RangeError here (which is caught later and
+ // turned into a rejection of the resulting promise). We could
+ // gracefully handle this case as well and support more than
+ // this number of elements by going to a separate function and
+ // passing the larger indices via a separate context, but it
+ // doesn't seem likely that we need this, and it's unclear how
+ // the rest of the system deals with 2**21 live Promises
+ // anyway.
+ ThrowRangeError(
+ MessageTemplate::kTooManyElementsInPromiseCombinator, 'any');
+ }
+
+ // h. Append undefined to errors.
+ growableErrorsArray.Push(Undefined);
+
+ let nextPromise: JSAny;
+ // i. Let nextPromise be ? Call(constructor, promiseResolve,
+ // « nextValue »).
+ nextPromise = CallResolve(constructor, promiseResolveFunction, nextValue);
+
+ // j. Let steps be the algorithm steps defined in Promise.any
+ // Reject Element Functions.
+
+ // k. Let rejectElement be ! CreateBuiltinFunction(steps, «
+ // [[AlreadyCalled]], [[Index]],
+ // [[Errors]], [[Capability]], [[RemainingElements]] »).
+
+ // l. Set rejectElement.[[AlreadyCalled]] to a new Record {
+ // [[Value]]: false }.
+
+ // m. Set rejectElement.[[Index]] to index.
+
+ // n. Set rejectElement.[[Errors]] to errors.
+
+ // o. Set rejectElement.[[Capability]] to resultCapability.
+
+ // p. Set rejectElement.[[RemainingElements]] to
+ // remainingElementsCount.
+ const rejectElement = CreatePromiseAnyRejectElementFunction(
+ rejectElementContext, index, nativeContext);
+ // q. Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] + 1.
+ const remainingElementsCount = UnsafeCast<Smi>(
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot]);
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot] =
+ remainingElementsCount + 1;
+
+ // r. Perform ? Invoke(nextPromise, "then", «
+ // resultCapability.[[Resolve]], rejectElement »).
+ let thenResult: JSAny;
+
+ const then = GetProperty(nextPromise, kThenString);
+ thenResult = Call(
+ context, then, nextPromise,
+ UnsafeCast<JSAny>(resultCapability.resolve), rejectElement);
+
+ // s. Increase index by 1.
+ index += 1;
+
+ // For catch prediction, mark that rejections here are
+ // semantically handled by the combined Promise.
+ if (IsDebugActive() && Is<JSPromise>(thenResult)) deferred {
+ SetPropertyStrict(
+ context, thenResult, kPromiseHandledBySymbol,
+ resultCapability.promise);
+ SetPropertyStrict(
+ context, rejectElement, kPromiseForwardingHandlerSymbol, True);
+ }
+ }
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(iteratorRecord);
+ goto Reject(e);
+ } label Done {}
+
+ // (8.d)
+ // i. Set iteratorRecord.[[Done]] to true.
+ // ii. Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] - 1.
+ let remainingElementsCount = UnsafeCast<Smi>(
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot]);
+ remainingElementsCount -= 1;
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementRemainingSlot] =
+ remainingElementsCount;
+
+ const errorsArray = growableErrorsArray.ToFixedArray();
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsArraySlot] =
+ errorsArray;
+
+ // iii. If remainingElementsCount.[[Value]] is 0, then
+ if (remainingElementsCount == 0) deferred {
+ // 1. Let error be a newly created AggregateError object.
+ // 2. Set error.[[AggregateErrors]] to errors.
+ const error = ConstructAggregateError(errorsArray);
+ // 3. Return ThrowCompletion(error).
+ goto Reject(error);
+ }
+ // iv. Return resultCapability.[[Promise]].
+ return resultCapability.promise;
+}
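Taken together with the spec steps quoted in the comments, PerformPromiseAny boils down to the following algorithm. This is a simplified TypeScript sketch that ignores the resolve lookup fast path, the identity-hash index encoding and the element-count limit:

function performPromiseAny<T>(
  iterable: Iterable<T | PromiseLike<T>>,
  resolve: (value: T) => void,
  reject: (reason: unknown) => void,
): void {
  const errors: unknown[] = [];
  // Starts at 1 so that finishing the loop below cannot reject prematurely.
  let remaining = 1;
  let index = 0;

  for (const next of iterable) {
    errors.push(undefined);          // 8.h: append undefined to errors
    remaining += 1;                  // 8.q: bump remainingElementsCount
    const currentIndex = index;
    Promise.resolve(next).then(resolve, (reason) => {
      errors[currentIndex] = reason; // reject element step 9
      remaining -= 1;
      if (remaining === 0) {
        reject(new AggregateError(errors, "All promises were rejected"));
      }
    });
    index += 1;                      // 8.s
  }

  remaining -= 1;                    // 8.d.ii: iteration is done
  if (remaining === 0) {
    reject(new AggregateError(errors, "All promises were rejected"));
  }
}

// Usage: behaves like Promise.any for plain iterables.
new Promise<number>((res, rej) => performPromiseAny([Promise.reject("a"), 2], res, rej))
  .then((v) => console.log(v)); // 2
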
+
+// https://tc39.es/proposal-promise-any/#sec-promise.any
+transitioning javascript builtin
+PromiseAny(
+ js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
+ // 1. Let C be the this value.
+ const receiver = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.any');
+
+ // 2. Let promiseCapability be ? NewPromiseCapability(C).
+ const capability = NewPromiseCapability(receiver, False);
+
+ // NewPromiseCapability guarantees that receiver is a Constructor
+ assert(Is<Constructor>(receiver));
+ const constructor = UnsafeCast<Constructor>(receiver);
+
+ try {
+ let iteratorRecord: iterator::IteratorRecord;
+ try {
+ // 3. Let iteratorRecord be GetIterator(iterable).
+
+ // 4. IfAbruptRejectPromise(iteratorRecord, promiseCapability).
+ // (catch below)
+ iteratorRecord = iterator::GetIterator(iterable);
+
+ // 5. Let result be PerformPromiseAny(iteratorRecord, C,
+ // promiseCapability).
+
+ // 6. If result is an abrupt completion, then
+
+ // a. If iteratorRecord.[[Done]] is false, set result to
+ // IteratorClose(iteratorRecord, result).
+
+ // b. IfAbruptRejectPromise(result, promiseCapability).
+
+ // [Iterator closing handled by PerformPromiseAny]
+
+ // 7. Return Completion(result).
+ return PerformPromiseAny(iteratorRecord, constructor, capability)
+ otherwise Reject;
+ } catch (e) deferred {
+ goto Reject(e);
+ }
+ } label Reject(e: Object) deferred {
+ // Exception must be bound to a JS value.
+ assert(e != TheHole);
+ Call(
+ context, UnsafeCast<Callable>(capability.reject), Undefined,
+ UnsafeCast<JSAny>(e));
+ return capability.promise;
+ }
+}
+
+transitioning macro ConstructAggregateError(implicit context: Context)(
+ errorsArray: FixedArray): JSObject {
+ const obj: JSAggregateError = error::ConstructInternalAggregateErrorHelper(
+ context, SmiConstant(MessageTemplate::kAllPromisesRejected));
+ obj.errors = errorsArray;
+ return obj;
+}
+
+extern macro PromiseAnyRejectElementSharedFunConstant(): SharedFunctionInfo;
+}
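ConstructAggregateError above backs the AggregateError instances that are observable from JavaScript, for example:

// Observable from JavaScript (ES2021+):
Promise.any([Promise.reject("a"), Promise.reject("b")]).catch((err) => {
  console.log(err instanceof AggregateError); // true
  console.log(err.errors);                    // ["a", "b"], in input order
});

Promise.any([]).catch((err) => {
  console.log(err instanceof AggregateError); // true, with an empty errors list
});
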
diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq
index dc0e077485..dbf1fe2f4d 100644
--- a/deps/v8/src/builtins/promise-constructor.tq
+++ b/deps/v8/src/builtins/promise-constructor.tq
@@ -6,104 +6,104 @@
#include 'src/builtins/builtins-promise-gen.h'
namespace runtime {
- extern transitioning runtime
- DebugPushPromise(implicit context: Context)(JSAny): JSAny;
+extern transitioning runtime
+DebugPushPromise(implicit context: Context)(JSAny): JSAny;
- extern transitioning runtime
- DebugPopPromise(implicit context: Context)(): JSAny;
+extern transitioning runtime
+DebugPopPromise(implicit context: Context)(): JSAny;
- extern transitioning runtime
- PromiseHookInit(implicit context: Context)(Object, Object): JSAny;
+extern transitioning runtime
+PromiseHookInit(implicit context: Context)(Object, Object): JSAny;
}
// https://tc39.es/ecma262/#sec-promise-constructor
namespace promise {
- extern runtime IncrementUseCounter(Context, Smi): void;
- type UseCounterFeature extends int31
- constexpr 'v8::Isolate::UseCounterFeature';
- const kPromiseConstructorReturnedUndefined: constexpr UseCounterFeature
- generates 'v8::Isolate::kPromiseConstructorReturnedUndefined';
-
- extern macro
- IsDebugActive(): bool;
-
- transitioning macro
- HasAccessCheckFailed(implicit context: Context)(
- nativeContext: NativeContext, promiseFun: JSAny, executor: JSAny): bool {
- BranchIfAccessCheckFailed(nativeContext, promiseFun, executor)
- otherwise return true;
- return false;
- }
+extern runtime IncrementUseCounter(Context, Smi): void;
+type UseCounterFeature extends int31
+constexpr 'v8::Isolate::UseCounterFeature';
+const kPromiseConstructorReturnedUndefined: constexpr UseCounterFeature
+ generates 'v8::Isolate::kPromiseConstructorReturnedUndefined';
+
+extern macro
+IsDebugActive(): bool;
+
+transitioning macro
+HasAccessCheckFailed(implicit context: Context)(
+ nativeContext: NativeContext, promiseFun: JSAny, executor: JSAny): bool {
+ BranchIfAccessCheckFailed(nativeContext, promiseFun, executor)
+ otherwise return true;
+ return false;
+}
- extern macro ConstructorBuiltinsAssembler::EmitFastNewObject(
- Context, JSFunction, JSReceiver): JSObject;
+extern macro ConstructorBuiltinsAssembler::EmitFastNewObject(
+ Context, JSFunction, JSReceiver): JSObject;
- extern macro
- PromiseBuiltinsAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate(): bool;
+extern macro
+PromiseBuiltinsAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate(): bool;
- // https://tc39.es/ecma262/#sec-promise-executor
- transitioning javascript builtin
- PromiseConstructor(
- js-implicit context: NativeContext, receiver: JSAny,
- newTarget: JSAny)(executor: JSAny): JSAny {
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (newTarget == Undefined) {
- ThrowTypeError(MessageTemplate::kNotAPromise, newTarget);
- }
+// https://tc39.es/ecma262/#sec-promise-executor
+transitioning javascript builtin
+PromiseConstructor(
+ js-implicit context: NativeContext, receiver: JSAny,
+ newTarget: JSAny)(executor: JSAny): JSAny {
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (newTarget == Undefined) {
+ ThrowTypeError(MessageTemplate::kNotAPromise, newTarget);
+ }
- // 2. If IsCallable(executor) is false, throw a TypeError exception.
- if (!TaggedIsCallable(executor)) {
- ThrowTypeError(MessageTemplate::kResolverNotAFunction, executor);
- }
+ // 2. If IsCallable(executor) is false, throw a TypeError exception.
+ if (!Is<Callable>(executor)) {
+ ThrowTypeError(MessageTemplate::kResolverNotAFunction, executor);
+ }
- const promiseFun = UnsafeCast<JSFunction>(
- context[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+ const promiseFun = UnsafeCast<JSFunction>(
+ context[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
- // Silently fail if the stack looks fishy.
- if (HasAccessCheckFailed(context, promiseFun, executor)) {
- IncrementUseCounter(
- context, SmiConstant(kPromiseConstructorReturnedUndefined));
- return Undefined;
- }
+ // Silently fail if the stack looks fishy.
+ if (HasAccessCheckFailed(context, promiseFun, executor)) {
+ IncrementUseCounter(
+ context, SmiConstant(kPromiseConstructorReturnedUndefined));
+ return Undefined;
+ }
- let result: JSPromise;
- if (promiseFun == newTarget) {
- result = NewJSPromise();
- } else {
- result = UnsafeCast<JSPromise>(EmitFastNewObject(
- context, promiseFun, UnsafeCast<JSReceiver>(newTarget)));
- PromiseInit(result);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(result, Undefined);
- }
+ let result: JSPromise;
+ if (promiseFun == newTarget) {
+ result = NewJSPromise();
+ } else {
+ result = UnsafeCast<JSPromise>(EmitFastNewObject(
+ context, promiseFun, UnsafeCast<JSReceiver>(newTarget)));
+ PromiseInit(result);
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(result, Undefined);
}
+ }
- const isDebugActive = IsDebugActive();
- if (isDebugActive) runtime::DebugPushPromise(result);
-
- const funcs = CreatePromiseResolvingFunctions(result, True, context);
- const resolve = funcs.resolve;
- const reject = funcs.reject;
- try {
- Call(context, UnsafeCast<Callable>(executor), Undefined, resolve, reject);
- } catch (e) {
- Call(context, reject, Undefined, e);
- }
+ const isDebugActive = IsDebugActive();
+ if (isDebugActive) runtime::DebugPushPromise(result);
- if (isDebugActive) runtime::DebugPopPromise();
- return result;
+ const funcs = CreatePromiseResolvingFunctions(result, True, context);
+ const resolve = funcs.resolve;
+ const reject = funcs.reject;
+ try {
+ Call(context, UnsafeCast<Callable>(executor), Undefined, resolve, reject);
+ } catch (e) {
+ Call(context, reject, Undefined, e);
}
- // Promise.prototype.catch ( onRejected )
- // https://tc39.es/ecma262/#sec-promise.prototype.catch
- transitioning javascript builtin
- PromisePrototypeCatch(js-implicit context: Context, receiver: JSAny)(
- onRejected: JSAny): JSAny {
- // 1. Let promise be the this value.
- // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
- const nativeContext = LoadNativeContext(context);
- return UnsafeCast<JSAny>(
- InvokeThen(nativeContext, receiver, Undefined, onRejected));
- }
+ if (isDebugActive) runtime::DebugPopPromise();
+ return result;
+}
+
+// Promise.prototype.catch ( onRejected )
+// https://tc39.es/ecma262/#sec-promise.prototype.catch
+transitioning javascript builtin
+PromisePrototypeCatch(
+ js-implicit context: Context, receiver: JSAny)(onRejected: JSAny): JSAny {
+ // 1. Let promise be the this value.
+ // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
+ const nativeContext = LoadNativeContext(context);
+ return UnsafeCast<JSAny>(
+ InvokeThen(nativeContext, receiver, Undefined, onRejected));
+}
}
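The constructor path above corresponds to the following observable behavior (illustrative TypeScript, not the Torque code):

// Calling Promise without `new` throws (the NewTarget check above), and a
// non-callable executor throws (the Is<Callable> check above).
// An exception thrown by the executor is routed to reject, matching the
// try/catch around the Call of the executor above.
const p = new Promise<number>(() => {
  throw new Error("executor failed");
});
p.catch((e) => console.log((e as Error).message)); // "executor failed"
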
diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq
index 32028b819d..48928ca4ce 100644
--- a/deps/v8/src/builtins/promise-finally.tq
+++ b/deps/v8/src/builtins/promise-finally.tq
@@ -7,201 +7,201 @@
namespace promise {
- // TODO(joshualitt): The below ContextSlots are only available on synthetic
- // contexts created by the promise pipeline for use in the promise pipeline.
- // However, with Torque we should type the context and its slots to prevent
- // accidentally using these slots on contexts which don't support them.
- const kPromiseBuiltinsValueSlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kValueSlot';
- const kPromiseBuiltinsOnFinallySlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kOnFinallySlot';
- const kPromiseBuiltinsConstructorSlot: constexpr ContextSlot
- generates 'PromiseBuiltins::kConstructorSlot';
- const kPromiseBuiltinsPromiseValueThunkOrReasonContextLength: constexpr int31
- generates 'PromiseBuiltins::kPromiseValueThunkOrReasonContextLength';
- const kPromiseBuiltinsPromiseFinallyContextLength: constexpr int31
- generates 'PromiseBuiltins::kPromiseFinallyContextLength';
-
- transitioning javascript builtin
- PromiseValueThunkFinally(js-implicit context: Context, receiver: JSAny)():
- JSAny {
- return UnsafeCast<JSAny>(context[kPromiseBuiltinsValueSlot]);
- }
+// TODO(joshualitt): The below ContextSlots are only available on synthetic
+// contexts created by the promise pipeline for use in the promise pipeline.
+// However, with Torque we should type the context and its slots to prevent
+// accidentally using these slots on contexts which don't support them.
+const kPromiseBuiltinsValueSlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kValueSlot';
+const kPromiseBuiltinsOnFinallySlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kOnFinallySlot';
+const kPromiseBuiltinsConstructorSlot: constexpr ContextSlot
+ generates 'PromiseBuiltins::kConstructorSlot';
+const kPromiseBuiltinsPromiseValueThunkOrReasonContextLength: constexpr int31
+ generates 'PromiseBuiltins::kPromiseValueThunkOrReasonContextLength';
+const kPromiseBuiltinsPromiseFinallyContextLength: constexpr int31
+ generates 'PromiseBuiltins::kPromiseFinallyContextLength';
+
+transitioning javascript builtin
+PromiseValueThunkFinally(
+ js-implicit context: Context, receiver: JSAny)(): JSAny {
+ return UnsafeCast<JSAny>(context[kPromiseBuiltinsValueSlot]);
+}
- transitioning javascript builtin
- PromiseThrowerFinally(js-implicit context: Context, receiver: JSAny)():
- never {
- const reason = UnsafeCast<JSAny>(context[kPromiseBuiltinsValueSlot]);
- Throw(reason);
- }
+transitioning javascript builtin
+PromiseThrowerFinally(js-implicit context: Context, receiver: JSAny)(): never {
+ const reason = UnsafeCast<JSAny>(context[kPromiseBuiltinsValueSlot]);
+ Throw(reason);
+}
- macro CreateThrowerFunction(implicit context: Context)(
- nativeContext: NativeContext, reason: JSAny): JSFunction {
- const throwerContext = AllocateSyntheticFunctionContext(
- nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength);
- throwerContext[kPromiseBuiltinsValueSlot] = reason;
- const map = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const throwerInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::PROMISE_THROWER_FINALLY_SHARED_FUN]);
- return AllocateFunctionWithMapAndContext(map, throwerInfo, throwerContext);
- }
+macro CreateThrowerFunction(implicit context: Context)(
+ nativeContext: NativeContext, reason: JSAny): JSFunction {
+ const throwerContext = AllocateSyntheticFunctionContext(
+ nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength);
+ throwerContext[kPromiseBuiltinsValueSlot] = reason;
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const throwerInfo = PromiseThrowerFinallySharedFunConstant();
+ return AllocateFunctionWithMapAndContext(map, throwerInfo, throwerContext);
+}
- transitioning javascript builtin
- PromiseCatchFinally(js-implicit context: Context, receiver: JSAny)(
- reason: JSAny): JSAny {
- // 1. Let onFinally be F.[[OnFinally]].
- // 2. Assert: IsCallable(onFinally) is true.
- const onFinally =
- UnsafeCast<Callable>(context[kPromiseBuiltinsOnFinallySlot]);
+transitioning javascript builtin
+PromiseCatchFinally(
+ js-implicit context: Context, receiver: JSAny)(reason: JSAny): JSAny {
+ // 1. Let onFinally be F.[[OnFinally]].
+ // 2. Assert: IsCallable(onFinally) is true.
+ const onFinally =
+ UnsafeCast<Callable>(context[kPromiseBuiltinsOnFinallySlot]);
- // 3. Let result be ? Call(onFinally).
- const result = Call(context, onFinally, Undefined);
+ // 3. Let result be ? Call(onFinally).
+ const result = Call(context, onFinally, Undefined);
- // 4. Let C be F.[[Constructor]].
- const constructor =
- UnsafeCast<JSFunction>(context[kPromiseBuiltinsConstructorSlot]);
+ // 4. Let C be F.[[Constructor]].
+ const constructor =
+ UnsafeCast<JSFunction>(context[kPromiseBuiltinsConstructorSlot]);
- // 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ // 5. Assert: IsConstructor(C) is true.
+ assert(IsConstructor(constructor));
- // 6. Let promise be ? PromiseResolve(C, result).
- const promise = PromiseResolve(constructor, result);
+ // 6. Let promise be ? PromiseResolve(C, result).
+ const promise = PromiseResolve(constructor, result);
- // 7. Let thrower be equivalent to a function that throws reason.
- const nativeContext = LoadNativeContext(context);
- const thrower = CreateThrowerFunction(nativeContext, reason);
+ // 7. Let thrower be equivalent to a function that throws reason.
+ const nativeContext = LoadNativeContext(context);
+ const thrower = CreateThrowerFunction(nativeContext, reason);
- // 8. Return ? Invoke(promise, "then", « thrower »).
- return UnsafeCast<JSAny>(InvokeThen(nativeContext, promise, thrower));
- }
+ // 8. Return ? Invoke(promise, "then", « thrower »).
+ return UnsafeCast<JSAny>(InvokeThen(nativeContext, promise, thrower));
+}
- macro CreateValueThunkFunction(implicit context: Context)(
- nativeContext: NativeContext, value: JSAny): JSFunction {
- const valueThunkContext = AllocateSyntheticFunctionContext(
- nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength);
- valueThunkContext[kPromiseBuiltinsValueSlot] = value;
- const map = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const valueThunkInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext
- [NativeContextSlot::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN]);
- return AllocateFunctionWithMapAndContext(
- map, valueThunkInfo, valueThunkContext);
- }
+macro CreateValueThunkFunction(implicit context: Context)(
+ nativeContext: NativeContext, value: JSAny): JSFunction {
+ const valueThunkContext = AllocateSyntheticFunctionContext(
+ nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength);
+ valueThunkContext[kPromiseBuiltinsValueSlot] = value;
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const valueThunkInfo = PromiseValueThunkFinallySharedFunConstant();
+ return AllocateFunctionWithMapAndContext(
+ map, valueThunkInfo, valueThunkContext);
+}
- transitioning javascript builtin
- PromiseThenFinally(js-implicit context: Context, receiver: JSAny)(
- value: JSAny): JSAny {
- // 1. Let onFinally be F.[[OnFinally]].
- // 2. Assert: IsCallable(onFinally) is true.
- const onFinally =
- UnsafeCast<Callable>(context[kPromiseBuiltinsOnFinallySlot]);
+transitioning javascript builtin
+PromiseThenFinally(
+ js-implicit context: Context, receiver: JSAny)(value: JSAny): JSAny {
+ // 1. Let onFinally be F.[[OnFinally]].
+ // 2. Assert: IsCallable(onFinally) is true.
+ const onFinally =
+ UnsafeCast<Callable>(context[kPromiseBuiltinsOnFinallySlot]);
- // 3. Let result be ? Call(onFinally).
- const result = Call(context, onFinally, Undefined);
+ // 3. Let result be ? Call(onFinally).
+ const result = Call(context, onFinally, Undefined);
- // 4. Let C be F.[[Constructor]].
- const constructor =
- UnsafeCast<JSFunction>(context[kPromiseBuiltinsConstructorSlot]);
+ // 4. Let C be F.[[Constructor]].
+ const constructor =
+ UnsafeCast<JSFunction>(context[kPromiseBuiltinsConstructorSlot]);
- // 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ // 5. Assert: IsConstructor(C) is true.
+ assert(IsConstructor(constructor));
- // 6. Let promise be ? PromiseResolve(C, result).
- const promise = PromiseResolve(constructor, result);
+ // 6. Let promise be ? PromiseResolve(C, result).
+ const promise = PromiseResolve(constructor, result);
- // 7. Let valueThunk be equivalent to a function that returns value.
- const nativeContext = LoadNativeContext(context);
- const valueThunk = CreateValueThunkFunction(nativeContext, value);
+ // 7. Let valueThunk be equivalent to a function that returns value.
+ const nativeContext = LoadNativeContext(context);
+ const valueThunk = CreateValueThunkFunction(nativeContext, value);
- // 8. Return ? Invoke(promise, "then", « valueThunk »).
- return UnsafeCast<JSAny>(InvokeThen(nativeContext, promise, valueThunk));
- }
+ // 8. Return ? Invoke(promise, "then", « valueThunk »).
+ return UnsafeCast<JSAny>(InvokeThen(nativeContext, promise, valueThunk));
+}
- struct PromiseFinallyFunctions {
- then_finally: JSFunction;
- catch_finally: JSFunction;
- }
+struct PromiseFinallyFunctions {
+ then_finally: JSFunction;
+ catch_finally: JSFunction;
+}
- macro CreatePromiseFinallyFunctions(implicit context: Context)(
- nativeContext: NativeContext, onFinally: Callable,
- constructor: JSReceiver): PromiseFinallyFunctions {
- const promiseContext = AllocateSyntheticFunctionContext(
- nativeContext, kPromiseBuiltinsPromiseFinallyContextLength);
- promiseContext[kPromiseBuiltinsOnFinallySlot] = onFinally;
- promiseContext[kPromiseBuiltinsConstructorSlot] = constructor;
- const map = UnsafeCast<Map>(
- nativeContext
- [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
- const thenFinallyInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::PROMISE_THEN_FINALLY_SHARED_FUN]);
- const thenFinally =
- AllocateFunctionWithMapAndContext(map, thenFinallyInfo, promiseContext);
- const catchFinallyInfo = UnsafeCast<SharedFunctionInfo>(
- nativeContext[NativeContextSlot::PROMISE_CATCH_FINALLY_SHARED_FUN]);
- const catchFinally = AllocateFunctionWithMapAndContext(
- map, catchFinallyInfo, promiseContext);
- return PromiseFinallyFunctions{
- then_finally: thenFinally,
- catch_finally: catchFinally
- };
- }
+macro CreatePromiseFinallyFunctions(implicit context: Context)(
+ nativeContext: NativeContext, onFinally: Callable,
+ constructor: JSReceiver): PromiseFinallyFunctions {
+ const promiseContext = AllocateSyntheticFunctionContext(
+ nativeContext, kPromiseBuiltinsPromiseFinallyContextLength);
+ promiseContext[kPromiseBuiltinsOnFinallySlot] = onFinally;
+ promiseContext[kPromiseBuiltinsConstructorSlot] = constructor;
+ const map = UnsafeCast<Map>(
+ nativeContext
+ [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]);
+ const thenFinallyInfo = PromiseThenFinallySharedFunConstant();
+ const thenFinally =
+ AllocateFunctionWithMapAndContext(map, thenFinallyInfo, promiseContext);
+ const catchFinallyInfo = PromiseCatchFinallySharedFunConstant();
+ const catchFinally =
+ AllocateFunctionWithMapAndContext(map, catchFinallyInfo, promiseContext);
+ return PromiseFinallyFunctions{
+ then_finally: thenFinally,
+ catch_finally: catchFinally
+ };
+}
- transitioning javascript builtin
- PromisePrototypeFinally(js-implicit context: Context, receiver: JSAny)(
- onFinally: JSAny): JSAny {
- // 1. Let promise be the this value.
- // 2. If Type(promise) is not Object, throw a TypeError exception.
- const jsReceiver = Cast<JSReceiver>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Promise.prototype.finally');
-
- // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- const nativeContext = LoadNativeContext(context);
- const promiseFun = UnsafeCast<Callable>(
- nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
-
- let constructor: JSReceiver = promiseFun;
- const receiverMap = jsReceiver.map;
- if (!IsJSPromiseMap(receiverMap) ||
- !IsPromiseSpeciesLookupChainIntact(nativeContext, receiverMap))
- deferred {
- constructor = SpeciesConstructor(jsReceiver, promiseFun);
- }
-
- // 4. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
-
- // 5. If IsCallable(onFinally) is not true,
- // a. Let thenFinally be onFinally.
- // b. Let catchFinally be onFinally.
- // 6. Else,
- // a. Let thenFinally be a new built-in function object as defined
- // in ThenFinally Function.
- // b. Let catchFinally be a new built-in function object as
- // defined in CatchFinally Function.
- // c. Set thenFinally and catchFinally's [[Constructor]] internal
- // slots to C.
- // d. Set thenFinally and catchFinally's [[OnFinally]] internal
- // slots to onFinally.
- let thenFinally: JSAny;
- let catchFinally: JSAny;
- if (!TaggedIsSmi(onFinally) &&
- IsCallable(UnsafeCast<HeapObject>(onFinally))) {
- const pair = CreatePromiseFinallyFunctions(
- nativeContext, UnsafeCast<Callable>(onFinally), constructor);
+transitioning javascript builtin
+PromisePrototypeFinally(
+ js-implicit context: Context, receiver: JSAny)(onFinally: JSAny): JSAny {
+ // 1. Let promise be the this value.
+ // 2. If Type(promise) is not Object, throw a TypeError exception.
+ const jsReceiver = Cast<JSReceiver>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Promise.prototype.finally');
+
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ const nativeContext = LoadNativeContext(context);
+ const promiseFun = UnsafeCast<Callable>(
+ nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+
+ let constructor: JSReceiver = promiseFun;
+ const receiverMap = jsReceiver.map;
+ if (!IsJSPromiseMap(receiverMap) ||
+ !IsPromiseSpeciesLookupChainIntact(nativeContext, receiverMap))
+ deferred {
+ constructor = SpeciesConstructor(jsReceiver, promiseFun);
+ }
+
+ // 4. Assert: IsConstructor(C) is true.
+ assert(IsConstructor(constructor));
+
+ // 5. If IsCallable(onFinally) is not true,
+ // a. Let thenFinally be onFinally.
+ // b. Let catchFinally be onFinally.
+ // 6. Else,
+ // a. Let thenFinally be a new built-in function object as defined
+ // in ThenFinally Function.
+ // b. Let catchFinally be a new built-in function object as
+ // defined in CatchFinally Function.
+ // c. Set thenFinally and catchFinally's [[Constructor]] internal
+ // slots to C.
+ // d. Set thenFinally and catchFinally's [[OnFinally]] internal
+ // slots to onFinally.
+ let thenFinally: JSAny;
+ let catchFinally: JSAny;
+ typeswitch (onFinally) {
+ case (onFinally: Callable): {
+ const pair =
+ CreatePromiseFinallyFunctions(nativeContext, onFinally, constructor);
thenFinally = pair.then_finally;
catchFinally = pair.catch_finally;
- } else
- deferred {
- thenFinally = onFinally;
- catchFinally = onFinally;
- }
-
- // 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
- return UnsafeCast<JSAny>(
- InvokeThen(nativeContext, receiver, thenFinally, catchFinally));
+ }
+ case (JSAny): deferred {
+ thenFinally = onFinally;
+ catchFinally = onFinally;
+ }
}
+
+ // 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
+ return UnsafeCast<JSAny>(
+ InvokeThen(nativeContext, receiver, thenFinally, catchFinally));
+}
+
+extern macro PromiseCatchFinallySharedFunConstant(): SharedFunctionInfo;
+extern macro PromiseThenFinallySharedFunConstant(): SharedFunctionInfo;
+extern macro PromiseThrowerFinallySharedFunConstant(): SharedFunctionInfo;
+extern macro PromiseValueThunkFinallySharedFunConstant(): SharedFunctionInfo;
}
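The value thunk and thrower created above are what give Promise.prototype.finally its pass-through behavior; informally, in TypeScript:

// The value thunk re-returns the original value and the thrower re-throws the
// original reason, so onFinally cannot change the settled outcome.
Promise.resolve(42)
  .finally(() => "ignored")
  .then((v) => console.log(v)); // 42

Promise.reject(new Error("boom"))
  .finally(() => "ignored")
  .catch((e) => console.log((e as Error).message)); // "boom"

// A non-callable onFinally is simply forwarded to `then` unchanged
// (the `case (JSAny): deferred` branch above).
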
diff --git a/deps/v8/src/builtins/promise-jobs.tq b/deps/v8/src/builtins/promise-jobs.tq
index ee049605e1..6c64baf22d 100644
--- a/deps/v8/src/builtins/promise-jobs.tq
+++ b/deps/v8/src/builtins/promise-jobs.tq
@@ -6,68 +6,68 @@
// https://tc39.es/ecma262/#sec-promise-jobs
namespace promise {
- extern macro IsJSPromiseMap(Map): bool;
+extern macro IsJSPromiseMap(Map): bool;
- // https://tc39.es/ecma262/#sec-promiseresolvethenablejob
- transitioning builtin
- PromiseResolveThenableJob(implicit context: Context)(
- promiseToResolve: JSPromise, thenable: JSReceiver, then: JSAny): JSAny {
- // We can use a simple optimization here if we know that {then} is the
- // initial Promise.prototype.then method, and {thenable} is a JSPromise
- // whose
- // @@species lookup chain is intact: We can connect {thenable} and
- // {promise_to_resolve} directly in that case and avoid the allocation of a
- // temporary JSPromise and the closures plus context.
+// https://tc39.es/ecma262/#sec-promiseresolvethenablejob
+transitioning builtin
+PromiseResolveThenableJob(implicit context: Context)(
+ promiseToResolve: JSPromise, thenable: JSReceiver, then: JSAny): JSAny {
+ // We can use a simple optimization here if we know that {then} is the
+ // initial Promise.prototype.then method, and {thenable} is a JSPromise
+ // whose
+ // @@species lookup chain is intact: We can connect {thenable} and
+ // {promise_to_resolve} directly in that case and avoid the allocation of a
+ // temporary JSPromise and the closures plus context.
+ //
+ // We take the generic (slow-)path if a PromiseHook is enabled or the
+ // debugger is active, to make sure we expose spec compliant behavior.
+ const nativeContext = LoadNativeContext(context);
+ const promiseThen = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX];
+ const thenableMap = thenable.map;
+ if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) &&
+ !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() &&
+ IsPromiseSpeciesLookupChainIntact(nativeContext, thenableMap)) {
+ // We know that the {thenable} is a JSPromise, which doesn't require
+ // any special treatment and that {then} corresponds to the initial
+ // Promise.prototype.then method. So instead of allocating a temporary
+ // JSPromise to connect the {thenable} with the {promise_to_resolve},
+ // we can directly schedule the {promise_to_resolve} with default
+ // handlers onto the {thenable} promise. This not only saves the
+ // JSPromise allocation, but also avoids the allocation of the two
+ // resolving closures and the shared context.
//
- // We take the generic (slow-)path if a PromiseHook is enabled or the
- // debugger is active, to make sure we expose spec compliant behavior.
- const nativeContext = LoadNativeContext(context);
- const promiseThen = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX];
- const thenableMap = thenable.map;
- if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) &&
- !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() &&
- IsPromiseSpeciesLookupChainIntact(nativeContext, thenableMap)) {
- // We know that the {thenable} is a JSPromise, which doesn't require
- // any special treatment and that {then} corresponds to the initial
- // Promise.prototype.then method. So instead of allocating a temporary
- // JSPromise to connect the {thenable} with the {promise_to_resolve},
- // we can directly schedule the {promise_to_resolve} with default
- // handlers onto the {thenable} promise. This does not only save the
- // JSPromise allocation, but also avoids the allocation of the two
- // resolving closures and the shared context.
- //
- // What happens normally in this case is
- //
- // resolve, reject = CreateResolvingFunctions(promise_to_resolve)
- // result_capability = NewPromiseCapability(%Promise%)
- // PerformPromiseThen(thenable, resolve, reject, result_capability)
- //
- // which means that PerformPromiseThen will either schedule a new
- // PromiseReaction with resolve and reject or a PromiseReactionJob
- // with resolve or reject based on the state of {thenable}. And
- // resolve or reject will just invoke the default [[Resolve]] or
- // [[Reject]] functions on the {promise_to_resolve}.
- //
- // This is the same as just doing
- //
- // PerformPromiseThen(thenable, undefined, undefined,
- // promise_to_resolve)
- //
- // which performs exactly the same (observable) steps.
- return PerformPromiseThen(
- UnsafeCast<JSPromise>(thenable), UndefinedConstant(),
- UndefinedConstant(), promiseToResolve);
- } else {
- const funcs = CreatePromiseResolvingFunctions(
- promiseToResolve, False, nativeContext);
- const resolve = funcs.resolve;
- const reject = funcs.reject;
- try {
- return Call(
- context, UnsafeCast<Callable>(then), thenable, resolve, reject);
- } catch (e) {
- return Call(context, UnsafeCast<Callable>(reject), Undefined, e);
- }
+ // What happens normally in this case is
+ //
+ // resolve, reject = CreateResolvingFunctions(promise_to_resolve)
+ // result_capability = NewPromiseCapability(%Promise%)
+ // PerformPromiseThen(thenable, resolve, reject, result_capability)
+ //
+ // which means that PerformPromiseThen will either schedule a new
+ // PromiseReaction with resolve and reject or a PromiseReactionJob
+ // with resolve or reject based on the state of {thenable}. And
+ // resolve or reject will just invoke the default [[Resolve]] or
+ // [[Reject]] functions on the {promise_to_resolve}.
+ //
+ // This is the same as just doing
+ //
+ // PerformPromiseThen(thenable, undefined, undefined,
+ // promise_to_resolve)
+ //
+ // which performs exactly the same (observable) steps.
+ return PerformPromiseThen(
+ UnsafeCast<JSPromise>(thenable), UndefinedConstant(),
+ UndefinedConstant(), promiseToResolve);
+ } else {
+ const funcs =
+ CreatePromiseResolvingFunctions(promiseToResolve, False, nativeContext);
+ const resolve = funcs.resolve;
+ const reject = funcs.reject;
+ try {
+ return Call(
+ context, UnsafeCast<Callable>(then), thenable, resolve, reject);
+ } catch (e) {
+ return Call(context, UnsafeCast<Callable>(reject), Undefined, e);
}
}
}
+}
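The slow path at the end of this hunk is the textbook thenable job: call the thenable's then with the resolving functions of the promise being resolved, and turn a synchronous throw from then into a rejection. A minimal TypeScript model of that generic path (the fast path that links two JSPromises directly has no user-level equivalent):

// Minimal model of the generic path only.
function promiseResolveThenableJob(
  resolve: (value: unknown) => void,
  reject: (reason: unknown) => void,
  thenable: object,
  then: (this: object, onFulfilled: (v: unknown) => void,
         onRejected: (r: unknown) => void) => void,
): void {
  try {
    then.call(thenable, resolve, reject);
  } catch (e) {
    reject(e); // a synchronous throw from `then` rejects the promise
  }
}

// Usage: resolving a promise with a hand-rolled thenable.
new Promise<unknown>((res, rej) => {
  const thenable = {
    then(this: object, onOk: (v: unknown) => void) { onOk("done"); },
  };
  promiseResolveThenableJob(res, rej, thenable, thenable.then);
}).then((v) => console.log(v)); // "done"
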
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 61461de29f..7ed2f7909a 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -6,246 +6,267 @@
#include 'src/builtins/builtins-promise-gen.h'
namespace runtime {
- extern transitioning runtime
- AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
+extern transitioning runtime
+AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
}
// Unsafe functions that should be used very carefully.
namespace promise_internal {
- extern macro PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(JSPromise):
- void;
+extern macro PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(JSPromise): void;
- extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
+extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
}
namespace promise {
- extern macro IsFunctionWithPrototypeSlotMap(Map): bool;
+extern macro IsFunctionWithPrototypeSlotMap(Map): bool;
- @export
- macro PromiseHasHandler(promise: JSPromise): bool {
- return promise.HasHandler();
- }
+@export
+macro PromiseHasHandler(promise: JSPromise): bool {
+ return promise.HasHandler();
+}
- @export
- macro PromiseInit(promise: JSPromise): void {
- assert(PromiseState::kPending == 0);
- promise.reactions_or_result = kZero;
- promise.flags = 0;
- promise_internal::ZeroOutEmbedderOffsets(promise);
- }
+@export
+macro PromiseInit(promise: JSPromise): void {
+ promise.reactions_or_result = kZero;
+ promise.flags = SmiTag(JSPromiseFlags{
+ status: PromiseState::kPending,
+ has_handler: false,
+ handled_hint: false,
+ async_task_id: 0
+ });
+ promise_internal::ZeroOutEmbedderOffsets(promise);
+}
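PromiseInit now spells out the individual flag fields instead of storing a raw 0. A rough TypeScript model of what packing such a flags word looks like; the bit widths here are illustrative assumptions, not taken from the V8 headers:

// Hypothetical packing: 2 bits of status, two 1-bit flags, then the async
// task id in the remaining bits (widths assumed for illustration).
enum PromiseState { Pending = 0, Fulfilled = 1, Rejected = 2 }

interface PromiseFlags {
  status: PromiseState;
  hasHandler: boolean;
  handledHint: boolean;
  asyncTaskId: number;
}

function packFlags(f: PromiseFlags): number {
  return (f.status & 0b11) |
      ((f.hasHandler ? 1 : 0) << 2) |
      ((f.handledHint ? 1 : 0) << 3) |
      (f.asyncTaskId << 4);
}

// A freshly initialized promise packs to 0, matching the old `flags = 0`.
console.log(packFlags({
  status: PromiseState.Pending,
  hasHandler: false,
  handledHint: false,
  asyncTaskId: 0,
})); // 0
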
- macro InnerNewJSPromise(implicit context: Context)(): JSPromise {
- const nativeContext = LoadNativeContext(context);
- const promiseFun = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
- assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
- const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
- const promiseHeapObject = promise_internal::AllocateJSPromise(context);
- * UnsafeConstCast(& promiseHeapObject.map) = promiseMap;
- const promise = UnsafeCast<JSPromise>(promiseHeapObject);
- promise.properties_or_hash = kEmptyFixedArray;
- promise.elements = kEmptyFixedArray;
- promise.reactions_or_result = kZero;
- promise.flags = 0;
- return promise;
- }
+macro InnerNewJSPromise(implicit context: Context)(): JSPromise {
+ const nativeContext = LoadNativeContext(context);
+ const promiseFun = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+ assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
+ const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
+ const promiseHeapObject = promise_internal::AllocateJSPromise(context);
+ * UnsafeConstCast(& promiseHeapObject.map) = promiseMap;
+ const promise = UnsafeCast<JSPromise>(promiseHeapObject);
+ promise.properties_or_hash = kEmptyFixedArray;
+ promise.elements = kEmptyFixedArray;
+ promise.reactions_or_result = kZero;
+ promise.flags = SmiTag(JSPromiseFlags{
+ status: PromiseState::kPending,
+ has_handler: false,
+ handled_hint: false,
+ async_task_id: 0
+ });
+ return promise;
+}
- macro NewPromiseFulfillReactionJobTask(implicit context: Context)(
- handlerContext: Context, argument: Object, handler: Callable|Undefined,
- promiseOrCapability: JSPromise|PromiseCapability|
- Undefined): PromiseFulfillReactionJobTask {
- const nativeContext = LoadNativeContext(handlerContext);
- return new PromiseFulfillReactionJobTask{
- map: PromiseFulfillReactionJobTaskMapConstant(),
- argument,
- context: handlerContext,
- handler,
- promise_or_capability: promiseOrCapability,
- continuation_preserved_embedder_data: nativeContext
- [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
- };
- }
+macro NewPromiseFulfillReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|
+ Undefined): PromiseFulfillReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseFulfillReactionJobTask{
+ map: PromiseFulfillReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
+}
- macro NewPromiseRejectReactionJobTask(implicit context: Context)(
- handlerContext: Context, argument: Object, handler: Callable|Undefined,
- promiseOrCapability: JSPromise|PromiseCapability|
- Undefined): PromiseRejectReactionJobTask {
- const nativeContext = LoadNativeContext(handlerContext);
- return new PromiseRejectReactionJobTask{
- map: PromiseRejectReactionJobTaskMapConstant(),
- argument,
- context: handlerContext,
- handler,
- promise_or_capability: promiseOrCapability,
- continuation_preserved_embedder_data: nativeContext
- [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
- };
- }
+macro NewPromiseRejectReactionJobTask(implicit context: Context)(
+ handlerContext: Context, argument: Object, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|
+ Undefined): PromiseRejectReactionJobTask {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseRejectReactionJobTask{
+ map: PromiseRejectReactionJobTaskMapConstant(),
+ argument,
+ context: handlerContext,
+ handler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
+}
- // These allocate and initialize a promise with pending state and
- // undefined fields.
- //
- // This uses the given parent as the parent promise for the promise
- // init hook.
- @export
- transitioning macro NewJSPromise(implicit context: Context)(parent: Object):
- JSPromise {
- const instance = InnerNewJSPromise();
- PromiseInit(instance);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, parent);
- }
- return instance;
+// These allocate and initialize a promise with pending state and
+// undefined fields.
+//
+// This uses the given parent as the parent promise for the promise
+// init hook.
+@export
+transitioning macro NewJSPromise(implicit context: Context)(parent: Object):
+ JSPromise {
+ const instance = InnerNewJSPromise();
+ PromiseInit(instance);
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(instance, parent);
}
+ return instance;
+}
- // This uses undefined as the parent promise for the promise init
- // hook.
- @export
- transitioning macro NewJSPromise(implicit context: Context)(): JSPromise {
- return NewJSPromise(Undefined);
- }
+// This uses undefined as the parent promise for the promise init
+// hook.
+@export
+transitioning macro NewJSPromise(implicit context: Context)(): JSPromise {
+ return NewJSPromise(Undefined);
+}
- // This allocates and initializes a promise with the given state and
- // fields.
- @export
- transitioning macro NewJSPromise(implicit context: Context)(
- status: constexpr PromiseState, result: JSAny): JSPromise {
- assert(status != PromiseState::kPending);
- assert(kJSPromiseStatusShift == 0);
-
- const instance = InnerNewJSPromise();
- instance.reactions_or_result = result;
- instance.SetStatus(status);
- promise_internal::ZeroOutEmbedderOffsets(instance);
-
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, Undefined);
- }
- return instance;
- }
+// This allocates and initializes a promise with the given state and
+// fields.
+@export
+transitioning macro NewJSPromise(implicit context: Context)(
+ status: constexpr PromiseState, result: JSAny): JSPromise {
+ assert(status != PromiseState::kPending);
- macro NewPromiseReaction(implicit context: Context)(
- handlerContext: Context, next: Zero|PromiseReaction,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined,
- fulfillHandler: Callable|Undefined,
- rejectHandler: Callable|Undefined): PromiseReaction {
- const nativeContext = LoadNativeContext(handlerContext);
- return new PromiseReaction{
- map: PromiseReactionMapConstant(),
- next: next,
- reject_handler: rejectHandler,
- fulfill_handler: fulfillHandler,
- promise_or_capability: promiseOrCapability,
- continuation_preserved_embedder_data: nativeContext
- [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
- };
- }
+ const instance = InnerNewJSPromise();
+ instance.reactions_or_result = result;
+ instance.SetStatus(status);
+ promise_internal::ZeroOutEmbedderOffsets(instance);
- extern macro PromiseResolveThenableJobTaskMapConstant(): Map;
-
- macro NewPromiseResolveThenableJobTask(implicit context: Context)(
- promiseToResolve: JSPromise, then: JSReceiver, thenable: JSReceiver,
- thenContext: Context): PromiseResolveThenableJobTask {
- return new PromiseResolveThenableJobTask{
- map: PromiseResolveThenableJobTaskMapConstant(),
- context: thenContext,
- promise_to_resolve: promiseToResolve,
- then: then,
- thenable: thenable
- };
+ if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
+ runtime::PromiseHookInit(instance, Undefined);
}
+ return instance;
+}
- struct InvokeThenOneArgFunctor {
- transitioning
- macro Call(
- nativeContext: NativeContext, then: JSAny, receiver: JSAny, arg1: JSAny,
- _arg2: JSAny): JSAny {
- return Call(nativeContext, then, receiver, arg1);
- }
- }
+macro NewPromiseReaction(implicit context: Context)(
+ handlerContext: Context, next: Zero|PromiseReaction,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined,
+ fulfillHandler: Callable|Undefined,
+ rejectHandler: Callable|Undefined): PromiseReaction {
+ const nativeContext = LoadNativeContext(handlerContext);
+ return new PromiseReaction{
+ map: PromiseReactionMapConstant(),
+ next: next,
+ reject_handler: rejectHandler,
+ fulfill_handler: fulfillHandler,
+ promise_or_capability: promiseOrCapability,
+ continuation_preserved_embedder_data: nativeContext
+ [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX]
+ };
+}
- struct InvokeThenTwoArgFunctor {
- transitioning
- macro Call(
- nativeContext: NativeContext, then: JSAny, receiver: JSAny, arg1: JSAny,
- arg2: JSAny): JSAny {
- return Call(nativeContext, then, receiver, arg1, arg2);
- }
- }
+extern macro PromiseResolveThenableJobTaskMapConstant(): Map;
- transitioning
- macro InvokeThen<F: type>(implicit context: Context)(
- nativeContext: NativeContext, receiver: JSAny, arg1: JSAny, arg2: JSAny,
- callFunctor: F): JSAny {
- // We can skip the "then" lookup on {receiver} if its [[Prototype]]
- // is the (initial) Promise.prototype and the Promise#then protector
- // is intact, as that guards the lookup path for the "then" property
- // on JSPromise instances which have the (initial) %PromisePrototype%.
- if (!Is<Smi>(receiver) &&
- IsPromiseThenLookupChainIntact(
- nativeContext, UnsafeCast<HeapObject>(receiver).map)) {
- const then = UnsafeCast<JSAny>(
- nativeContext[NativeContextSlot::PROMISE_THEN_INDEX]);
- return callFunctor.Call(nativeContext, then, receiver, arg1, arg2);
- } else
- deferred {
- const then = UnsafeCast<JSAny>(GetProperty(receiver, kThenString));
- return callFunctor.Call(nativeContext, then, receiver, arg1, arg2);
- }
- }
+// https://tc39.es/ecma262/#sec-newpromiseresolvethenablejob
+macro NewPromiseResolveThenableJobTask(implicit context: Context)(
+ promiseToResolve: JSPromise, thenable: JSReceiver,
+ then: Callable): PromiseResolveThenableJobTask {
+ // 2. Let getThenRealmResult be GetFunctionRealm(then).
+ // 3. If getThenRealmResult is a normal completion, then let thenRealm be
+ // getThenRealmResult.[[Value]].
+ // 4. Otherwise, let thenRealm be null.
+ //
+ // The only cases where |thenRealm| can be null is when |then| is a revoked
+ // Proxy object, which would throw when it is called anyway. So instead of
+ // setting the context to null as the spec does, we just use the current
+ // realm.
+ const thenContext: Context = ExtractHandlerContext(then);
+ const nativeContext = LoadNativeContext(thenContext);
+
+ // 1. Let job be a new Job abstract closure with no parameters that
+ // captures promiseToResolve, thenable, and then...
+ // 5. Return { [[Job]]: job, [[Realm]]: thenRealm }.
+ return new PromiseResolveThenableJobTask{
+ map: PromiseResolveThenableJobTaskMapConstant(),
+ context: nativeContext,
+ promise_to_resolve: promiseToResolve,
+ thenable,
+ then
+ };
+}
+struct InvokeThenOneArgFunctor {
transitioning
- macro InvokeThen(implicit context: Context)(
- nativeContext: NativeContext, receiver: JSAny, arg: JSAny): JSAny {
- return InvokeThen(
- nativeContext, receiver, arg, Undefined, InvokeThenOneArgFunctor{});
+ macro Call(
+ nativeContext: NativeContext, then: JSAny, receiver: JSAny, arg1: JSAny,
+ _arg2: JSAny): JSAny {
+ return Call(nativeContext, then, receiver, arg1);
}
+}
+struct InvokeThenTwoArgFunctor {
transitioning
- macro InvokeThen(implicit context: Context)(
- nativeContext: NativeContext, receiver: JSAny, arg1: JSAny,
+ macro Call(
+ nativeContext: NativeContext, then: JSAny, receiver: JSAny, arg1: JSAny,
arg2: JSAny): JSAny {
- return InvokeThen(
- nativeContext, receiver, arg1, arg2, InvokeThenTwoArgFunctor{});
+ return Call(nativeContext, then, receiver, arg1, arg2);
}
+}
- transitioning
- macro BranchIfAccessCheckFailed(implicit context: Context)(
- nativeContext: NativeContext, promiseConstructor: JSAny,
- executor: JSAny): void labels IfNoAccess {
- try {
- // If executor is a bound function, load the bound function until we've
- // reached an actual function.
- let foundExecutor = executor;
- while (true) {
- typeswitch (foundExecutor) {
- case (f: JSFunction): {
- // Load the context from the function and compare it to the Promise
- // constructor's context. If they match, everything is fine,
- // otherwise, bail out to the runtime.
- const functionContext = f.context;
- const nativeFunctionContext = LoadNativeContext(functionContext);
- if (TaggedEqual(nativeContext, nativeFunctionContext)) {
- goto HasAccess;
- } else {
- goto CallRuntime;
- }
- }
- case (b: JSBoundFunction): {
- foundExecutor = b.bound_target_function;
- }
- case (Object): {
+transitioning
+macro InvokeThen<F: type>(implicit context: Context)(
+ nativeContext: NativeContext, receiver: JSAny, arg1: JSAny, arg2: JSAny,
+ callFunctor: F): JSAny {
+ // We can skip the "then" lookup on {receiver} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ if (!Is<Smi>(receiver) &&
+ IsPromiseThenLookupChainIntact(
+ nativeContext, UnsafeCast<HeapObject>(receiver).map)) {
+ const then =
+ UnsafeCast<JSAny>(nativeContext[NativeContextSlot::PROMISE_THEN_INDEX]);
+ return callFunctor.Call(nativeContext, then, receiver, arg1, arg2);
+ } else
+ deferred {
+ const then = UnsafeCast<JSAny>(GetProperty(receiver, kThenString));
+ return callFunctor.Call(nativeContext, then, receiver, arg1, arg2);
+ }
+}
+
+transitioning
+macro InvokeThen(implicit context: Context)(
+ nativeContext: NativeContext, receiver: JSAny, arg: JSAny): JSAny {
+ return InvokeThen(
+ nativeContext, receiver, arg, Undefined, InvokeThenOneArgFunctor{});
+}
+
+transitioning
+macro InvokeThen(implicit context: Context)(
+ nativeContext: NativeContext, receiver: JSAny, arg1: JSAny,
+ arg2: JSAny): JSAny {
+ return InvokeThen(
+ nativeContext, receiver, arg1, arg2, InvokeThenTwoArgFunctor{});
+}
+
+transitioning
+macro BranchIfAccessCheckFailed(implicit context: Context)(
+ nativeContext: NativeContext, promiseConstructor: JSAny,
+ executor: JSAny): void labels IfNoAccess {
+ try {
+ // If executor is a bound function, load the bound function until we've
+ // reached an actual function.
+ let foundExecutor = executor;
+ while (true) {
+ typeswitch (foundExecutor) {
+ case (f: JSFunction): {
+ // Load the context from the function and compare it to the Promise
+ // constructor's context. If they match, everything is fine,
+ // otherwise, bail out to the runtime.
+ const functionContext = f.context;
+ const nativeFunctionContext = LoadNativeContext(functionContext);
+ if (TaggedEqual(nativeContext, nativeFunctionContext)) {
+ goto HasAccess;
+ } else {
goto CallRuntime;
}
}
+ case (b: JSBoundFunction): {
+ foundExecutor = b.bound_target_function;
+ }
+ case (Object): {
+ goto CallRuntime;
+ }
}
}
- label CallRuntime deferred {
- const result = runtime::AllowDynamicFunction(promiseConstructor);
- if (result != True) {
- goto IfNoAccess;
- }
+ } label CallRuntime deferred {
+ const result = runtime::AllowDynamicFunction(promiseConstructor);
+ if (result != True) {
+ goto IfNoAccess;
}
- label HasAccess {}
- }
+ } label HasAccess {}
+}
}
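
For reference, a minimal TypeScript sketch of what the job created by the new
NewPromiseResolveThenableJobTask above does once the microtask queue runs it
(the Thenable type and the function names below are illustrative only, not
part of the patch):

type Thenable<T> = {
  then(onFulfilled: (v: T) => void, onRejected: (e: unknown) => void): void;
};

function newPromiseResolveThenableJob<T>(
    resolve: (v: T) => void, reject: (e: unknown) => void,
    thenable: Thenable<T>, then: Thenable<T>['then']): () => void {
  // The job calls thenAction with the thenable as receiver and the promise's
  // resolving functions as arguments; an abrupt completion rejects instead.
  return () => {
    try {
      then.call(thenable, resolve, reject);
    } catch (e) {
      reject(e);
    }
  };
}

// The job is enqueued as a microtask rather than run inline, which is why
// resolving a promise with a thenable always takes an extra tick.
const p = new Promise<number>((resolve, reject) => {
  const thenable: Thenable<number> = { then: (ok) => ok(42) };
  queueMicrotask(
      newPromiseResolveThenableJob(resolve, reject, thenable, thenable.then));
});
p.then((v) => console.log(v));  // logs 42
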
diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq
index 7e56a08c84..27d2038398 100644
--- a/deps/v8/src/builtins/promise-race.tq
+++ b/deps/v8/src/builtins/promise-race.tq
@@ -6,126 +6,124 @@
namespace promise {
- extern macro PromiseForwardingHandlerSymbolConstant(): Symbol;
- const kPromiseForwardingHandlerSymbol: Symbol =
- PromiseForwardingHandlerSymbolConstant();
- extern macro PromiseHandledBySymbolConstant(): Symbol;
- const kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant();
- extern macro ResolveStringConstant(): String;
- const kResolveString: String = ResolveStringConstant();
- extern macro SetPropertyStrict(Context, Object, Object, Object): Object;
- extern macro IsPromiseResolveProtectorCellInvalid(): bool;
+extern macro PromiseForwardingHandlerSymbolConstant(): Symbol;
+const kPromiseForwardingHandlerSymbol: Symbol =
+ PromiseForwardingHandlerSymbolConstant();
+extern macro PromiseHandledBySymbolConstant(): Symbol;
+const kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant();
+extern macro ResolveStringConstant(): String;
+const kResolveString: String = ResolveStringConstant();
+extern macro SetPropertyStrict(Context, Object, Object, Object): Object;
+extern macro IsPromiseResolveProtectorCellInvalid(): bool;
- macro IsPromiseResolveLookupChainIntact(implicit context: Context)(
- nativeContext: NativeContext, constructor: JSReceiver): bool {
- if (IsForceSlowPath()) return false;
- const promiseFun = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
- return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid();
- }
+macro IsPromiseResolveLookupChainIntact(implicit context: Context)(
+ nativeContext: NativeContext, constructor: JSReceiver): bool {
+ if (IsForceSlowPath()) return false;
+ const promiseFun = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+ return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid();
+}
- // https://tc39.es/ecma262/#sec-promise.race
- transitioning javascript builtin
- PromiseRace(js-implicit context: Context, receiver: JSAny)(iterable: JSAny):
- JSAny {
- const receiver = Cast<JSReceiver>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Promise.race');
+// https://tc39.es/ecma262/#sec-promise.race
+transitioning javascript builtin
+PromiseRace(
+ js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
+ const receiver = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.race');
- // Let promiseCapability be ? NewPromiseCapability(C).
- // Don't fire debugEvent so that forwarding the rejection through all does
- // not trigger redundant ExceptionEvents
- const capability = NewPromiseCapability(receiver, False);
- const resolve = capability.resolve;
- const reject = capability.reject;
- const promise = capability.promise;
+ // Let promiseCapability be ? NewPromiseCapability(C).
+ // Don't fire debugEvent so that forwarding the rejection through all does
+ // not trigger redundant ExceptionEvents
+ const capability = NewPromiseCapability(receiver, False);
+ const resolve = capability.resolve;
+ const reject = capability.reject;
+ const promise = capability.promise;
- // For catch prediction, don't treat the .then calls as handling it;
- // instead, recurse outwards.
- if (IsDebugActive()) deferred {
- SetPropertyStrict(
- context, reject, kPromiseForwardingHandlerSymbol, True);
- }
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ if (IsDebugActive()) deferred {
+ SetPropertyStrict(context, reject, kPromiseForwardingHandlerSymbol, True);
+ }
+ try {
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ let i: iterator::IteratorRecord;
try {
- // Let iterator be GetIterator(iterable).
- // IfAbruptRejectPromise(iterator, promiseCapability).
- let i: iterator::IteratorRecord;
- try {
- i = iterator::GetIterator(iterable);
- } catch (e) deferred {
- goto Reject(e);
- }
+ i = iterator::GetIterator(iterable);
+ } catch (e) deferred {
+ goto Reject(e);
+ }
- // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
- try {
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- const nativeContext = LoadNativeContext(context);
- let promiseResolveFunction: JSAny = Undefined;
- if (!IsPromiseResolveLookupChainIntact(nativeContext, receiver))
- deferred {
- // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
- const resolve = GetProperty(receiver, kResolveString);
+ // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
+ try {
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ const nativeContext = LoadNativeContext(context);
+ let promiseResolveFunction: JSAny = Undefined;
+ if (!IsPromiseResolveLookupChainIntact(nativeContext, receiver))
+ deferred {
+ // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
+ const resolve = GetProperty(receiver, kResolveString);
- // 4. If IsCallable(_promiseResolve_) is *false*, throw a
- // *TypeError* exception.
- promiseResolveFunction = Cast<Callable>(resolve)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, 'resolve');
- }
+ // 4. If IsCallable(_promiseResolve_) is *false*, throw a
+ // *TypeError* exception.
+ promiseResolveFunction = Cast<Callable>(resolve)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledNonCallable, 'resolve');
+ }
- const fastIteratorResultMap = UnsafeCast<Map>(
- nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
- while (true) {
- let nextValue: JSAny;
- try {
- // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
- // If next is an abrupt completion, set iteratorRecord.[[Done]] to
- // true. ReturnIfAbrupt(next).
- const next: JSReceiver = iterator::IteratorStep(
- i, fastIteratorResultMap) otherwise return promise;
+ const fastIteratorResultMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
+ while (true) {
+ let nextValue: JSAny;
+ try {
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true. ReturnIfAbrupt(next).
+ const next: JSReceiver = iterator::IteratorStep(
+ i, fastIteratorResultMap) otherwise return promise;
- // Let nextValue be IteratorValue(next).
- // If nextValue is an abrupt completion, set iteratorRecord.[[Done]]
- // to true.
- // ReturnIfAbrupt(nextValue).
- nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- } catch (e) {
- goto Reject(e);
- }
- // Let nextPromise be ? Call(constructor, _promiseResolve_, «
- // nextValue »).
- const nextPromise = CallResolve(
- UnsafeCast<Constructor>(receiver), promiseResolveFunction,
- nextValue);
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]]
+ // to true.
+ // ReturnIfAbrupt(nextValue).
+ nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
+ } catch (e) {
+ goto Reject(e);
+ }
+ // Let nextPromise be ? Call(constructor, _promiseResolve_, «
+ // nextValue »).
+ const nextPromise = CallResolve(
+ UnsafeCast<Constructor>(receiver), promiseResolveFunction,
+ nextValue);
- // Perform ? Invoke(nextPromise, "then", « resolveElement,
- // resultCapability.[[Reject]] »).
- const then = GetProperty(nextPromise, kThenString);
- const thenResult = Call(
- context, then, nextPromise, UnsafeCast<JSAny>(resolve),
- UnsafeCast<JSAny>(reject));
+ // Perform ? Invoke(nextPromise, "then", « resolveElement,
+ // resultCapability.[[Reject]] »).
+ const then = GetProperty(nextPromise, kThenString);
+ const thenResult = Call(
+ context, then, nextPromise, UnsafeCast<JSAny>(resolve),
+ UnsafeCast<JSAny>(reject));
- // For catch prediction, mark that rejections here are semantically
- // handled by the combined Promise.
- if (IsDebugActive() && !Is<JSPromise>(promise)) deferred {
- SetPropertyStrict(
- context, thenResult, kPromiseHandledBySymbol, promise);
- }
- }
- } catch (e) deferred {
- iterator::IteratorCloseOnException(i, e) otherwise Reject;
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ if (IsDebugActive() && !Is<JSPromise>(promise)) deferred {
+ SetPropertyStrict(
+ context, thenResult, kPromiseHandledBySymbol, promise);
+ }
}
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(i);
+ goto Reject(e);
}
- label Reject(exception: Object) deferred {
- Call(
- context, UnsafeCast<JSAny>(reject), Undefined,
- UnsafeCast<JSAny>(exception));
- return promise;
- }
- unreachable;
+ } label Reject(exception: Object) deferred {
+ Call(
+ context, UnsafeCast<JSAny>(reject), Undefined,
+ UnsafeCast<JSAny>(exception));
+ return promise;
}
+ unreachable;
+}
}
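
The reformatted builtin above still follows the Promise.race algorithm; for
orientation, a compact TypeScript sketch of that algorithm without the
(unobservable) protector fast paths — promiseRace is an illustrative name,
not part of the patch:

function promiseRace<T>(this: PromiseConstructor,
                        iterable: Iterable<T | PromiseLike<T>>): Promise<T> {
  const C = this;
  // Let promiseCapability be ? NewPromiseCapability(C).
  let resolve!: (v: T | PromiseLike<T>) => void;
  let reject!: (e: unknown) => void;
  const promise = new C<T>((res, rej) => { resolve = res; reject = rej; });
  try {
    // 3./4. Let promiseResolve be ? Get(C, "resolve"); throw if not callable.
    const promiseResolve: unknown = (C as { resolve?: unknown }).resolve;
    if (typeof promiseResolve !== 'function') {
      throw new TypeError('resolve is not callable');
    }
    for (const nextValue of iterable) {
      // Let nextPromise be ? Call(promiseResolve, C, « nextValue »).
      const nextPromise = promiseResolve.call(C, nextValue) as PromiseLike<T>;
      // Perform ? Invoke(nextPromise, "then", « resolve, reject »).
      nextPromise.then(resolve, reject);
    }
  } catch (e) {
    // IfAbruptRejectPromise: iteration or lookup failures reject the result.
    reject(e);
  }
  return promise;
}
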
diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq
index f17886c0d1..1e89da0261 100644
--- a/deps/v8/src/builtins/promise-reaction-job.tq
+++ b/deps/v8/src/builtins/promise-reaction-job.tq
@@ -6,118 +6,118 @@
namespace promise {
- transitioning
- macro RejectPromiseReactionJob(
- context: Context,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, reason: JSAny,
- reactionType: constexpr PromiseReactionType): JSAny {
- if constexpr (reactionType == kPromiseReactionReject) {
- typeswitch (promiseOrCapability) {
- case (promise: JSPromise): {
- // For fast native promises we can skip the indirection via the
- // promiseCapability.[[Reject]] function and run the resolve logic
- // directly from here.
- return RejectPromise(promise, reason, False);
- }
- case (Undefined): {
- return Undefined;
- }
- case (capability: PromiseCapability): {
- // In the general case we need to call the (user provided)
- // promiseCapability.[[Reject]] function.
- const reject = UnsafeCast<Callable>(capability.reject);
- return Call(context, reject, Undefined, reason);
- }
- }
- } else {
- StaticAssert(reactionType == kPromiseReactionFulfill);
- // We have to call out to the dedicated PromiseRejectReactionJob
- // builtin here, instead of just doing the work inline, as otherwise
- // the catch predictions in the debugger will be wrong, which just
- // walks the stack and checks for certain builtins.
- return PromiseRejectReactionJob(reason, Undefined, promiseOrCapability);
- }
- }
-
- transitioning
- macro FuflfillPromiseReactionJob(
- context: Context,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, result: JSAny,
- reactionType: constexpr PromiseReactionType): JSAny {
+transitioning
+macro RejectPromiseReactionJob(
+ context: Context,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, reason: JSAny,
+ reactionType: constexpr PromiseReactionType): JSAny {
+ if constexpr (reactionType == kPromiseReactionReject) {
typeswitch (promiseOrCapability) {
case (promise: JSPromise): {
// For fast native promises we can skip the indirection via the
- // promiseCapability.[[Resolve]] function and run the resolve logic
+ // promiseCapability.[[Reject]] function and run the resolve logic
// directly from here.
- return ResolvePromise(context, promise, result);
+ return RejectPromise(promise, reason, False);
}
case (Undefined): {
return Undefined;
}
case (capability: PromiseCapability): {
// In the general case we need to call the (user provided)
- // promiseCapability.[[Resolve]] function.
- const resolve = UnsafeCast<Callable>(capability.resolve);
- try {
- return Call(context, resolve, Undefined, result);
- } catch (e) {
- return RejectPromiseReactionJob(
- context, promiseOrCapability, e, reactionType);
- }
+ // promiseCapability.[[Reject]] function.
+ const reject = UnsafeCast<Callable>(capability.reject);
+ return Call(context, reject, Undefined, reason);
}
}
+ } else {
+ StaticAssert(reactionType == kPromiseReactionFulfill);
+ // We have to call out to the dedicated PromiseRejectReactionJob
+ // builtin here, instead of just doing the work inline, as otherwise
+ // the catch predictions in the debugger will be wrong, which just
+ // walks the stack and checks for certain builtins.
+ return PromiseRejectReactionJob(reason, Undefined, promiseOrCapability);
}
+}
- // https://tc39.es/ecma262/#sec-promisereactionjob
- transitioning
- macro PromiseReactionJob(
- context: Context, argument: JSAny, handler: Callable|Undefined,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined,
- reactionType: constexpr PromiseReactionType): JSAny {
- if (handler == Undefined) {
- if constexpr (reactionType == kPromiseReactionFulfill) {
- return FuflfillPromiseReactionJob(
- context, promiseOrCapability, argument, reactionType);
- } else {
- StaticAssert(reactionType == kPromiseReactionReject);
- return RejectPromiseReactionJob(
- context, promiseOrCapability, argument, reactionType);
- }
- } else {
+transitioning
+macro FuflfillPromiseReactionJob(
+ context: Context,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, result: JSAny,
+ reactionType: constexpr PromiseReactionType): JSAny {
+ typeswitch (promiseOrCapability) {
+ case (promise: JSPromise): {
+ // For fast native promises we can skip the indirection via the
+ // promiseCapability.[[Resolve]] function and run the resolve logic
+ // directly from here.
+ return ResolvePromise(context, promise, result);
+ }
+ case (Undefined): {
+ return Undefined;
+ }
+ case (capability: PromiseCapability): {
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Resolve]] function.
+ const resolve = UnsafeCast<Callable>(capability.resolve);
try {
- const result =
- Call(context, UnsafeCast<Callable>(handler), Undefined, argument);
- if (promiseOrCapability == Undefined) {
- // There's no [[Capability]] for this promise reaction job, which
- // means that this is a specification-internal operation (aka
- // await) where the result does not matter (see the specification
- // change in https://github.com/tc39/ecma262/pull/1146 for
- // details).
- return Undefined;
- } else {
- return FuflfillPromiseReactionJob(
- context, promiseOrCapability, result, reactionType);
- }
+ return Call(context, resolve, Undefined, result);
} catch (e) {
return RejectPromiseReactionJob(
context, promiseOrCapability, e, reactionType);
}
}
}
+}
- transitioning builtin
- PromiseFulfillReactionJob(implicit context: Context)(
- value: JSAny, handler: Callable|Undefined,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined): JSAny {
- return PromiseReactionJob(
- context, value, handler, promiseOrCapability, kPromiseReactionFulfill);
+// https://tc39.es/ecma262/#sec-promisereactionjob
+transitioning
+macro PromiseReactionJob(
+ context: Context, argument: JSAny, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined,
+ reactionType: constexpr PromiseReactionType): JSAny {
+ if (handler == Undefined) {
+ if constexpr (reactionType == kPromiseReactionFulfill) {
+ return FuflfillPromiseReactionJob(
+ context, promiseOrCapability, argument, reactionType);
+ } else {
+ StaticAssert(reactionType == kPromiseReactionReject);
+ return RejectPromiseReactionJob(
+ context, promiseOrCapability, argument, reactionType);
+ }
+ } else {
+ try {
+ const result =
+ Call(context, UnsafeCast<Callable>(handler), Undefined, argument);
+ if (promiseOrCapability == Undefined) {
+ // There's no [[Capability]] for this promise reaction job, which
+ // means that this is a specification-internal operation (aka
+ // await) where the result does not matter (see the specification
+ // change in https://github.com/tc39/ecma262/pull/1146 for
+ // details).
+ return Undefined;
+ } else {
+ return FuflfillPromiseReactionJob(
+ context, promiseOrCapability, result, reactionType);
+ }
+ } catch (e) {
+ return RejectPromiseReactionJob(
+ context, promiseOrCapability, e, reactionType);
+ }
}
+}
- transitioning builtin
- PromiseRejectReactionJob(implicit context: Context)(
- reason: JSAny, handler: Callable|Undefined,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined): JSAny {
- return PromiseReactionJob(
- context, reason, handler, promiseOrCapability, kPromiseReactionReject);
- }
+transitioning builtin
+PromiseFulfillReactionJob(implicit context: Context)(
+ value: JSAny, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): JSAny {
+ return PromiseReactionJob(
+ context, value, handler, promiseOrCapability, kPromiseReactionFulfill);
+}
+
+transitioning builtin
+PromiseRejectReactionJob(implicit context: Context)(
+ reason: JSAny, handler: Callable|Undefined,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): JSAny {
+ return PromiseReactionJob(
+ context, reason, handler, promiseOrCapability, kPromiseReactionReject);
+}
}
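
A minimal TypeScript sketch of the PromiseReactionJob dispatch implemented
above (Capability stands in for promiseOrCapability; all names here are
illustrative, not part of the patch):

type ReactionType = 'fulfill' | 'reject';
interface Capability<T> {
  resolve: (value: T) => void;
  reject: (reason: unknown) => void;
}

function promiseReactionJob<T>(
    argument: unknown, handler: ((arg: unknown) => T) | undefined,
    capability: Capability<T> | undefined, type: ReactionType): void {
  if (handler === undefined) {
    // No handler: forward the value or reason unchanged to the derived
    // promise (this is what a bare .then(undefined, undefined) does).
    if (capability === undefined) return;  // await-internal reaction
    if (type === 'fulfill') capability.resolve(argument as T);
    else capability.reject(argument);
    return;
  }
  try {
    const result = handler(argument);
    // With no capability (await), the handler result is simply dropped.
    if (capability !== undefined) capability.resolve(result);
  } catch (e) {
    if (capability !== undefined) capability.reject(e);
  }
}
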
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index 0fc98b556b..dbb60720c0 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -5,190 +5,180 @@
#include 'src/builtins/builtins-promise-gen.h'
namespace runtime {
- extern transitioning runtime
- ResolvePromise(implicit context: Context)(JSPromise, JSAny): JSAny;
+extern transitioning runtime
+ResolvePromise(implicit context: Context)(JSPromise, JSAny): JSAny;
}
namespace promise {
- extern macro ConstructorStringConstant(): String;
- const kConstructorString: String = ConstructorStringConstant();
-
- // https://tc39.es/ecma262/#sec-promise.resolve
- transitioning javascript builtin
- PromiseResolveTrampoline(js-implicit context: NativeContext, receiver: JSAny)(
- value: JSAny): JSAny {
- // 1. Let C be the this value.
- // 2. If Type(C) is not Object, throw a TypeError exception.
- const receiver = Cast<JSReceiver>(receiver) otherwise
- ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'PromiseResolve');
-
- // 3. Return ? PromiseResolve(C, x).
- return PromiseResolve(receiver, value);
- }
-
- transitioning builtin
- PromiseResolve(implicit context:
- Context)(constructor: JSReceiver, value: JSAny): JSAny {
- const nativeContext = LoadNativeContext(context);
- const promiseFun = nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX];
- try {
- // Check if {value} is a JSPromise.
- const value = Cast<JSPromise>(value) otherwise NeedToAllocate;
-
- // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
- // is the (initial) Promise.prototype and the @@species protector is
- // intact, as that guards the lookup path for "constructor" on
- // JSPromise instances which have the (initial) Promise.prototype.
- const promisePrototype =
- nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
- if (value.map.prototype != promisePrototype) {
- goto SlowConstructor;
- }
-
- if (IsPromiseSpeciesProtectorCellInvalid()) goto SlowConstructor;
+extern macro ConstructorStringConstant(): String;
+const kConstructorString: String = ConstructorStringConstant();
+
+// https://tc39.es/ecma262/#sec-promise.resolve
+transitioning javascript builtin
+PromiseResolveTrampoline(
+ js-implicit context: NativeContext, receiver: JSAny)(value: JSAny): JSAny {
+ // 1. Let C be the this value.
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ const receiver = Cast<JSReceiver>(receiver) otherwise
+ ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'PromiseResolve');
+
+ // 3. Return ? PromiseResolve(C, x).
+ return PromiseResolve(receiver, value);
+}
- // If the {constructor} is the Promise function, we just immediately
- // return the {value} here and don't bother wrapping it into a
- // native Promise.
- if (promiseFun != constructor) goto SlowConstructor;
- return value;
- }
- label SlowConstructor deferred {
- // At this point, value or/and constructor are not native promises, but
- // they could be of the same subclass.
- const valueConstructor = GetProperty(value, kConstructorString);
- if (valueConstructor != constructor) goto NeedToAllocate;
- return value;
- }
- label NeedToAllocate {
- if (promiseFun == constructor) {
- // This adds a fast path for native promises that don't need to
- // create NewPromiseCapability.
- const result = NewJSPromise();
- ResolvePromise(context, result, value);
- return result;
- } else
- deferred {
- const capability = NewPromiseCapability(constructor, True);
- const resolve = UnsafeCast<Callable>(capability.resolve);
- Call(context, resolve, Undefined, value);
- return capability.promise;
- }
+transitioning builtin
+PromiseResolve(implicit context: Context)(
+ constructor: JSReceiver, value: JSAny): JSAny {
+ const nativeContext = LoadNativeContext(context);
+ const promiseFun = nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX];
+ try {
+ // Check if {value} is a JSPromise.
+ const value = Cast<JSPromise>(value) otherwise NeedToAllocate;
+
+ // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ const promisePrototype =
+ nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
+ if (value.map.prototype != promisePrototype) {
+ goto SlowConstructor;
}
- }
-
- extern macro IsJSReceiverMap(Map): bool;
- extern macro IsPromiseThenProtectorCellInvalid(): bool;
-
- extern macro ThenStringConstant(): String;
-
- const kThenString: String = ThenStringConstant();
-
- transitioning builtin
- ResolvePromise(implicit context:
- Context)(promise: JSPromise, resolution: JSAny): JSAny {
- // 6. If SameValue(resolution, promise) is true, then
- // If promise hook is enabled or the debugger is active, let
- // the runtime handle this operation, which greatly reduces
- // the complexity here and also avoids a couple of back and
- // forth between JavaScript and C++ land.
- // We also let the runtime handle it if promise == resolution.
- // We can use pointer comparison here, since the {promise} is guaranteed
- // to be a JSPromise inside this function and thus is reference comparable.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
- TaggedEqual(promise, resolution))
+ if (IsPromiseSpeciesProtectorCellInvalid()) goto SlowConstructor;
+
+ // If the {constructor} is the Promise function, we just immediately
+ // return the {value} here and don't bother wrapping it into a
+ // native Promise.
+ if (promiseFun != constructor) goto SlowConstructor;
+ return value;
+ } label SlowConstructor deferred {
+ // At this point, value or/and constructor are not native promises, but
+ // they could be of the same subclass.
+ const valueConstructor = GetProperty(value, kConstructorString);
+ if (valueConstructor != constructor) goto NeedToAllocate;
+ return value;
+ } label NeedToAllocate {
+ if (promiseFun == constructor) {
+ // This adds a fast path for native promises that don't need to
+ // create NewPromiseCapability.
+ const result = NewJSPromise();
+ ResolvePromise(context, result, value);
+ return result;
+ } else
deferred {
- return runtime::ResolvePromise(promise, resolution);
- }
-
- let then: Object = Undefined;
- try {
- // 7. If Type(resolution) is not Object, then
- // 7.b Return FulfillPromise(promise, resolution).
- if (TaggedIsSmi(resolution)) {
- return FulfillPromise(promise, resolution);
+ const capability = NewPromiseCapability(constructor, True);
+ const resolve = UnsafeCast<Callable>(capability.resolve);
+ Call(context, resolve, Undefined, value);
+ return capability.promise;
}
+ }
+}
- const heapResolution = UnsafeCast<HeapObject>(resolution);
- const resolutionMap = heapResolution.map;
- if (!IsJSReceiverMap(resolutionMap)) {
- return FulfillPromise(promise, resolution);
- }
+extern macro IsJSReceiverMap(Map): bool;
+
+extern macro IsPromiseThenProtectorCellInvalid(): bool;
+
+extern macro ThenStringConstant(): String;
+
+const kThenString: String = ThenStringConstant();
+
+// https://tc39.es/ecma262/#sec-promise-resolve-functions
+transitioning builtin
+ResolvePromise(implicit context: Context)(
+ promise: JSPromise, resolution: JSAny): JSAny {
+ // 7. If SameValue(resolution, promise) is true, then
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+ // the complexity here and also avoids a couple of back and
+ // forth between JavaScript and C++ land.
+ // We also let the runtime handle it if promise == resolution.
+ // We can use pointer comparison here, since the {promise} is guaranteed
+ // to be a JSPromise inside this function and thus is reference comparable.
+ if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ TaggedEqual(promise, resolution))
+ deferred {
+ return runtime::ResolvePromise(promise, resolution);
+ }
- // We can skip the "then" lookup on {resolution} if its [[Prototype]]
- // is the (initial) Promise.prototype and the Promise#then protector
- // is intact, as that guards the lookup path for the "then" property
- // on JSPromise instances which have the (initial) %PromisePrototype%.
- if (IsForceSlowPath()) {
- goto Slow;
- }
+ let then: Object = Undefined;
+ try {
+ // 8. If Type(resolution) is not Object, then
+ // 8.a Return FulfillPromise(promise, resolution).
+ if (TaggedIsSmi(resolution)) {
+ return FulfillPromise(promise, resolution);
+ }
- if (IsPromiseThenProtectorCellInvalid()) {
- goto Slow;
- }
+ const heapResolution = UnsafeCast<HeapObject>(resolution);
+ const resolutionMap = heapResolution.map;
+ if (!IsJSReceiverMap(resolutionMap)) {
+ return FulfillPromise(promise, resolution);
+ }
- const nativeContext = LoadNativeContext(context);
- if (!IsJSPromiseMap(resolutionMap)) {
- // We can skip the lookup of "then" if the {resolution} is a (newly
- // created) IterResultObject, as the Promise#then() protector also
- // ensures that the intrinsic %ObjectPrototype% doesn't contain any
- // "then" property. This helps to avoid negative lookups on iterator
- // results from async generators.
- assert(IsJSReceiverMap(resolutionMap));
- assert(!IsPromiseThenProtectorCellInvalid());
- if (resolutionMap ==
- nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]) {
- return FulfillPromise(promise, resolution);
- } else {
- goto Slow;
- }
- }
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ if (IsForceSlowPath()) {
+ goto Slow;
+ }
- const promisePrototype =
- nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
- if (resolutionMap.prototype == promisePrototype) {
- // The {resolution} is a native Promise in this case.
- then = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX];
- goto Enqueue;
- }
+ if (IsPromiseThenProtectorCellInvalid()) {
goto Slow;
}
- label Slow deferred {
- // 8. Let then be Get(resolution, "then").
- // 9. If then is an abrupt completion, then
- // 9.a Return RejectPromise(promise, then.[[Value]]).
- try {
- then = GetProperty(resolution, kThenString);
- } catch (e) {
- return RejectPromise(promise, e, False);
- }
- // 11. If IsCallable(thenAction) is false, then
- if (TaggedIsSmi(then)) {
+ const nativeContext = LoadNativeContext(context);
+ if (!IsJSPromiseMap(resolutionMap)) {
+ // We can skip the lookup of "then" if the {resolution} is a (newly
+ // created) IterResultObject, as the Promise#then() protector also
+ // ensures that the intrinsic %ObjectPrototype% doesn't contain any
+ // "then" property. This helps to avoid negative lookups on iterator
+ // results from async generators.
+ assert(IsJSReceiverMap(resolutionMap));
+ assert(!IsPromiseThenProtectorCellInvalid());
+ if (resolutionMap ==
+ nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]) {
return FulfillPromise(promise, resolution);
+ } else {
+ goto Slow;
}
+ }
- if (!IsCallable(UnsafeCast<HeapObject>(then))) {
- return FulfillPromise(promise, resolution);
- }
+ const promisePrototype =
+ nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
+ if (resolutionMap.prototype == promisePrototype) {
+ // The {resolution} is a native Promise in this case.
+ then = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX];
goto Enqueue;
}
- label Enqueue {
- // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
- // «promise, resolution, thenAction»).
-
- // According to HTML, we use the context of the then function
- // (|thenAction|) as the context of the microtask. See step 3 of HTML's
- // EnqueueJob:
- // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
- const thenContext: Context =
- ExtractHandlerContext(UnsafeCast<Callable>(then));
- const nativeContext = LoadNativeContext(thenContext);
- const task = NewPromiseResolveThenableJobTask(
- promise, UnsafeCast<JSReceiver>(then),
- UnsafeCast<JSReceiver>(resolution), nativeContext);
- return EnqueueMicrotask(nativeContext, task);
+ goto Slow;
+ } label Slow deferred {
+ // 9. Let then be Get(resolution, "then").
+ // 10. If then is an abrupt completion, then
+ try {
+ then = GetProperty(resolution, kThenString);
+ } catch (e) {
+ // a. Return RejectPromise(promise, then.[[Value]]).
+ return RejectPromise(promise, e, False);
}
+
+ // 11. Let thenAction be then.[[Value]].
+ // 12. If IsCallable(thenAction) is false, then
+ if (!Is<Callable>(then)) {
+ // a. Return FulfillPromise(promise, resolution).
+ return FulfillPromise(promise, resolution);
+ }
+ goto Enqueue;
+ } label Enqueue {
+ // 13. Let job be NewPromiseResolveThenableJob(promise, resolution,
+ // thenAction).
+ const task = NewPromiseResolveThenableJobTask(
+ promise, UnsafeCast<JSReceiver>(resolution),
+ UnsafeCast<Callable>(then));
+
+ // 14. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]).
+ // 15. Return undefined.
+ return EnqueueMicrotask(task.context, task);
}
}
+}
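
The PromiseResolve builtin above keeps the same observable behavior as the
spec algorithm; a rough TypeScript sketch for orientation (the instanceof
test approximates the spec's IsPromise check, and promiseResolve is an
illustrative name, not part of the patch):

function promiseResolve<T>(C: PromiseConstructor,
                           x: T | PromiseLike<T>): Promise<T> {
  // If x is already a promise whose constructor is C, return it unchanged.
  if (x instanceof Promise && x.constructor === C) {
    return x as Promise<T>;
  }
  // Otherwise make a fresh capability from C and resolve it with x; the
  // resolving function unwraps thenables by enqueueing a
  // PromiseResolveThenableJob (see promise-misc.tq earlier in this diff).
  return new C<T>((resolve) => resolve(x));
}

// Usage: native promises pass through untouched, values and foreign
// thenables get wrapped in a new promise.
const already = Promise.resolve(1);
console.log(promiseResolve(Promise, already) === already);  // true
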
diff --git a/deps/v8/src/builtins/promise-then.tq b/deps/v8/src/builtins/promise-then.tq
index 45f8fd0c81..3de6d277d8 100644
--- a/deps/v8/src/builtins/promise-then.tq
+++ b/deps/v8/src/builtins/promise-then.tq
@@ -6,74 +6,69 @@
namespace promise {
- macro
- IsPromiseSpeciesLookupChainIntact(
- nativeContext: NativeContext, promiseMap: Map): bool {
- const promisePrototype =
- nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
- if (IsForceSlowPath()) return false;
- if (promiseMap.prototype != promisePrototype) return false;
- return !IsPromiseSpeciesProtectorCellInvalid();
- }
-
- // https://tc39.es/ecma262/#sec-promise.prototype.then
- transitioning javascript builtin
- PromisePrototypeThen(js-implicit context: NativeContext, receiver: JSAny)(
- onFulfilled: JSAny, onRejected: JSAny): JSAny {
- // 1. Let promise be the this value.
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- const promise = Cast<JSPromise>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, 'Promise.prototype.then',
- receiver);
+macro
+IsPromiseSpeciesLookupChainIntact(
+ nativeContext: NativeContext, promiseMap: Map): bool {
+ const promisePrototype =
+ nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX];
+ if (IsForceSlowPath()) return false;
+ if (promiseMap.prototype != promisePrototype) return false;
+ return !IsPromiseSpeciesProtectorCellInvalid();
+}
- // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- const promiseFun = UnsafeCast<JSFunction>(
- context[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+// https://tc39.es/ecma262/#sec-promise.prototype.then
+transitioning javascript builtin
+PromisePrototypeThen(js-implicit context: NativeContext, receiver: JSAny)(
+ onFulfilled: JSAny, onRejected: JSAny): JSAny {
+ // 1. Let promise be the this value.
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ const promise = Cast<JSPromise>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, 'Promise.prototype.then',
+ receiver);
- // 4. Let resultCapability be ? NewPromiseCapability(C).
- let resultPromiseOrCapability: JSPromise|PromiseCapability;
- let resultPromise: JSAny;
- try {
- if (IsPromiseSpeciesLookupChainIntact(context, promise.map)) {
- goto AllocateAndInit;
- }
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ const promiseFun = UnsafeCast<JSFunction>(
+ context[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
- const constructor = SpeciesConstructor(promise, promiseFun);
- if (TaggedEqual(constructor, promiseFun)) {
- goto AllocateAndInit;
- } else {
- const promiseCapability = NewPromiseCapability(constructor, True);
- resultPromiseOrCapability = promiseCapability;
- resultPromise = promiseCapability.promise;
- }
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ let resultPromiseOrCapability: JSPromise|PromiseCapability;
+ let resultPromise: JSAny;
+ try {
+ if (IsPromiseSpeciesLookupChainIntact(context, promise.map)) {
+ goto AllocateAndInit;
}
- label AllocateAndInit {
- const resultJSPromise = NewJSPromise(promise);
- resultPromiseOrCapability = resultJSPromise;
- resultPromise = resultJSPromise;
+
+ const constructor = SpeciesConstructor(promise, promiseFun);
+ if (TaggedEqual(constructor, promiseFun)) {
+ goto AllocateAndInit;
+ } else {
+ const promiseCapability = NewPromiseCapability(constructor, True);
+ resultPromiseOrCapability = promiseCapability;
+ resultPromise = promiseCapability.promise;
}
+ } label AllocateAndInit {
+ const resultJSPromise = NewJSPromise(promise);
+ resultPromiseOrCapability = resultJSPromise;
+ resultPromise = resultJSPromise;
+ }
- // We do some work of the PerformPromiseThen operation here, in that
- // we check the handlers and turn non-callable handlers into undefined.
- // This is because this is the one and only callsite of PerformPromiseThen
- // that has to do this.
+ // We do some work of the PerformPromiseThen operation here, in that
+ // we check the handlers and turn non-callable handlers into undefined.
+ // This is because this is the one and only callsite of PerformPromiseThen
+ // that has to do this.
- // 3. If IsCallable(onFulfilled) is false, then
- // a. Set onFulfilled to undefined.
- const onFulfilled = TaggedIsCallable(onFulfilled) ?
- UnsafeCast<Callable>(onFulfilled) :
- Undefined;
+ // 3. If IsCallable(onFulfilled) is false, then
+ // a. Set onFulfilled to undefined.
+ const onFulfilled = CastOrDefault<Callable>(onFulfilled, Undefined);
- // 4. If IsCallable(onRejected) is false, then
- // a. Set onRejected to undefined.
- const onRejected = TaggedIsCallable(onRejected) ?
- UnsafeCast<Callable>(onRejected) :
- Undefined;
+ // 4. If IsCallable(onRejected) is false, then
+ // a. Set onRejected to undefined.
+ const onRejected = CastOrDefault<Callable>(onRejected, Undefined);
- // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
- // resultCapability).
- PerformPromiseThenImpl(
- promise, onFulfilled, onRejected, resultPromiseOrCapability);
- return resultPromise;
- }
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ PerformPromiseThenImpl(
+ promise, onFulfilled, onRejected, resultPromiseOrCapability);
+ return resultPromise;
+}
}
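
The two pieces of work PromisePrototypeThen does above before handing off to
PerformPromiseThenImpl are picking the result-promise constructor via
@@species and turning non-callable handlers into undefined; a small
TypeScript sketch under those assumptions (helper names are illustrative,
not part of the patch):

function speciesConstructor(promise: Promise<unknown>): PromiseConstructor {
  // 3. Let C be ? SpeciesConstructor(promise, %Promise%): look at
  // promise.constructor[@@species] and fall back to %Promise%.
  const ctor = (promise as any).constructor;
  const species = ctor == null ? undefined : ctor[Symbol.species];
  return (species === undefined || species === null)
      ? Promise
      : (species as PromiseConstructor);
}

function coerceHandler<A, R>(handler: unknown): ((arg: A) => R) | undefined {
  // Non-callable handlers become undefined, so the reaction job simply
  // forwards the value or reason to the result promise.
  return typeof handler === 'function' ? (handler as (arg: A) => R) : undefined;
}

// Usage: both pieces feed PerformPromiseThen with a result capability built
// from the species constructor and with normalized handlers.
console.log(speciesConstructor(Promise.resolve(1)) === Promise);  // true
console.log(coerceHandler<number, number>('not callable'));       // undefined
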
diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq
index ef886e4f28..ea31ff6db8 100644
--- a/deps/v8/src/builtins/proxy-constructor.tq
+++ b/deps/v8/src/builtins/proxy-constructor.tq
@@ -6,55 +6,40 @@
namespace proxy {
- // ES #sec-proxy-constructor
- // https://tc39.github.io/ecma262/#sec-proxy-constructor
- transitioning javascript builtin
- ProxyConstructor(
- js-implicit context: NativeContext, receiver: JSAny,
- newTarget: JSAny)(target: JSAny, handler: JSAny): JSProxy {
- try {
- // 1. If NewTarget is undefined, throw a TypeError exception.
- if (newTarget == Undefined) {
- ThrowTypeError(MessageTemplate::kConstructorNotFunction, 'Proxy');
- }
-
- // 2. Return ? ProxyCreate(target, handler).
- // https://tc39.github.io/ecma262/#sec-proxycreate
- // 1. If Type(target) is not Object, throw a TypeError exception.
- // 2. If target is a Proxy exotic object and target.[[ProxyHandler]] is
- // null, throw a TypeError exception.
- // 3. If Type(handler) is not Object, throw a TypeError exception.
- // 4. If handler is a Proxy exotic object and handler.[[ProxyHandler]]
- // is null, throw a TypeError exception.
- const targetJSReceiver =
- Cast<JSReceiver>(target) otherwise ThrowProxyNonObject;
- if (IsRevokedProxy(targetJSReceiver)) {
- goto ThrowProxyHandlerOrTargetRevoked;
- }
+// ES #sec-proxy-constructor
+// https://tc39.github.io/ecma262/#sec-proxy-constructor
+transitioning javascript builtin
+ProxyConstructor(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny)(
+ target: JSAny, handler: JSAny): JSProxy {
+ try {
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (newTarget == Undefined) {
+ ThrowTypeError(MessageTemplate::kConstructorNotFunction, 'Proxy');
+ }
- const handlerJSReceiver =
- Cast<JSReceiver>(handler) otherwise ThrowProxyNonObject;
- if (IsRevokedProxy(handlerJSReceiver)) {
- goto ThrowProxyHandlerOrTargetRevoked;
- }
+ // 2. Return ? ProxyCreate(target, handler).
+ // https://tc39.github.io/ecma262/#sec-proxycreate
+ // 1. If Type(target) is not Object, throw a TypeError exception.
+ // 2. If Type(handler) is not Object, throw a TypeError exception.
+ const targetJSReceiver =
+ Cast<JSReceiver>(target) otherwise ThrowProxyNonObject;
+ const handlerJSReceiver =
+ Cast<JSReceiver>(handler) otherwise ThrowProxyNonObject;
- // 5. Let P be a newly created object.
- // 6. Set P's essential internal methods (except for [[Call]] and
- // [[Construct]]) to the definitions specified in 9.5.
- // 7. If IsCallable(target) is true, then
- // a. Set P.[[Call]] as specified in 9.5.12.
- // b. If IsConstructor(target) is true, then
- // 1. Set P.[[Construct]] as specified in 9.5.13.
- // 8. Set P.[[ProxyTarget]] to target.
- // 9. Set P.[[ProxyHandler]] to handler.
- // 10. Return P.
- return AllocateProxy(targetJSReceiver, handlerJSReceiver);
- }
- label ThrowProxyNonObject deferred {
- ThrowTypeError(MessageTemplate::kProxyNonObject);
- }
- label ThrowProxyHandlerOrTargetRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked);
- }
+ // 5. Let P be a newly created object.
+ // 6. Set P's essential internal methods (except for [[Call]] and
+ // [[Construct]]) to the definitions specified in 9.5.
+ // 7. If IsCallable(target) is true, then
+ // a. Set P.[[Call]] as specified in 9.5.12.
+ // b. If IsConstructor(target) is true, then
+ // 1. Set P.[[Construct]] as specified in 9.5.13.
+ // 8. Set P.[[ProxyTarget]] to target.
+ // 9. Set P.[[ProxyHandler]] to handler.
+ // 10. Return P.
+ return AllocateProxy(targetJSReceiver, handlerJSReceiver);
+ } label ThrowProxyNonObject deferred {
+ ThrowTypeError(MessageTemplate::kProxyNonObject);
}
}
+}
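
After this change only the two "must be an Object" checks of ProxyCreate
remain; the revoked-proxy checks removed above are gone. A minimal
TypeScript sketch of the remaining steps (isObject and proxyCreate are
illustrative names, not part of the patch):

function isObject(x: unknown): x is object {
  return (typeof x === 'object' && x !== null) || typeof x === 'function';
}

function proxyCreate(target: unknown, handler: unknown): object {
  // 1. If Type(target) is not Object, throw a TypeError exception.
  if (!isObject(target)) {
    throw new TypeError('Cannot create proxy with a non-object as target');
  }
  // 2. If Type(handler) is not Object, throw a TypeError exception.
  if (!isObject(handler)) {
    throw new TypeError('Cannot create proxy with a non-object as handler');
  }
  // 5.-10. Allocate P with [[ProxyTarget]] = target and
  // [[ProxyHandler]] = handler.
  return new Proxy(target, handler as ProxyHandler<object>);
}
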
diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq
index b068f3afb1..45914a6ed5 100644
--- a/deps/v8/src/builtins/proxy-delete-property.tq
+++ b/deps/v8/src/builtins/proxy-delete-property.tq
@@ -6,66 +6,64 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p
- // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p
- transitioning builtin
- ProxyDeleteProperty(implicit context: Context)(
- proxy: JSProxy, name: PropertyKey, languageMode: LanguageModeSmi): JSAny {
- const kTrapName: constexpr string = 'deleteProperty';
- // Handle deeply nested proxy.
- PerformStackCheck();
- // 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(IsName(name));
- assert(!IsPrivateSymbol(name));
+// ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p
+// https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p
+transitioning builtin
+ProxyDeleteProperty(implicit context: Context)(
+ proxy: JSProxy, name: PropertyKey, languageMode: LanguageModeSmi): JSAny {
+ const kTrapName: constexpr string = 'deleteProperty';
+ // Handle deeply nested proxy.
+ PerformStackCheck();
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
- try {
- // 2. Let handler be O.[[ProxyHandler]].
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
- // 5. Let target be O.[[ProxyTarget]].
- const target = UnsafeCast<JSReceiver>(proxy.target);
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = UnsafeCast<JSReceiver>(proxy.target);
- // 6. Let trap be ? GetMethod(handler, "deleteProperty").
- // 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handler, kTrapName)
- otherwise goto TrapUndefined(target);
+ // 6. Let trap be ? GetMethod(handler, "deleteProperty").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
- // « target, P »)).
- const trapResult = Call(context, trap, handler, target, name);
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // « target, P »)).
+ const trapResult = Call(context, trap, handler, target, name);
- // 9. If booleanTrapResult is false, return false.
- if (!ToBoolean(trapResult)) {
- const strictValue: LanguageModeSmi = LanguageMode::kStrict;
- if (languageMode == strictValue) {
- ThrowTypeError(
- MessageTemplate::kProxyTrapReturnedFalsishFor, kTrapName, name);
- }
- return False;
+ // 9. If booleanTrapResult is false, return false.
+ if (!ToBoolean(trapResult)) {
+ const strictValue: LanguageModeSmi = LanguageMode::kStrict;
+ if (languageMode == strictValue) {
+ ThrowTypeError(
+ MessageTemplate::kProxyTrapReturnedFalsishFor, kTrapName, name);
}
+ return False;
+ }
- // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
- // 11. If targetDesc is undefined, return true.
- // 12. If targetDesc.[[Configurable]] is false, throw a TypeError
- // exception.
- // 13. Let extensibleTarget be ? IsExtensible(target).
- // 14. If extensibleTarget is false, throw a TypeError exception.
- CheckDeleteTrapResult(target, proxy, name);
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 11. If targetDesc is undefined, return true.
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ CheckDeleteTrapResult(target, proxy, name);
- // 15. Return true.
- return True;
- }
- label TrapUndefined(target: JSAny) {
- // 7.a. Return ? target.[[Delete]](P).
- return DeleteProperty(target, name, languageMode);
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
- }
+ // 15. Return true.
+ return True;
+ } label TrapUndefined(target: JSAny) {
+ // 7.a. Return ? target.[[Delete]](P).
+ return DeleteProperty(target, name, languageMode);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
}
}
+}
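
For orientation, a TypeScript sketch of the deleteProperty trap semantics the
builtin above implements, with the languageMode parameter modeled as a
boolean `strict` flag (all names here are illustrative, not part of the
patch):

function proxyDeleteProperty(target: object, handler: ProxyHandler<object>,
                             name: string | symbol, strict: boolean): boolean {
  const trap = handler.deleteProperty;
  // 7.a. With no trap, fall through to target.[[Delete]](P).
  if (trap === undefined) return Reflect.deleteProperty(target, name);
  // 8. booleanTrapResult = ToBoolean(? Call(trap, handler, « target, P »)).
  const booleanTrapResult = Boolean(trap.call(handler, target, name));
  // 9. A falsish result returns false, or throws in strict mode.
  if (!booleanTrapResult) {
    if (strict) {
      throw new TypeError(
          `'deleteProperty' on proxy: trap returned falsish for '${String(name)}'`);
    }
    return false;
  }
  // 10.-14. The trap may not report success for a non-configurable own
  // property, nor for any own property of a non-extensible target
  // (CheckDeleteTrapResult above).
  const desc = Object.getOwnPropertyDescriptor(target, name);
  if (desc !== undefined &&
      (!desc.configurable || !Object.isExtensible(target))) {
    throw new TypeError(
        `'deleteProperty' on proxy: inconsistent result for '${String(name)}'`);
  }
  // 15. Return true.
  return true;
}
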
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index 7138648a85..2d6a1edee6 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -6,60 +6,59 @@
namespace proxy {
- extern transitioning builtin GetPropertyWithReceiver(
- implicit context: Context)(JSAny, Name, JSAny, Smi): JSAny;
+extern transitioning builtin GetPropertyWithReceiver(implicit context: Context)(
+ JSAny, Name, JSAny, Smi): JSAny;
- // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
- // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
- transitioning builtin
- ProxyGetProperty(implicit context: Context)(
- proxy: JSProxy, name: PropertyKey, receiverValue: JSAny,
- onNonExistent: Smi): JSAny {
- PerformStackCheck();
- // 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(IsName(name));
- assert(!IsPrivateSymbol(name));
+// ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
+// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
+transitioning builtin
+ProxyGetProperty(implicit context: Context)(
+ proxy: JSProxy, name: PropertyKey, receiverValue: JSAny,
+ onNonExistent: Smi): JSAny {
+ PerformStackCheck();
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
- // 2. Let handler be O.[[ProxyHandler]].
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- let handler: JSReceiver;
- typeswitch (proxy.handler) {
- case (Null): {
- ThrowTypeError(MessageTemplate::kProxyRevoked, 'get');
- }
- case (h: JSReceiver): {
- handler = h;
- }
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ let handler: JSReceiver;
+ typeswitch (proxy.handler) {
+ case (Null): {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, 'get');
}
+ case (h: JSReceiver): {
+ handler = h;
+ }
+ }
- // 5. Let target be O.[[ProxyTarget]].
- const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
- // 6. Let trap be ? GetMethod(handler, "get").
- // 7. If trap is undefined, then (see 7.a below).
- // 7.a. Return ? target.[[Get]](P, Receiver).
- const trap: Callable = GetMethod(handler, 'get')
- otherwise return GetPropertyWithReceiver(
- target, name, receiverValue, onNonExistent);
+ // 6. Let trap be ? GetMethod(handler, "get").
+ // 7. If trap is undefined, then (see 7.a below).
+ // 7.a. Return ? target.[[Get]](P, Receiver).
+ const trap: Callable = GetMethod(handler, 'get')
+ otherwise return GetPropertyWithReceiver(
+ target, name, receiverValue, onNonExistent);
- // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
- const trapResult =
- Call(context, trap, handler, target, name, receiverValue);
+ // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
+ const trapResult = Call(context, trap, handler, target, name, receiverValue);
- // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
- // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
- // false, then
- // a. If IsDataDescriptor(targetDesc) is true and targetDesc.[[Writable]]
- // is false, then
- // i. If SameValue(trapResult, targetDesc.[[Value]]) is false, throw a
- // TypeError exception.
- // b. If IsAccessorDescriptor(targetDesc) is true and targetDesc.[[Get]]
- // is undefined, then
- // i. If trapResult is not undefined, throw a TypeError exception.
- // 11. Return trapResult.
- CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
- return trapResult;
- }
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
+ // false, then
+ // a. If IsDataDescriptor(targetDesc) is true and targetDesc.[[Writable]]
+ // is false, then
+ // i. If SameValue(trapResult, targetDesc.[[Value]]) is false, throw a
+ // TypeError exception.
+ // b. If IsAccessorDescriptor(targetDesc) is true and targetDesc.[[Get]]
+ // is undefined, then
+ // i. If trapResult is not undefined, throw a TypeError exception.
+ // 11. Return trapResult.
+ CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ return trapResult;
+}
}
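
A TypeScript sketch of the "get" trap logic above, including the invariant
that CheckGetSetTrapResult enforces for non-configurable own properties
(proxyGet is an illustrative name, not part of the patch):

function proxyGet(target: object, handler: ProxyHandler<object>,
                  name: string | symbol, receiver: unknown): unknown {
  const trap = handler.get;
  // 7.a. With no trap, fall through to target.[[Get]](P, Receiver).
  if (trap === undefined) return Reflect.get(target, name, receiver);
  // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
  const trapResult: unknown = trap.call(handler, target, name, receiver);
  // 9.-10. A non-configurable own property of the target constrains what
  // the trap may report.
  const desc = Object.getOwnPropertyDescriptor(target, name);
  if (desc !== undefined && !desc.configurable) {
    if ('value' in desc && desc.writable === false &&
        !Object.is(trapResult, desc.value)) {
      throw new TypeError(
          `'get' on proxy: inconsistent value reported for non-writable, ` +
          `non-configurable property '${String(name)}'`);
    }
    if ('get' in desc && desc.get === undefined && trapResult !== undefined) {
      throw new TypeError(
          `'get' on proxy: accessor without getter must report undefined ` +
          `for '${String(name)}'`);
    }
  }
  // 11. Return trapResult.
  return trapResult;
}
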
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
index 8c556e27c2..152489ecb6 100644
--- a/deps/v8/src/builtins/proxy-get-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -6,65 +6,62 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
- // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
- transitioning builtin
- ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
- PerformStackCheck();
- const kTrapName: constexpr string = 'getPrototypeOf';
- try {
- // 1. Let handler be O.[[ProxyHandler]].
- // 2. If handler is null, throw a TypeError exception.
- // 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+// ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+transitioning builtin
+ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'getPrototypeOf';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
- // 4. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
- // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
- // 6. If trap is undefined, then (see 6.a below).
- const trap: Callable = GetMethod(handler, kTrapName)
- otherwise goto TrapUndefined(target);
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
- // 7. Let handlerProto be ? Call(trap, handler, « target »).
- const handlerProto = Call(context, trap, handler, target);
+ // 7. Let handlerProto be ? Call(trap, handler, Ā« target Ā»).
+ const handlerProto = Call(context, trap, handler, target);
- // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
- // exception.
- if (!Is<JSReceiver>(handlerProto) && handlerProto != Null) {
- goto ThrowProxyGetPrototypeOfInvalid;
- }
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
+ // exception.
+ if (!Is<JSReceiver>(handlerProto) && handlerProto != Null) {
+ goto ThrowProxyGetPrototypeOfInvalid;
+ }
- // 9. Let extensibleTarget be ? IsExtensible(target).
- // 10. If extensibleTarget is true, return handlerProto.
- const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
- if (extensibleTarget == True) {
- return handlerProto;
- }
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ // 10. If extensibleTarget is true, return handlerProto.
+ const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return handlerProto;
+ }
- // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
- const targetProto = object::ObjectGetPrototypeOfImpl(target);
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOfImpl(target);
- // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
- // exception.
- // 13. Return handlerProto.
- if (SameValue(targetProto, handlerProto)) {
- return handlerProto;
- }
- ThrowTypeError(MessageTemplate::kProxyGetPrototypeOfNonExtensible);
- }
- label TrapUndefined(target: JSAny) {
- // 6.a. Return ? target.[[GetPrototypeOf]]().
- return object::ObjectGetPrototypeOfImpl(target);
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
- }
- label ThrowProxyGetPrototypeOfInvalid deferred {
- ThrowTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid);
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
+ // exception.
+ // 13. Return handlerProto.
+ if (SameValue(targetProto, handlerProto)) {
+ return handlerProto;
}
+ ThrowTypeError(MessageTemplate::kProxyGetPrototypeOfNonExtensible);
+ } label TrapUndefined(target: JSAny) {
+ // 6.a. Return ? target.[[GetPrototypeOf]]().
+ return object::ObjectGetPrototypeOfImpl(target);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
+ } label ThrowProxyGetPrototypeOfInvalid deferred {
+ ThrowTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid);
}
}
+}
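The non-extensible-target invariant in steps 9-13 above can be exercised from script; a small TypeScript sketch, illustrative only and not part of the patch:

    // When the target is non-extensible, the getPrototypeOf trap must report
    // the target's actual prototype, otherwise a TypeError is thrown.
    const target = Object.preventExtensions(Object.create(Array.prototype));
    const p = new Proxy(target, { getPrototypeOf: () => Object.prototype });
    try {
      Object.getPrototypeOf(p);              // trap reports a different prototype
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
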
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index 2863146667..488f6fabb3 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -6,52 +6,50 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
- // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
- transitioning builtin ProxyHasProperty(implicit context: Context)(
- proxy: JSProxy, name: PropertyKey): JSAny {
- assert(IsJSProxy(proxy));
-
- PerformStackCheck();
-
- // 1. Assert: IsPropertyKey(P) is true.
- assert(IsName(name));
- assert(!IsPrivateSymbol(name));
-
- try {
- // 2. Let handler be O.[[ProxyHandler]].
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
-
- // 5. Let target be O.[[ProxyTarget]].
- const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
-
- // 6. Let trap be ? GetMethod(handler, "has").
- // 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handler, 'has')
- otherwise goto TrapUndefined(target);
-
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
- // targetĀ»)).
- // 9. If booleanTrapResult is false, then (see 9.a. in
- // CheckHasTrapResult).
- // 10. Return booleanTrapResult.
- const trapResult = Call(context, trap, handler, target, name);
- if (ToBoolean(trapResult)) {
- return True;
- }
- CheckHasTrapResult(target, proxy, name);
- return False;
- }
- label TrapUndefined(target: JSAny) {
- // 7.a. Return ? target.[[HasProperty]](P).
- tail HasProperty(target, name);
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, 'has');
+// ES #sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
+// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
+transitioning builtin ProxyHasProperty(implicit context: Context)(
+ proxy: JSProxy, name: PropertyKey): JSAny {
+ assert(IsJSProxy(proxy));
+
+ PerformStackCheck();
+
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
+
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
+
+ // 6. Let trap be ? GetMethod(handler, "has").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, 'has')
+ otherwise goto TrapUndefined(target);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
+ // targetĀ»)).
+ // 9. If booleanTrapResult is false, then (see 9.a. in
+ // CheckHasTrapResult).
+ // 10. Return booleanTrapResult.
+ const trapResult = Call(context, trap, handler, target, name);
+ if (ToBoolean(trapResult)) {
+ return True;
}
+ CheckHasTrapResult(target, proxy, name);
+ return False;
+ } label TrapUndefined(target: JSAny) {
+ // 7.a. Return ? target.[[HasProperty]](P).
+ tail HasProperty(target, name);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, 'has');
}
}
+}
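CheckHasTrapResult, invoked above when the trap result is falsish, enforces the following observable behavior; a TypeScript sketch, illustrative only:

    // A 'has' trap may not hide a non-configurable own property of the target.
    const target = {};
    Object.defineProperty(target, "x", { value: 1, configurable: false });
    const p = new Proxy(target, { has: () => false });
    try {
      "x" in p;                              // trap claims the property is absent
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
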
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
index 9c0d45c529..a7c2c56d44 100644
--- a/deps/v8/src/builtins/proxy-is-extensible.tq
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -6,52 +6,50 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
- // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
- transitioning builtin ProxyIsExtensible(implicit context:
- Context)(proxy: JSProxy): JSAny {
- PerformStackCheck();
- const kTrapName: constexpr string = 'isExtensible';
- try {
- // 1. Let handler be O.[[ProxyHandler]].
- // 2. If handler is null, throw a TypeError exception.
- // 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
-
- // 4. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
-
- // 5. Let trap be ? GetMethod(handler, "isExtensible").
- // 6. If trap is undefined, then (see 6.a below).
- const trap: Callable = GetMethod(handler, kTrapName)
- otherwise goto TrapUndefined(target);
-
- // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
- // targetĀ»)).
- const trapResult = ToBoolean(Call(context, trap, handler, target));
-
- // 8. Let targetResult be ? IsExtensible(target).
- const targetResult: bool =
- ToBoolean(object::ObjectIsExtensibleImpl(target));
-
- // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a
- // TypeError exception.
- if (trapResult != targetResult) {
- ThrowTypeError(
- MessageTemplate::kProxyIsExtensibleInconsistent,
- SelectBooleanConstant(targetResult));
- }
- // 10. Return booleanTrapResult.
- return SelectBooleanConstant(trapResult);
- }
- label TrapUndefined(target: JSAny) {
- // 6.a. Return ? IsExtensible(target).
- return object::ObjectIsExtensibleImpl(target);
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
+// ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+transitioning builtin ProxyIsExtensible(implicit context: Context)(
+ proxy: JSProxy): JSAny {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'isExtensible';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "isExtensible").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
+ // targetĀ»)).
+ const trapResult = ToBoolean(Call(context, trap, handler, target));
+
+ // 8. Let targetResult be ? IsExtensible(target).
+ const targetResult: bool =
+ ToBoolean(object::ObjectIsExtensibleImpl(target));
+
+ // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a
+ // TypeError exception.
+ if (trapResult != targetResult) {
+ ThrowTypeError(
+ MessageTemplate::kProxyIsExtensibleInconsistent,
+ SelectBooleanConstant(targetResult));
}
+ // 10. Return booleanTrapResult.
+ return SelectBooleanConstant(trapResult);
+ } label TrapUndefined(target: JSAny) {
+ // 6.a. Return ? IsExtensible(target).
+ return object::ObjectIsExtensibleImpl(target);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
}
}
+}
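The consistency check in step 9 above is directly observable; a TypeScript sketch, illustrative only:

    // The isExtensible trap result must agree with IsExtensible(target).
    const target = Object.preventExtensions({});
    const p = new Proxy(target, { isExtensible: () => true });
    try {
      Object.isExtensible(p);                // trap disagrees with the target
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
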
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
index 10bd1f45f0..a5a3d93da4 100644
--- a/deps/v8/src/builtins/proxy-prevent-extensions.tq
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -6,61 +6,59 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
- // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
- transitioning builtin
- ProxyPreventExtensions(implicit context:
- Context)(proxy: JSProxy, doThrow: Boolean): JSAny {
- PerformStackCheck();
- const kTrapName: constexpr string = 'preventExtensions';
- try {
- // 1. Let handler be O.[[ProxyHandler]].
- // 2. If handler is null, throw a TypeError exception.
- // 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+// ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+// https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+transitioning builtin
+ProxyPreventExtensions(implicit context: Context)(
+ proxy: JSProxy, doThrow: Boolean): JSAny {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'preventExtensions';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
- // 4. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
- // 5. Let trap be ? GetMethod(handler, "preventExtensions").
- // 6. If trap is undefined, then (see 6.a below).
- const trap: Callable = GetMethod(handler, kTrapName)
- otherwise goto TrapUndefined(target);
+ // 5. Let trap be ? GetMethod(handler, "preventExtensions").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
- // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
- // targetĀ»)).
- const trapResult = Call(context, trap, handler, target);
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā«
+ // targetĀ»)).
+ const trapResult = Call(context, trap, handler, target);
- // 8. If booleanTrapResult is true, then
- // 8.a. Let extensibleTarget be ? IsExtensible(target).
- // 8.b If extensibleTarget is true, throw a TypeError exception.
- if (ToBoolean(trapResult)) {
- const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
- if (extensibleTarget == True) {
- ThrowTypeError(MessageTemplate::kProxyPreventExtensionsExtensible);
- }
- } else {
- if (doThrow == True) {
- ThrowTypeError(MessageTemplate::kProxyTrapReturnedFalsish, kTrapName);
- }
- return False;
+ // 8. If booleanTrapResult is true, then
+ // 8.a. Let extensibleTarget be ? IsExtensible(target).
+ // 8.b If extensibleTarget is true, throw a TypeError exception.
+ if (ToBoolean(trapResult)) {
+ const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ ThrowTypeError(MessageTemplate::kProxyPreventExtensionsExtensible);
}
-
- // 9. Return booleanTrapResult.
- return True;
- }
- label TrapUndefined(target: JSAny) {
- // 6.a. Return ? target.[[PreventExtensions]]().
+ } else {
if (doThrow == True) {
- return object::ObjectPreventExtensionsThrow(target);
+ ThrowTypeError(MessageTemplate::kProxyTrapReturnedFalsish, kTrapName);
}
- return object::ObjectPreventExtensionsDontThrow(target);
+ return False;
}
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
+
+ // 9. Return booleanTrapResult.
+ return True;
+ } label TrapUndefined(target: JSAny) {
+ // 6.a. Return ? target.[[PreventExtensions]]().
+ if (doThrow == True) {
+ return object::ObjectPreventExtensionsThrow(target);
}
+ return object::ObjectPreventExtensionsDontThrow(target);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
}
+}
} // namespace proxy
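The doThrow parameter above corresponds to the two callers of [[PreventExtensions]]: Object.preventExtensions throws on a falsish trap result while Reflect.preventExtensions returns false. A TypeScript sketch of the observable difference, illustrative only:

    const p = new Proxy({}, { preventExtensions: () => false });
    console.log(Reflect.preventExtensions(p));   // false (non-throwing caller)
    try {
      Object.preventExtensions(p);               // throwing caller
    } catch (e) {
      console.log(e instanceof TypeError);       // true
    }
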
diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq
index 2b853afefe..989db1c04c 100644
--- a/deps/v8/src/builtins/proxy-revocable.tq
+++ b/deps/v8/src/builtins/proxy-revocable.tq
@@ -6,48 +6,35 @@
namespace proxy {
- extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
- implicit context: Context)(JSProxy): JSFunction;
-
- // Proxy.revocable(target, handler)
- // https://tc39.github.io/ecma262/#sec-proxy.revocable
- transitioning javascript builtin
- ProxyRevocable(js-implicit context: NativeContext)(
- target: JSAny, handler: JSAny): JSProxyRevocableResult {
- try {
- const targetJSReceiver =
- Cast<JSReceiver>(target) otherwise ThrowProxyNonObject;
- if (IsRevokedProxy(targetJSReceiver)) {
- goto ThrowProxyHandlerOrTargetRevoked;
- }
-
- const handlerJSReceiver =
- Cast<JSReceiver>(handler) otherwise ThrowProxyNonObject;
- if (IsRevokedProxy(handlerJSReceiver)) {
- goto ThrowProxyHandlerOrTargetRevoked;
- }
-
- // 1. Let p be ? ProxyCreate(target, handler).
- const proxy: JSProxy = AllocateProxy(targetJSReceiver, handlerJSReceiver);
-
- // 2. Let steps be the algorithm steps defined in Proxy Revocation
- // Functions.
- // 3. Let revoker be CreateBuiltinFunction(steps, Ā« [[RevocableProxy]] Ā»).
- // 4. Set revoker.[[RevocableProxy]] to p.
- const revoke: JSFunction = AllocateProxyRevokeFunction(proxy);
-
- // 5. Let result be ObjectCreate(%ObjectPrototype%).
- // 6. Perform CreateDataProperty(result, "proxy", p).
- // 7. Perform CreateDataProperty(result, "revoke", revoker).
- // 8. Return result.
- return NewJSProxyRevocableResult(proxy, revoke);
- }
- label ThrowProxyNonObject deferred {
- ThrowTypeError(MessageTemplate::kProxyNonObject, 'Proxy.revocable');
- }
- label ThrowProxyHandlerOrTargetRevoked deferred {
- ThrowTypeError(
- MessageTemplate::kProxyHandlerOrTargetRevoked, 'Proxy.revocable');
- }
+extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
+ implicit context: Context)(JSProxy): JSFunction;
+
+// Proxy.revocable(target, handler)
+// https://tc39.github.io/ecma262/#sec-proxy.revocable
+transitioning javascript builtin
+ProxyRevocable(js-implicit context: NativeContext)(
+ target: JSAny, handler: JSAny): JSProxyRevocableResult {
+ try {
+ // 1. Let p be ? ProxyCreate(target, handler).
+ const targetJSReceiver =
+ Cast<JSReceiver>(target) otherwise ThrowProxyNonObject;
+ const handlerJSReceiver =
+ Cast<JSReceiver>(handler) otherwise ThrowProxyNonObject;
+ const proxy: JSProxy = AllocateProxy(targetJSReceiver, handlerJSReceiver);
+
+ // 2. Let steps be the algorithm steps defined in Proxy Revocation
+ // Functions.
+ // 3. Let revoker be CreateBuiltinFunction(steps, Ā« [[RevocableProxy]] Ā»).
+ // 4. Set revoker.[[RevocableProxy]] to p.
+ const revoke: JSFunction = AllocateProxyRevokeFunction(proxy);
+
+ // 5. Let result be ObjectCreate(%ObjectPrototype%).
+ // 6. Perform CreateDataProperty(result, "proxy", p).
+ // 7. Perform CreateDataProperty(result, "revoke", revoker).
+ // 8. Return result.
+ return NewJSProxyRevocableResult(proxy, revoke);
+ } label ThrowProxyNonObject deferred {
+ ThrowTypeError(MessageTemplate::kProxyNonObject, 'Proxy.revocable');
}
}
+}
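The result object built in steps 5-8 above, and the effect of calling the revoker, look as follows from script; a TypeScript sketch, illustrative only:

    // Proxy.revocable returns { proxy, revoke }; once revoked, every internal
    // method of the proxy throws a TypeError.
    const { proxy, revoke } = Proxy.revocable({ x: 1 }, {});
    console.log(proxy.x);                    // 1
    revoke();
    try {
      proxy.x;
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
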
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index 7300f4d717..5d2071b931 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -6,31 +6,31 @@
namespace proxy {
- // Proxy Revocation Functions
- // https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
- transitioning javascript builtin
- ProxyRevoke(js-implicit context: NativeContext)(): Undefined {
- // 1. Let p be F.[[RevocableProxy]].
- const proxyObject: Object = context[PROXY_SLOT];
-
- // 2. If p is null, return undefined
- if (proxyObject == Null) {
- return Undefined;
- }
+// Proxy Revocation Functions
+// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
+transitioning javascript builtin
+ProxyRevoke(js-implicit context: NativeContext)(): Undefined {
+ // 1. Let p be F.[[RevocableProxy]].
+ const proxyObject: Object = context[PROXY_SLOT];
+
+ // 2. If p is null, return undefined
+ if (proxyObject == Null) {
+ return Undefined;
+ }
- // 3. Set F.[[RevocableProxy]] to null.
- context[PROXY_SLOT] = Null;
+ // 3. Set F.[[RevocableProxy]] to null.
+ context[PROXY_SLOT] = Null;
- // 4. Assert: p is a Proxy object.
- const proxy: JSProxy = UnsafeCast<JSProxy>(proxyObject);
+ // 4. Assert: p is a Proxy object.
+ const proxy: JSProxy = UnsafeCast<JSProxy>(proxyObject);
- // 5. Set p.[[ProxyTarget]] to null.
- proxy.target = Null;
+ // 5. Set p.[[ProxyTarget]] to null.
+ proxy.target = Null;
- // 6. Set p.[[ProxyHandler]] to null.
- proxy.handler = Null;
+ // 6. Set p.[[ProxyHandler]] to null.
+ proxy.handler = Null;
- // 7. Return undefined.
- return Undefined;
- }
+ // 7. Return undefined.
+ return Undefined;
+}
}
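Step 2 above makes the revocation function idempotent; a TypeScript sketch, illustrative only:

    // Once [[RevocableProxy]] is null, further calls simply return undefined.
    const { revoke } = Proxy.revocable({}, {});
    console.log(revoke());   // undefined
    console.log(revoke());   // undefined, no error on the second call
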
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 2d084eac7a..49f55fcd33 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -6,84 +6,82 @@
namespace proxy {
- extern transitioning runtime
- SetPropertyWithReceiver(implicit context:
- Context)(Object, Name, Object, Object): void;
+extern transitioning runtime
+SetPropertyWithReceiver(implicit context: Context)(
+ Object, Name, Object, Object): void;
- transitioning macro CallThrowTypeErrorIfStrict(implicit context: Context)(
- message: constexpr MessageTemplate) {
- ThrowTypeErrorIfStrict(SmiConstant(message), Null, Null);
- }
+transitioning macro CallThrowTypeErrorIfStrict(implicit context: Context)(
+ message: constexpr MessageTemplate) {
+ ThrowTypeErrorIfStrict(SmiConstant(message), Null, Null);
+}
- // ES #sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
- // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
- transitioning builtin
- ProxySetProperty(implicit context: Context)(
- proxy: JSProxy, name: PropertyKey|PrivateSymbol, value: JSAny,
- receiverValue: JSAny): JSAny {
- // 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(IsName(name));
+// ES #sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
+// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
+transitioning builtin
+ProxySetProperty(implicit context: Context)(
+ proxy: JSProxy, name: PropertyKey|PrivateSymbol, value: JSAny,
+ receiverValue: JSAny): JSAny {
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
- let key: PropertyKey;
- typeswitch (name) {
- case (PrivateSymbol): {
- CallThrowTypeErrorIfStrict(MessageTemplate::kProxyPrivate);
- return Undefined;
- }
- case (name: PropertyKey): {
- key = name;
- }
+ let key: PropertyKey;
+ typeswitch (name) {
+ case (PrivateSymbol): {
+ CallThrowTypeErrorIfStrict(MessageTemplate::kProxyPrivate);
+ return Undefined;
}
+ case (name: PropertyKey): {
+ key = name;
+ }
+ }
- try {
- // 2. Let handler be O.[[ProxyHandler]].
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
- // 5. Let target be O.[[ProxyTarget]].
- const target = UnsafeCast<JSReceiver>(proxy.target);
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = UnsafeCast<JSReceiver>(proxy.target);
- // 6. Let trap be ? GetMethod(handler, "set").
- // 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handler, 'set')
- otherwise goto TrapUndefined(target);
+ // 6. Let trap be ? GetMethod(handler, "set").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, 'set')
+ otherwise goto TrapUndefined(target);
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
- // Ā« target, P, V, Receiver Ā»)).
- // 9. If booleanTrapResult is false, return false.
- // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
- // 11. If targetDesc is not undefined and targetDesc.[[Configurable]] is
- // false, then
- // a. If IsDataDescriptor(targetDesc) is true and
- // targetDesc.[[Writable]] is false, then
- // i. If SameValue(V, targetDesc.[[Value]]) is false, throw a
- // TypeError exception.
- // b. If IsAccessorDescriptor(targetDesc) is true, then
- // i. If targetDesc.[[Set]] is undefined, throw a TypeError
- // exception.
- // 12. Return true.
- const trapResult =
- Call(context, trap, handler, target, key, value, receiverValue);
- if (ToBoolean(trapResult)) {
- CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
- return value;
- }
- ThrowTypeErrorIfStrict(
- SmiConstant(MessageTemplate::kProxyTrapReturnedFalsishFor), 'set',
- name);
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // Ā« target, P, V, Receiver Ā»)).
+ // 9. If booleanTrapResult is false, return false.
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 11. If targetDesc is not undefined and targetDesc.[[Configurable]] is
+ // false, then
+ // a. If IsDataDescriptor(targetDesc) is true and
+ // targetDesc.[[Writable]] is false, then
+ // i. If SameValue(V, targetDesc.[[Value]]) is false, throw a
+ // TypeError exception.
+ // b. If IsAccessorDescriptor(targetDesc) is true, then
+ // i. If targetDesc.[[Set]] is undefined, throw a TypeError
+ // exception.
+ // 12. Return true.
+ const trapResult =
+ Call(context, trap, handler, target, key, value, receiverValue);
+ if (ToBoolean(trapResult)) {
+ CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
return value;
}
- label TrapUndefined(target: Object) {
- // 7.a. Return ? target.[[Set]](P, V, Receiver).
- SetPropertyWithReceiver(target, name, value, receiverValue);
- return value;
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, 'set');
- }
+ ThrowTypeErrorIfStrict(
+ SmiConstant(MessageTemplate::kProxyTrapReturnedFalsishFor), 'set',
+ name);
+ return value;
+ } label TrapUndefined(target: Object) {
+ // 7.a. Return ? target.[[Set]](P, V, Receiver).
+ SetPropertyWithReceiver(target, name, value, receiverValue);
+ return value;
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, 'set');
}
}
+}
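The CheckGetSetTrapResult call above (kProxySet) enforces step 11's invariant; a TypeScript sketch of the observable behavior, illustrative only:

    // A 'set' trap may not claim success for a value that conflicts with a
    // non-configurable, non-writable data property on the target.
    const target: { x?: number } = {};
    Object.defineProperty(target, "x", { value: 1, writable: false, configurable: false });
    const p = new Proxy(target, { set: () => true });
    try {
      p.x = 2;                               // trap claims success for a different value
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
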
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
index a7a76b7535..ec68cef44c 100644
--- a/deps/v8/src/builtins/proxy-set-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -6,73 +6,71 @@
namespace proxy {
- // ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
- // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
- transitioning builtin
- ProxySetPrototypeOf(implicit context: Context)(
- proxy: JSProxy, proto: Null|JSReceiver, doThrow: Boolean): JSAny {
- PerformStackCheck();
- const kTrapName: constexpr string = 'setPrototypeOf';
- try {
- // 1. Assert: Either Type(V) is Object or Type(V) is Null.
- assert(proto == Null || Is<JSReceiver>(proto));
+// ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+// https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+transitioning builtin
+ProxySetPrototypeOf(implicit context: Context)(
+ proxy: JSProxy, proto: Null|JSReceiver, doThrow: Boolean): JSAny {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'setPrototypeOf';
+ try {
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ assert(proto == Null || Is<JSReceiver>(proto));
- // 2. Let handler be O.[[ProxyHandler]].
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
- const handler =
- Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
- // 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
- // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
- // 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handler, kTrapName)
- otherwise goto TrapUndefined(target, proto);
+ // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target, proto);
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā« target, V
- // Ā»)).
- const trapResult = Call(context, trap, handler, target, proto);
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, Ā« target, V
+ // Ā»)).
+ const trapResult = Call(context, trap, handler, target, proto);
- // 9. If booleanTrapResult is false, return false.
- if (!ToBoolean(trapResult)) {
- if (doThrow == True) {
- ThrowTypeError(
- MessageTemplate::kProxyTrapReturnedFalsishFor, kTrapName);
- }
- return False;
+ // 9. If booleanTrapResult is false, return false.
+ if (!ToBoolean(trapResult)) {
+ if (doThrow == True) {
+ ThrowTypeError(
+ MessageTemplate::kProxyTrapReturnedFalsishFor, kTrapName);
}
+ return False;
+ }
- // 10. Let extensibleTarget be ? IsExtensible(target).
- // 11. If extensibleTarget is true, return true.
- const extensibleTarget: Object = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
- if (extensibleTarget == True) {
- return True;
- }
+ // 10. Let extensibleTarget be ? IsExtensible(target).
+ // 11. If extensibleTarget is true, return true.
+ const extensibleTarget: Object = object::ObjectIsExtensibleImpl(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return True;
+ }
- // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
- const targetProto = object::ObjectGetPrototypeOfImpl(target);
+ // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOfImpl(target);
- // 13. If SameValue(V, targetProto) is false, throw a TypeError
- // exception.
- // 14. Return true.
- if (SameValue(proto, targetProto)) {
- return True;
- }
- ThrowTypeError(MessageTemplate::kProxySetPrototypeOfNonExtensible);
+ // 13. If SameValue(V, targetProto) is false, throw a TypeError
+ // exception.
+ // 14. Return true.
+ if (SameValue(proto, targetProto)) {
+ return True;
}
- label TrapUndefined(target: JSAny, proto: JSReceiver|Null) {
- // 7.a. Return ? target.[[SetPrototypeOf]]().
- if (doThrow == True) {
- return object::ObjectSetPrototypeOfThrow(target, proto);
- }
- return object::ObjectSetPrototypeOfDontThrow(target, proto);
- }
- label ThrowProxyHandlerRevoked deferred {
- ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
+ ThrowTypeError(MessageTemplate::kProxySetPrototypeOfNonExtensible);
+ } label TrapUndefined(target: JSAny, proto: JSReceiver|Null) {
+ // 7.a. Return ? target.[[SetPrototypeOf]]().
+ if (doThrow == True) {
+ return object::ObjectSetPrototypeOfThrow(target, proto);
}
+ return object::ObjectSetPrototypeOfDontThrow(target, proto);
+ } label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(MessageTemplate::kProxyRevoked, kTrapName);
}
}
+}
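Steps 10-14 above are observable as follows; a TypeScript sketch, illustrative only:

    // With a non-extensible target, the setPrototypeOf trap may only claim
    // success when the requested prototype equals the target's current one.
    const target = Object.preventExtensions({});
    const p = new Proxy(target, { setPrototypeOf: () => true });
    console.log(Reflect.setPrototypeOf(p, Object.getPrototypeOf(target)));  // true: same prototype
    try {
      Object.setPrototypeOf(p, Array.prototype);                            // different prototype
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
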
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index 2db794e8e8..8f662a9f4d 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -6,27 +6,21 @@
namespace proxy {
- extern macro ProxiesCodeStubAssembler::AllocateProxy(
- implicit context: Context)(JSReceiver, JSReceiver): JSProxy;
+extern macro ProxiesCodeStubAssembler::AllocateProxy(implicit context: Context)(
+ JSReceiver, JSReceiver): JSProxy;
- macro IsRevokedProxy(implicit context: Context)(o: JSReceiver): bool {
- const proxy: JSProxy = Cast<JSProxy>(o) otherwise return false;
- Cast<JSReceiver>(proxy.handler) otherwise return true;
- return false;
- }
+extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+ implicit context: Context)(
+ JSReceiver, JSProxy, Name, Object, constexpr int31);
- extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
- implicit context:
- Context)(JSReceiver, JSProxy, Name, Object, constexpr int31);
+extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
+ implicit context: Context)(JSReceiver, JSProxy, Name);
- extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
+extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
+ implicit context: Context)(JSReceiver, JSProxy, Name);
- extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
-
- const kProxyGet: constexpr int31
- generates 'JSProxy::AccessKind::kGet';
- const kProxySet: constexpr int31
- generates 'JSProxy::AccessKind::kSet';
+const kProxyGet: constexpr int31
+ generates 'JSProxy::AccessKind::kGet';
+const kProxySet: constexpr int31
+ generates 'JSProxy::AccessKind::kSet';
}
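CheckDeleteTrapResult, declared above, enforces the corresponding invariant for the deleteProperty trap; a TypeScript sketch of the observable behavior, illustrative only:

    // A deleteProperty trap may not report a non-configurable own property
    // of the target as successfully deleted.
    const target = {};
    Object.defineProperty(target, "x", { value: 1, configurable: false });
    const p = new Proxy(target, { deleteProperty: () => true });
    try {
      Reflect.deleteProperty(p, "x");
    } catch (e) {
      console.log(e instanceof TypeError);   // true
    }
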
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
index f1818ed32d..477c586403 100644
--- a/deps/v8/src/builtins/reflect.tq
+++ b/deps/v8/src/builtins/reflect.tq
@@ -3,92 +3,88 @@
// found in the LICENSE file.
namespace reflect {
- // ES6 section 26.1.10 Reflect.isExtensible
- transitioning javascript builtin
- ReflectIsExtensible(js-implicit context: NativeContext)(object: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.isExtensible');
- return object::ObjectIsExtensibleImpl(objectJSReceiver);
- }
+// ES6 section 26.1.10 Reflect.isExtensible
+transitioning javascript builtin
+ReflectIsExtensible(js-implicit context: NativeContext)(object: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.isExtensible');
+ return object::ObjectIsExtensibleImpl(objectJSReceiver);
+}
- // ES6 section 26.1.12 Reflect.preventExtensions
- transitioning javascript builtin
- ReflectPreventExtensions(js-implicit context: NativeContext)(object: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.preventExtensions');
- return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
- }
+// ES6 section 26.1.12 Reflect.preventExtensions
+transitioning javascript builtin
+ReflectPreventExtensions(js-implicit context: NativeContext)(object: JSAny):
+ JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.preventExtensions');
+ return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
+}
- // ES6 section 26.1.8 Reflect.getPrototypeOf
- transitioning javascript builtin
- ReflectGetPrototypeOf(js-implicit context: NativeContext)(object: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.getPrototypeOf');
- return object::JSReceiverGetPrototypeOf(objectJSReceiver);
- }
+// ES6 section 26.1.8 Reflect.getPrototypeOf
+transitioning javascript builtin
+ReflectGetPrototypeOf(js-implicit context: NativeContext)(object: JSAny):
+ JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.getPrototypeOf');
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+}
- // ES6 section 26.1.14 Reflect.setPrototypeOf
- transitioning javascript builtin ReflectSetPrototypeOf(
- js-implicit context: NativeContext)(object: JSAny, proto: JSAny): JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.setPrototypeOf');
- typeswitch (proto) {
- case (proto: JSReceiver|Null): {
- return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
- }
- case (JSAny): {
- ThrowTypeError(MessageTemplate::kProtoObjectOrNull, proto);
- }
+// ES6 section 26.1.14 Reflect.setPrototypeOf
+transitioning javascript builtin ReflectSetPrototypeOf(
+ js-implicit context: NativeContext)(object: JSAny, proto: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.setPrototypeOf');
+ typeswitch (proto) {
+ case (proto: JSReceiver|Null): {
+ return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+ }
+ case (JSAny): {
+ ThrowTypeError(MessageTemplate::kProtoObjectOrNull, proto);
}
}
+}
- extern transitioning builtin ToName(implicit context: Context)(JSAny):
- AnyName;
- type OnNonExistent constexpr 'OnNonExistent';
- const kReturnUndefined: constexpr OnNonExistent
- generates 'OnNonExistent::kReturnUndefined';
- extern macro SmiConstant(constexpr OnNonExistent): Smi;
- extern transitioning builtin GetPropertyWithReceiver(
- implicit context: Context)(JSAny, Name, JSAny, Smi): JSAny;
+extern transitioning builtin ToName(implicit context: Context)(JSAny): AnyName;
+type OnNonExistent constexpr 'OnNonExistent';
+const kReturnUndefined: constexpr OnNonExistent
+ generates 'OnNonExistent::kReturnUndefined';
+extern macro SmiConstant(constexpr OnNonExistent): Smi;
+extern transitioning builtin GetPropertyWithReceiver(implicit context: Context)(
+ JSAny, Name, JSAny, Smi): JSAny;
- // ES6 section 26.1.6 Reflect.get
- transitioning javascript builtin
- ReflectGet(js-implicit context: NativeContext)(...arguments): JSAny {
- const length = arguments.length;
- const object: JSAny = length > 0 ? arguments[0] : Undefined;
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.get');
- const propertyKey: JSAny = length > 1 ? arguments[1] : Undefined;
- const name: AnyName = ToName(propertyKey);
- const receiver: JSAny = length > 2 ? arguments[2] : objectJSReceiver;
- return GetPropertyWithReceiver(
- objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
- }
+// ES6 section 26.1.6 Reflect.get
+transitioning javascript builtin
+ReflectGet(js-implicit context: NativeContext)(...arguments): JSAny {
+ const object: JSAny = arguments[0];
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Reflect.get');
+ const propertyKey: JSAny = arguments[1];
+ const name: AnyName = ToName(propertyKey);
+ const receiver: JSAny =
+ arguments.length > 2 ? arguments[2] : objectJSReceiver;
+ return GetPropertyWithReceiver(
+ objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
+}
- // ES6 section 26.1.4 Reflect.deleteProperty
- transitioning javascript builtin ReflectDeleteProperty(
- js-implicit context: NativeContext)(object: JSAny, key: JSAny): JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.deleteProperty');
- return DeleteProperty(objectJSReceiver, key, LanguageMode::kSloppy);
- }
+// ES6 section 26.1.4 Reflect.deleteProperty
+transitioning javascript builtin ReflectDeleteProperty(
+ js-implicit context: NativeContext)(object: JSAny, key: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(
+ MessageTemplate::kCalledOnNonObject, 'Reflect.deleteProperty');
+ return DeleteProperty(objectJSReceiver, key, LanguageMode::kSloppy);
+}
- // ES section #sec-reflect.has
- transitioning javascript builtin
- ReflectHas(js-implicit context: NativeContext)(object: JSAny, key: JSAny):
- JSAny {
- const objectJSReceiver = Cast<JSReceiver>(object)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledOnNonObject, 'Reflect.has');
- return HasProperty(objectJSReceiver, key);
- }
+// ES section #sec-reflect.has
+transitioning javascript builtin
+ReflectHas(js-implicit context: NativeContext)(
+ object: JSAny, key: JSAny): JSAny {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Reflect.has');
+ return HasProperty(objectJSReceiver, key);
+}
} // namespace reflect
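The ReflectGet change above reads arguments[0] and arguments[1] directly (out-of-range indices yield Undefined) instead of length-checking; the receiver handling it implements is observable as follows, in a TypeScript sketch that is illustrative only:

    const obj = {
      _x: 1,
      get x() { return this._x; },
    };
    console.log(Reflect.get(obj, "x"));               // 1
    console.log(Reflect.get(obj, "x", { _x: 42 }));   // 42: the receiver becomes `this` in the getter
    try {
      (Reflect.get as any)();                         // no target: the JSReceiver cast fails
    } catch (e) {
      console.log(e instanceof TypeError);            // true
    }
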
diff --git a/deps/v8/src/builtins/regexp-exec.tq b/deps/v8/src/builtins/regexp-exec.tq
index 0b11c42fbf..87b00c1fdc 100644
--- a/deps/v8/src/builtins/regexp-exec.tq
+++ b/deps/v8/src/builtins/regexp-exec.tq
@@ -6,40 +6,39 @@
namespace regexp {
- @export
- transitioning macro RegExpPrototypeExecBodyFast(implicit context: Context)(
- receiver: JSReceiver, string: String): JSAny {
- return RegExpPrototypeExecBody(receiver, string, true);
- }
-
- transitioning macro RegExpPrototypeExecBodySlow(implicit context: Context)(
- receiver: JSReceiver, string: String): JSAny {
- return RegExpPrototypeExecBody(receiver, string, false);
- }
-
- // Slow path stub for RegExpPrototypeExec to decrease code size.
- transitioning builtin
- RegExpPrototypeExecSlow(implicit context: Context)(
- regexp: JSRegExp, string: String): JSAny {
- return RegExpPrototypeExecBodySlow(regexp, string);
- }
-
- extern macro RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
- implicit context: Context)(Object): bool;
-
- // ES#sec-regexp.prototype.exec
- // RegExp.prototype.exec ( string )
- transitioning javascript builtin RegExpPrototypeExec(
- js-implicit context: NativeContext,
- receiver: JSAny)(string: JSAny): JSAny {
- // Ensure {receiver} is a JSRegExp.
- const receiver = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, 'RegExp.prototype.exec',
- receiver);
- const string = ToString_Inline(string);
-
- return IsFastRegExpNoPrototype(receiver) ?
- RegExpPrototypeExecBodyFast(receiver, string) :
- RegExpPrototypeExecSlow(receiver, string);
- }
+@export
+transitioning macro RegExpPrototypeExecBodyFast(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ return RegExpPrototypeExecBody(receiver, string, true);
+}
+
+transitioning macro RegExpPrototypeExecBodySlow(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ return RegExpPrototypeExecBody(receiver, string, false);
+}
+
+// Slow path stub for RegExpPrototypeExec to decrease code size.
+transitioning builtin
+RegExpPrototypeExecSlow(implicit context: Context)(
+ regexp: JSRegExp, string: String): JSAny {
+ return RegExpPrototypeExecBodySlow(regexp, string);
+}
+
+extern macro RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
+ implicit context: Context)(Object): bool;
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+transitioning javascript builtin RegExpPrototypeExec(
+ js-implicit context: NativeContext, receiver: JSAny)(string: JSAny): JSAny {
+ // Ensure {receiver} is a JSRegExp.
+ const receiver = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, 'RegExp.prototype.exec',
+ receiver);
+ const string = ToString_Inline(string);
+
+ return IsFastRegExpNoPrototype(receiver) ?
+ RegExpPrototypeExecBodyFast(receiver, string) :
+ RegExpPrototypeExecSlow(receiver, string);
+}
}
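RegExp.prototype.exec above dispatches between the fast and slow exec bodies; the lastIndex behavior for a global regexp, as a TypeScript sketch that is illustrative only:

    const re = /ab/g;
    const s = "ab ab";
    console.log(re.exec(s)?.index, re.lastIndex);   // 0 2
    console.log(re.exec(s)?.index, re.lastIndex);   // 3 5
    console.log(re.exec(s), re.lastIndex);          // null 0 (lastIndex is reset on failure)
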
diff --git a/deps/v8/src/builtins/regexp-match-all.tq b/deps/v8/src/builtins/regexp-match-all.tq
index 022f8bc53f..932972d844 100644
--- a/deps/v8/src/builtins/regexp-match-all.tq
+++ b/deps/v8/src/builtins/regexp-match-all.tq
@@ -6,247 +6,217 @@
namespace regexp {
- extern transitioning macro
- RegExpMatchAllAssembler::CreateRegExpStringIterator(
- NativeContext, Object, String, bool, bool): JSAny;
-
- @export
- transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)(
- nativeContext: NativeContext, receiver: JSAny, string: JSAny): JSAny {
- // 1. Let R be the this value.
- // 2. If Type(R) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(
- receiver, MessageTemplate::kIncompatibleMethodReceiver,
- 'RegExp.prototype.@@matchAll');
- const receiver = UnsafeCast<JSReceiver>(receiver);
-
- // 3. Let S be ? ToString(O).
- const string: String = ToString_Inline(string);
-
- let matcher: Object;
- let global: bool;
- let unicode: bool;
-
- // 'FastJSRegExp' uses the strict fast path check because following code
- // uses the flags property.
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- typeswitch (receiver) {
- case (fastRegExp: FastJSRegExp): {
- const source = fastRegExp.source;
-
- // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
- // 5. Let flags be ? ToString(? Get(R, "flags")).
- // 6. Let matcher be ? Construct(C, Ā« R, flags Ā»).
- const flags: String = FastFlagsGetter(fastRegExp);
- matcher = RegExpCreate(nativeContext, source, flags);
- const matcherRegExp = UnsafeCast<JSRegExp>(matcher);
- assert(IsFastRegExpPermissive(matcherRegExp));
-
- // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- const fastRegExp = UnsafeCast<FastJSRegExp>(receiver);
- FastStoreLastIndex(matcherRegExp, fastRegExp.lastIndex);
-
- // 9. If flags contains "g", let global be true.
- // 10. Else, let global be false.
- global = FastFlagGetter(matcherRegExp, Flag::kGlobal);
-
- // 11. If flags contains "u", let fullUnicode be true.
- // 12. Else, let fullUnicode be false.
- unicode = FastFlagGetter(matcherRegExp, Flag::kUnicode);
- }
- case (Object): {
- // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
- const regexpFun = LoadRegExpFunction(nativeContext);
- const speciesConstructor =
- UnsafeCast<Constructor>(SpeciesConstructor(receiver, regexpFun));
-
- // 5. Let flags be ? ToString(? Get(R, "flags")).
- const flags = GetProperty(receiver, 'flags');
- const flagsString = ToString_Inline(flags);
-
- // 6. Let matcher be ? Construct(C, Ā« R, flags Ā»).
- matcher = Construct(speciesConstructor, receiver, flagsString);
-
- // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
- const lastIndex: Number = ToLength_Inline(SlowLoadLastIndex(receiver));
-
- // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- SlowStoreLastIndex(UnsafeCast<JSReceiver>(matcher), lastIndex);
-
- // 9. If flags contains "g", let global be true.
- // 10. Else, let global be false.
- const globalCharString: String = StringConstant('g');
- const globalIndex: Smi =
- StringIndexOf(flagsString, globalCharString, 0);
- global = globalIndex != -1;
-
- // 11. If flags contains "u", let fullUnicode be true.
- // 12. Else, let fullUnicode be false.
- const unicodeCharString = StringConstant('u');
- const unicodeIndex: Smi =
- StringIndexOf(flagsString, unicodeCharString, 0);
- unicode = unicodeIndex != -1;
- }
+extern transitioning macro
+RegExpMatchAllAssembler::CreateRegExpStringIterator(
+ NativeContext, Object, String, bool, bool): JSAny;
+
+@export
+transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)(
+ nativeContext: NativeContext, receiver: JSAny, string: JSAny): JSAny {
+ // 1. Let R be the this value.
+ // 2. If Type(R) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(
+ receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ 'RegExp.prototype.@@matchAll');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+
+ // 3. Let S be ? ToString(O).
+ const string: String = ToString_Inline(string);
+
+ let matcher: Object;
+ let global: bool;
+ let unicode: bool;
+
+ // 'FastJSRegExp' uses the strict fast path check because following code
+ // uses the flags property.
+ // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+ // permissive.
+ typeswitch (receiver) {
+ case (fastRegExp: FastJSRegExp): {
+ const source = fastRegExp.source;
+
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ // 6. Let matcher be ? Construct(C, Ā« R, flags Ā»).
+ const flags: String = FastFlagsGetter(fastRegExp);
+ matcher = RegExpCreate(nativeContext, source, flags);
+ const matcherRegExp = UnsafeCast<JSRegExp>(matcher);
+ assert(IsFastRegExpPermissive(matcherRegExp));
+
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ const fastRegExp = UnsafeCast<FastJSRegExp>(receiver);
+ FastStoreLastIndex(matcherRegExp, fastRegExp.lastIndex);
+
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ global = FastFlagGetter(matcherRegExp, Flag::kGlobal);
+
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ unicode = FastFlagGetter(matcherRegExp, Flag::kUnicode);
+ }
+ case (Object): {
+ // 4. Let C be ? SpeciesConstructor(R, %RegExp%).
+ const regexpFun = LoadRegExpFunction(nativeContext);
+ const speciesConstructor =
+ UnsafeCast<Constructor>(SpeciesConstructor(receiver, regexpFun));
+
+ // 5. Let flags be ? ToString(? Get(R, "flags")).
+ const flags = GetProperty(receiver, 'flags');
+ const flagsString = ToString_Inline(flags);
+
+ // 6. Let matcher be ? Construct(C, Ā« R, flags Ā»).
+ matcher = Construct(speciesConstructor, receiver, flagsString);
+
+ // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ const lastIndex: Number = ToLength_Inline(SlowLoadLastIndex(receiver));
+
+ // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ SlowStoreLastIndex(UnsafeCast<JSReceiver>(matcher), lastIndex);
+
+ // 9. If flags contains "g", let global be true.
+ // 10. Else, let global be false.
+ const globalCharString: String = StringConstant('g');
+ const globalIndex: Smi = StringIndexOf(flagsString, globalCharString, 0);
+ global = globalIndex != -1;
+
+ // 11. If flags contains "u", let fullUnicode be true.
+ // 12. Else, let fullUnicode be false.
+ const unicodeCharString = StringConstant('u');
+ const unicodeIndex: Smi =
+ StringIndexOf(flagsString, unicodeCharString, 0);
+ unicode = unicodeIndex != -1;
}
-
- // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
- return CreateRegExpStringIterator(
- nativeContext, matcher, string, global, unicode);
- }
-
- // https://tc39.github.io/proposal-string-matchall/
- // RegExp.prototype [ @@matchAll ] ( string )
- transitioning javascript builtin RegExpPrototypeMatchAll(
- js-implicit context: NativeContext,
- receiver: JSAny)(string: JSAny): JSAny {
- return RegExpPrototypeMatchAllImpl(context, receiver, string);
- }
-
- const kJSRegExpStringIteratorDone:
- constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit';
- const kJSRegExpStringIteratorGlobal: constexpr int31
- generates '1 << JSRegExpStringIterator::kGlobalBit';
- const kJSRegExpStringIteratorUnicode: constexpr int31
- generates '1 << JSRegExpStringIterator::kUnicodeBit';
-
- extern macro IsSetSmi(Smi, constexpr int31): bool;
-
- macro HasDoneFlag(flags: Smi): bool {
- return IsSetSmi(flags, kJSRegExpStringIteratorDone);
- }
-
- macro HasGlobalFlag(flags: Smi): bool {
- return IsSetSmi(flags, kJSRegExpStringIteratorGlobal);
- }
-
- macro HasUnicodeFlag(flags: Smi): bool {
- return IsSetSmi(flags, kJSRegExpStringIteratorUnicode);
}
- macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) {
- const newFlags: Smi = flags | kJSRegExpStringIteratorDone;
- iterator.flags = newFlags;
- }
+ // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
+ return CreateRegExpStringIterator(
+ nativeContext, matcher, string, global, unicode);
+}
- // https://tc39.github.io/proposal-string-matchall/
- // %RegExpStringIteratorPrototype%.next ( )
- transitioning javascript builtin RegExpStringIteratorPrototypeNext(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Let O be the this value.
- // 2. If Type(O) is not Object, throw a TypeError exception.
- // 3. If O does not have all of the internal slots of a RegExp String
- // Iterator Object Instance (see 5.3), throw a TypeError exception.
- const methodName: constexpr string =
- '%RegExpStringIterator%.prototype.next';
- const receiver = Cast<JSRegExpStringIterator>(receiver) otherwise
- ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);
+// https://tc39.github.io/proposal-string-matchall/
+// RegExp.prototype [ @@matchAll ] ( string )
+transitioning javascript builtin RegExpPrototypeMatchAll(
+ js-implicit context: NativeContext, receiver: JSAny)(string: JSAny): JSAny {
+ return RegExpPrototypeMatchAllImpl(context, receiver, string);
+}
+// https://tc39.github.io/proposal-string-matchall/
+// %RegExpStringIteratorPrototype%.next ( )
+transitioning javascript builtin RegExpStringIteratorPrototypeNext(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Let O be the this value.
+ // 2. If Type(O) is not Object, throw a TypeError exception.
+ // 3. If O does not have all of the internal slots of a RegExp String
+ // Iterator Object Instance (see 5.3), throw a TypeError exception.
+ const methodName: constexpr string = '%RegExpStringIterator%.prototype.next';
+ const receiver = Cast<JSRegExpStringIterator>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver);
+
+ try {
+ // 4. If O.[[Done]] is true, then
+ // a. Return ! CreateIterResultObject(undefined, true).
+ const flags: SmiTagged<JSRegExpStringIteratorFlags> = receiver.flags;
+ if (flags.done) goto ReturnEmptyDoneResult;
+
+ // 5. Let R be O.[[iteratingRegExp]].
+ const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp;
+
+ // 6. Let S be O.[[IteratedString]].
+ const iteratingString: String = receiver.iterated_string;
+
+ // 7. Let global be O.[[Global]].
+ // 8. Let fullUnicode be O.[[Unicode]].
+ // 9. Let match be ? RegExpExec(R, S).
+ let match: Object;
+ let isFastRegExp: bool = false;
try {
- // 4. If O.[[Done]] is true, then
- // a. Return ! CreateIterResultObject(undefined, true).
- const flags: Smi = receiver.flags;
- if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult;
-
- // 5. Let R be O.[[iteratingRegExp]].
- const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp;
-
- // 6. Let S be O.[[IteratedString]].
- const iteratingString: String = receiver.iterated_string;
-
- // 7. Let global be O.[[Global]].
- // 8. Let fullUnicode be O.[[Unicode]].
- // 9. Let match be ? RegExpExec(R, S).
- let match: Object;
- let isFastRegExp: bool = false;
- try {
- if (IsFastRegExpPermissive(iteratingRegExp)) {
- const regexp = UnsafeCast<JSRegExp>(iteratingRegExp);
- const lastIndex = LoadLastIndexAsLength(regexp, true);
- const matchIndices: RegExpMatchInfo =
- RegExpPrototypeExecBodyWithoutResultFast(
- regexp, iteratingString, lastIndex)
- otherwise IfNoMatch;
- match = ConstructNewResultFromMatchInfo(
- regexp, matchIndices, iteratingString, lastIndex);
- isFastRegExp = true;
- } else {
- match = RegExpExec(iteratingRegExp, iteratingString);
- if (match == Null) {
- goto IfNoMatch;
- }
+ if (IsFastRegExpPermissive(iteratingRegExp)) {
+ const regexp = UnsafeCast<JSRegExp>(iteratingRegExp);
+ const lastIndex = LoadLastIndexAsLength(regexp, true);
+ const matchIndices: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(
+ regexp, iteratingString, lastIndex)
+ otherwise IfNoMatch;
+ match = ConstructNewResultFromMatchInfo(
+ regexp, matchIndices, iteratingString, lastIndex);
+ isFastRegExp = true;
+ } else {
+ match = RegExpExec(iteratingRegExp, iteratingString);
+ if (match == Null) {
+ goto IfNoMatch;
}
- // 11. Else,
- // b. Else, handle non-global case first.
- if (!HasGlobalFlag(flags)) {
- // i. Set O.[[Done]] to true.
- SetDoneFlag(receiver, flags);
-
- // ii. Return ! CreateIterResultObject(match, false).
- return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
- }
- // a. If global is true,
- assert(HasGlobalFlag(flags));
- if (isFastRegExp) {
- // i. Let matchStr be ? ToString(? Get(match, "0")).
- const match = UnsafeCast<JSRegExpResult>(match);
- const resultFixedArray = UnsafeCast<FixedArray>(match.elements);
- const matchStr = UnsafeCast<String>(resultFixedArray.objects[0]);
-
- // When iterating_regexp is fast, we assume it stays fast even after
- // accessing the first match from the RegExp result.
- assert(IsFastRegExpPermissive(iteratingRegExp));
- const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp);
- if (matchStr == kEmptyString) {
- // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- const thisIndex: Smi = FastLoadLastIndex(iteratingRegExp);
-
- // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
- // fullUnicode).
- const nextIndex: Smi = AdvanceStringIndexFast(
- iteratingString, thisIndex, HasUnicodeFlag(flags));
-
- // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- FastStoreLastIndex(iteratingRegExp, nextIndex);
- }
-
- // iii. Return ! CreateIterResultObject(match, false).
- return AllocateJSIteratorResult(match, False);
- }
- assert(!isFastRegExp);
+ }
+ // 11. Else,
+ // b. Else, handle non-global case first.
+ if (!flags.global) {
+ // i. Set O.[[Done]] to true.
+ receiver.flags.done = true;
+
+ // ii. Return ! CreateIterResultObject(match, false).
+ return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
+ }
+ // a. If global is true,
+ assert(flags.global);
+ if (isFastRegExp) {
// i. Let matchStr be ? ToString(? Get(match, "0")).
- const match = UnsafeCast<JSAny>(match);
- const matchStr = ToString_Inline(GetProperty(match, SmiConstant(0)));
-
+ const match = UnsafeCast<JSRegExpResult>(match);
+ const resultFixedArray = UnsafeCast<FixedArray>(match.elements);
+ const matchStr = UnsafeCast<String>(resultFixedArray.objects[0]);
+
+ // When iterating_regexp is fast, we assume it stays fast even after
+ // accessing the first match from the RegExp result.
+ assert(IsFastRegExpPermissive(iteratingRegExp));
+ const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp);
if (matchStr == kEmptyString) {
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- const lastIndex: JSAny = SlowLoadLastIndex(iteratingRegExp);
- const thisIndex: Number = ToLength_Inline(lastIndex);
+ const thisIndex: Smi = FastLoadLastIndex(iteratingRegExp);
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
// fullUnicode).
- const nextIndex: Number = AdvanceStringIndexSlow(
- iteratingString, thisIndex, HasUnicodeFlag(flags));
+ const nextIndex: Smi =
+ AdvanceStringIndexFast(iteratingString, thisIndex, flags.unicode);
// 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- SlowStoreLastIndex(iteratingRegExp, nextIndex);
+ FastStoreLastIndex(iteratingRegExp, nextIndex);
}
+
// iii. Return ! CreateIterResultObject(match, false).
return AllocateJSIteratorResult(match, False);
}
- // 10. If match is null, then
- label IfNoMatch {
- // a. Set O.[[Done]] to true.
- SetDoneFlag(receiver, flags);
-
- // b. Return ! CreateIterResultObject(undefined, true).
- goto ReturnEmptyDoneResult;
+ assert(!isFastRegExp);
+ // i. Let matchStr be ? ToString(? Get(match, "0")).
+ const match = UnsafeCast<JSAny>(match);
+ const matchStr = ToString_Inline(GetProperty(match, SmiConstant(0)));
+
+ if (matchStr == kEmptyString) {
+ // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
+ const lastIndex: JSAny = SlowLoadLastIndex(iteratingRegExp);
+ const thisIndex: Number = ToLength_Inline(lastIndex);
+
+ // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
+ // fullUnicode).
+ const nextIndex: Number =
+ AdvanceStringIndexSlow(iteratingString, thisIndex, flags.unicode);
+
+ // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
+ SlowStoreLastIndex(iteratingRegExp, nextIndex);
}
+ // iii. Return ! CreateIterResultObject(match, false).
+ return AllocateJSIteratorResult(match, False);
}
- label ReturnEmptyDoneResult {
- return AllocateJSIteratorResult(Undefined, True);
+ // 10. If match is null, then
+ label IfNoMatch {
+ // a. Set O.[[Done]] to true.
+ receiver.flags.done = true;
+
+ // b. Return ! CreateIterResultObject(undefined, true).
+ goto ReturnEmptyDoneResult;
}
+ } label ReturnEmptyDoneResult {
+ return AllocateJSIteratorResult(Undefined, True);
}
}
+}
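
The iterator body above advances lastIndex past an empty match with AdvanceStringIndexFast/AdvanceStringIndexSlow. For orientation, here is a minimal TypeScript sketch of the underlying spec operation AdvanceStringIndex(S, index, unicode); the function name and shape are illustrative, not the Torque macros themselves.

    function advanceStringIndex(s: string, index: number, unicode: boolean): number {
      // Without the 'u' flag the index always moves by one code unit.
      if (!unicode || index + 1 >= s.length) return index + 1;
      const lead = s.charCodeAt(index);
      if (lead < 0xd800 || lead > 0xdbff) return index + 1;
      const trail = s.charCodeAt(index + 1);
      if (trail < 0xdc00 || trail > 0xdfff) return index + 1;
      // A lead/trail surrogate pair: step over the whole code point.
      return index + 2;
    }
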
diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq
index 9b0255762c..d5581e0509 100644
--- a/deps/v8/src/builtins/regexp-match.tq
+++ b/deps/v8/src/builtins/regexp-match.tq
@@ -6,160 +6,157 @@
namespace regexp {
- const kATOM: constexpr int31
- generates 'JSRegExp::ATOM';
- const kTagIndex: constexpr int31
- generates 'JSRegExp::kTagIndex';
- const kAtomPatternIndex: constexpr int31
- generates 'JSRegExp::kAtomPatternIndex';
-
- extern transitioning macro RegExpBuiltinsAssembler::FlagGetter(
- implicit context: Context)(Object, constexpr Flag, constexpr bool): bool;
-
- extern macro UnsafeLoadFixedArrayElement(RegExpMatchInfo, constexpr int31):
- Object;
-
- transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
- regexp: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
- if constexpr (isFastPath) {
- assert(Is<FastJSRegExp>(regexp));
- }
+const kATOM: constexpr int31
+ generates 'JSRegExp::ATOM';
+const kTagIndex: constexpr int31
+ generates 'JSRegExp::kTagIndex';
+const kAtomPatternIndex: constexpr int31
+ generates 'JSRegExp::kAtomPatternIndex';
+
+extern transitioning macro RegExpBuiltinsAssembler::FlagGetter(
+ implicit context: Context)(Object, constexpr Flag, constexpr bool): bool;
+
+extern macro UnsafeLoadFixedArrayElement(
+ RegExpMatchInfo, constexpr int31): Object;
+
+transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
+ regexp: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
+ if constexpr (isFastPath) {
+ assert(Is<FastJSRegExp>(regexp));
+ }
- const isGlobal: bool = FlagGetter(regexp, Flag::kGlobal, isFastPath);
+ const isGlobal: bool = FlagGetter(regexp, Flag::kGlobal, isFastPath);
- if (!isGlobal) {
- return isFastPath ? RegExpPrototypeExecBodyFast(regexp, string) :
- RegExpExec(regexp, string);
- }
+ if (!isGlobal) {
+ return isFastPath ? RegExpPrototypeExecBodyFast(regexp, string) :
+ RegExpExec(regexp, string);
+ }
- assert(isGlobal);
- const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath);
+ assert(isGlobal);
+ const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath);
- StoreLastIndex(regexp, 0, isFastPath);
+ StoreLastIndex(regexp, 0, isFastPath);
- // Allocate an array to store the resulting match strings.
+ // Allocate an array to store the resulting match strings.
- let array = growable_fixed_array::NewGrowableFixedArray();
+ let array = growable_fixed_array::NewGrowableFixedArray();
- // Check if the regexp is an ATOM type. If so, then keep the literal string
- // to search for so that we can avoid calling substring in the loop below.
- let atom: bool = false;
- let searchString: String = EmptyStringConstant();
- if constexpr (isFastPath) {
- const maybeAtomRegexp = UnsafeCast<JSRegExp>(regexp);
- const data = UnsafeCast<FixedArray>(maybeAtomRegexp.data);
- if (UnsafeCast<Smi>(data.objects[kTagIndex]) == kATOM) {
- searchString = UnsafeCast<String>(data.objects[kAtomPatternIndex]);
- atom = true;
- }
+ // Check if the regexp is an ATOM type. If so, then keep the literal string
+ // to search for so that we can avoid calling substring in the loop below.
+ let atom: bool = false;
+ let searchString: String = EmptyStringConstant();
+ if constexpr (isFastPath) {
+ const maybeAtomRegexp = UnsafeCast<JSRegExp>(regexp);
+ const data = UnsafeCast<FixedArray>(maybeAtomRegexp.data);
+ if (UnsafeCast<Smi>(data.objects[kTagIndex]) == kATOM) {
+ searchString = UnsafeCast<String>(data.objects[kAtomPatternIndex]);
+ atom = true;
}
+ }
- while (true) {
- let match: String = EmptyStringConstant();
- try {
- if constexpr (isFastPath) {
- // On the fast path, grab the matching string from the raw match index
- // array.
- const matchIndices: RegExpMatchInfo =
- RegExpPrototypeExecBodyWithoutResultFast(
- UnsafeCast<JSRegExp>(regexp), string) otherwise IfDidNotMatch;
- if (atom) {
- match = searchString;
- } else {
- const matchFrom = UnsafeLoadFixedArrayElement(
- matchIndices, kRegExpMatchInfoFirstCaptureIndex);
- const matchTo = UnsafeLoadFixedArrayElement(
- matchIndices, kRegExpMatchInfoFirstCaptureIndex + 1);
- match = SubString(
- string, UnsafeCast<Smi>(matchFrom), UnsafeCast<Smi>(matchTo));
- }
+ while (true) {
+ let match: String = EmptyStringConstant();
+ try {
+ if constexpr (isFastPath) {
+ // On the fast path, grab the matching string from the raw match index
+ // array.
+ const matchIndices: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(
+ UnsafeCast<JSRegExp>(regexp), string) otherwise IfDidNotMatch;
+ if (atom) {
+ match = searchString;
} else {
- assert(!isFastPath);
- const resultTemp = RegExpExec(regexp, string);
- if (resultTemp == Null) {
- goto IfDidNotMatch;
- }
- match = ToString_Inline(GetProperty(resultTemp, SmiConstant(0)));
+ const matchFrom = UnsafeLoadFixedArrayElement(
+ matchIndices, kRegExpMatchInfoFirstCaptureIndex);
+ const matchTo = UnsafeLoadFixedArrayElement(
+ matchIndices, kRegExpMatchInfoFirstCaptureIndex + 1);
+ match = SubString(
+ string, UnsafeCast<Smi>(matchFrom), UnsafeCast<Smi>(matchTo));
+ }
+ } else {
+ assert(!isFastPath);
+ const resultTemp = RegExpExec(regexp, string);
+ if (resultTemp == Null) {
+ goto IfDidNotMatch;
}
- goto IfDidMatch;
+ match = ToString_Inline(GetProperty(resultTemp, SmiConstant(0)));
}
- label IfDidNotMatch {
- return array.length == 0 ? Null : array.ToJSArray();
+ goto IfDidMatch;
+ } label IfDidNotMatch {
+ return array.length == 0 ? Null : array.ToJSArray();
+ } label IfDidMatch {
+ // Store the match, growing the fixed array if needed.
+
+ array.Push(match);
+
+ // Advance last index if the match is the empty string.
+ const matchLength: Smi = match.length_smi;
+ if (matchLength != 0) {
+ continue;
+ }
+ let lastIndex = LoadLastIndex(regexp, isFastPath);
+ if constexpr (isFastPath) {
+ assert(TaggedIsPositiveSmi(lastIndex));
+ } else {
+ lastIndex = ToLength_Inline(lastIndex);
}
- label IfDidMatch {
- // Store the match, growing the fixed array if needed.
-
- array.Push(match);
-
- // Advance last index if the match is the empty string.
- const matchLength: Smi = match.length_smi;
- if (matchLength != 0) {
- continue;
- }
- let lastIndex = LoadLastIndex(regexp, isFastPath);
- if constexpr (isFastPath) {
- assert(TaggedIsPositiveSmi(lastIndex));
- } else {
- lastIndex = ToLength_Inline(lastIndex);
- }
-
- const newLastIndex: Number = AdvanceStringIndex(
- string, UnsafeCast<Number>(lastIndex), isUnicode, isFastPath);
-
- if constexpr (isFastPath) {
- // On the fast path, we can be certain that lastIndex can never be
- // incremented to overflow the Smi range since the maximal string
- // length is less than the maximal Smi value.
- const kMaxStringLengthFitsSmi: constexpr bool =
- kStringMaxLengthUintptr < kSmiMaxValue;
- StaticAssert(kMaxStringLengthFitsSmi);
- assert(TaggedIsPositiveSmi(newLastIndex));
- }
- StoreLastIndex(regexp, newLastIndex, isFastPath);
+ const newLastIndex: Number = AdvanceStringIndex(
+ string, UnsafeCast<Number>(lastIndex), isUnicode, isFastPath);
+
+ if constexpr (isFastPath) {
+ // On the fast path, we can be certain that lastIndex can never be
+ // incremented to overflow the Smi range since the maximal string
+ // length is less than the maximal Smi value.
+ const kMaxStringLengthFitsSmi: constexpr bool =
+ kStringMaxLengthUintptr < kSmiMaxValue;
+ StaticAssert(kMaxStringLengthFitsSmi);
+ assert(TaggedIsPositiveSmi(newLastIndex));
}
- }
- VerifiedUnreachable();
+ StoreLastIndex(regexp, newLastIndex, isFastPath);
+ }
}
- transitioning macro FastRegExpPrototypeMatchBody(implicit context: Context)(
- receiver: FastJSRegExp, string: String): JSAny {
- return RegExpPrototypeMatchBody(receiver, string, true);
- }
+ VerifiedUnreachable();
+}
- transitioning macro SlowRegExpPrototypeMatchBody(implicit context: Context)(
- receiver: JSReceiver, string: String): JSAny {
- return RegExpPrototypeMatchBody(receiver, string, false);
- }
+transitioning macro FastRegExpPrototypeMatchBody(implicit context: Context)(
+ receiver: FastJSRegExp, string: String): JSAny {
+ return RegExpPrototypeMatchBody(receiver, string, true);
+}
- // Helper that skips a few initial checks. and assumes...
- // 1) receiver is a "fast" RegExp
- // 2) pattern is a string
- transitioning builtin RegExpMatchFast(implicit context: Context)(
- receiver: FastJSRegExp, string: String): JSAny {
- return FastRegExpPrototypeMatchBody(receiver, string);
- }
+transitioning macro SlowRegExpPrototypeMatchBody(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ return RegExpPrototypeMatchBody(receiver, string, false);
+}
- // ES#sec-regexp.prototype-@@match
- // RegExp.prototype [ @@match ] ( string )
- transitioning javascript builtin RegExpPrototypeMatch(
- js-implicit context: NativeContext,
- receiver: JSAny)(string: JSAny): JSAny {
- ThrowIfNotJSReceiver(
- receiver, MessageTemplate::kIncompatibleMethodReceiver,
- 'RegExp.prototype.@@match');
- const receiver = UnsafeCast<JSReceiver>(receiver);
- const string: String = ToString_Inline(string);
-
- // Strict: Reads global and unicode properties.
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- const fastRegExp = Cast<FastJSRegExp>(receiver)
- otherwise return SlowRegExpPrototypeMatchBody(receiver, string);
-
- // TODO(pwong): Could be optimized to remove the overhead of calling the
- // builtin (at the cost of a larger builtin).
- return RegExpMatchFast(fastRegExp, string);
- }
+// Helper that skips a few initial checks. and assumes...
+// 1) receiver is a "fast" RegExp
+// 2) pattern is a string
+transitioning builtin RegExpMatchFast(implicit context: Context)(
+ receiver: FastJSRegExp, string: String): JSAny {
+ return FastRegExpPrototypeMatchBody(receiver, string);
+}
+
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+transitioning javascript builtin RegExpPrototypeMatch(
+ js-implicit context: NativeContext, receiver: JSAny)(string: JSAny): JSAny {
+ ThrowIfNotJSReceiver(
+ receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ 'RegExp.prototype.@@match');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+ const string: String = ToString_Inline(string);
+
+ // Strict: Reads global and unicode properties.
+ // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+ // permissive.
+ const fastRegExp = Cast<FastJSRegExp>(receiver)
+ otherwise return SlowRegExpPrototypeMatchBody(receiver, string);
+
+ // TODO(pwong): Could be optimized to remove the overhead of calling the
+ // builtin (at the cost of a larger builtin).
+ return RegExpMatchFast(fastRegExp, string);
+}
}
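
For the global case, RegExpPrototypeMatchBody above resets lastIndex, collects one match string per iteration, and nudges lastIndex forward whenever a match is empty so the loop terminates. A rough JS-level sketch of that loop, ignoring the ATOM and fast-path specifics (the names and the simple +1 advance are illustrative; the builtin uses the code-point-aware AdvanceStringIndex):

    function regExpMatchGlobal(re: RegExp, s: string): string[] | null {
      if (!re.global) throw new Error("sketch assumes a global regexp");
      re.lastIndex = 0;
      const matches: string[] = [];
      while (true) {
        const m = re.exec(s);
        if (m === null) return matches.length === 0 ? null : matches;
        matches.push(m[0]);
        if (m[0] === "") {
          // Empty match: exec left lastIndex in place, so advance it manually.
          re.lastIndex += 1;
        }
      }
    }
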
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index fc9d13cf3c..c59a41b27f 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -6,259 +6,252 @@
namespace regexp {
- extern builtin
- SubString(implicit context: Context)(String, Smi, Smi): String;
-
- extern runtime RegExpExecMultiple(implicit context: Context)(
- JSRegExp, String, RegExpMatchInfo, JSArray): Null|JSArray;
- extern transitioning runtime
- RegExpReplaceRT(Context, JSReceiver, String, Object): String;
- extern transitioning runtime
- StringBuilderConcat(implicit context: Context)(JSArray, Smi, String): String;
- extern transitioning runtime
- StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
- String, JSRegExp, Callable): String;
-
- transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context:
- Context)(
- matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable) {
- let matchStart: Smi = 0;
- for (let i: intptr = 0; i < matchesLength; i++) {
- typeswitch (matchesElements.objects[i]) {
- // Element represents a slice.
- case (elSmi: Smi): {
- // The slice's match start and end is either encoded as one or two
- // smis. A positive smi indicates a single smi encoding (see
- // ReplacementStringBuilder::AddSubjectSlice()).
- if (elSmi > 0) {
- // For single smi encoding, see
- // StringBuilderSubstringLength::encode() and
- // StringBuilderSubstringPosition::encode().
- const elInt: intptr = Convert<intptr>(elSmi);
- const newMatchStart: intptr = (elInt >> 11) + (elInt & 0x7FF);
- matchStart = Convert<Smi>(newMatchStart);
- } else {
- // For two smi encoding, the length is negative followed by the
- // match start.
- const nextEl: Smi = UnsafeCast<Smi>(matchesElements.objects[++i]);
- matchStart = nextEl - elSmi;
- }
- }
- // Element represents the matched substring, which is then passed to the
- // replace function.
- case (elString: String): {
- const replacementObj: JSAny =
- Call(context, replaceFn, Undefined, elString, matchStart, string);
- const replacement: String = ToString_Inline(replacementObj);
- matchesElements.objects[i] = replacement;
- matchStart += elString.length_smi;
- }
- case (Object): deferred {
- unreachable;
+extern builtin
+SubString(implicit context: Context)(String, Smi, Smi): String;
+
+extern runtime RegExpExecMultiple(implicit context: Context)(
+ JSRegExp, String, RegExpMatchInfo, JSArray): Null|JSArray;
+extern transitioning runtime
+RegExpReplaceRT(Context, JSReceiver, String, Object): String;
+extern transitioning runtime
+StringBuilderConcat(implicit context: Context)(JSArray, Smi, String): String;
+extern transitioning runtime
+StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
+ String, JSRegExp, Callable): String;
+
+transitioning macro RegExpReplaceCallableNoExplicitCaptures(
+ implicit context: Context)(
+ matchesElements: FixedArray, matchesLength: intptr, string: String,
+ replaceFn: Callable) {
+ let matchStart: Smi = 0;
+ for (let i: intptr = 0; i < matchesLength; i++) {
+ typeswitch (matchesElements.objects[i]) {
+ // Element represents a slice.
+ case (elSmi: Smi): {
+ // The slice's match start and end is either encoded as one or two
+ // smis. A positive smi indicates a single smi encoding (see
+ // ReplacementStringBuilder::AddSubjectSlice()).
+ if (elSmi > 0) {
+ // For single smi encoding, see
+ // StringBuilderSubstringLength::encode() and
+ // StringBuilderSubstringPosition::encode().
+ const elInt: intptr = Convert<intptr>(elSmi);
+ const newMatchStart: intptr = (elInt >> 11) + (elInt & 0x7FF);
+ matchStart = Convert<Smi>(newMatchStart);
+ } else {
+ // For two smi encoding, the length is negative followed by the
+ // match start.
+ const nextEl: Smi = UnsafeCast<Smi>(matchesElements.objects[++i]);
+ matchStart = nextEl - elSmi;
}
}
+ // Element represents the matched substring, which is then passed to the
+ // replace function.
+ case (elString: String): {
+ const replacementObj: JSAny =
+ Call(context, replaceFn, Undefined, elString, matchStart, string);
+ const replacement: String = ToString_Inline(replacementObj);
+ matchesElements.objects[i] = replacement;
+ matchStart += elString.length_smi;
+ }
+ case (Object): deferred {
+ unreachable;
+ }
}
}
+}
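
The Smi case above decodes subject slices that the runtime packed into a single small integer: judging by the `>> 11` / `& 0x7FF` masks, one field lives in the low 11 bits and the other in the high bits, and their sum is the index just past the slice, i.e. the next matchStart. A hypothetical round-trip in TypeScript; the field layout is inferred from the masks, so treat it as a sketch rather than the actual encoder:

    // Assumed packing: position in the high bits, length in the low 11 bits.
    function packSlice(position: number, length: number): number {
      return (position << 11) | (length & 0x7ff);
    }

    // Decode the way the builtin does: position + length.
    function nextMatchStart(encoded: number): number {
      return (encoded >> 11) + (encoded & 0x7ff);
    }

    // A slice starting at index 5 with length 3 ends at index 8.
    console.log(nextMatchStart(packSlice(5, 3))); // 8
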
- transitioning macro
- RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
- for (let i: intptr = 0; i < matchesLength; i++) {
- const elArray =
- Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
-
- // The JSArray is expanded into the function args by Reflect.apply().
- // TODO(jgruber): Remove indirection through Call->ReflectApply.
- const replacementObj: JSAny = Call(
- context, GetReflectApply(), Undefined, replaceFn, Undefined, elArray);
-
- // Overwrite the i'th element in the results with the string
- // we got back from the callback function.
- matchesElements.objects[i] = ToString_Inline(replacementObj);
- }
+transitioning macro
+RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
+ matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
+ for (let i: intptr = 0; i < matchesLength; i++) {
+ const elArray =
+ Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
+
+ // The JSArray is expanded into the function args by Reflect.apply().
+ // TODO(jgruber): Remove indirection through Call->ReflectApply.
+ const replacementObj: JSAny = Call(
+ context, GetReflectApply(), Undefined, replaceFn, Undefined, elArray);
+
+ // Overwrite the i'th element in the results with the string
+ // we got back from the callback function.
+ matchesElements.objects[i] = ToString_Inline(replacementObj);
}
+}
- transitioning macro RegExpReplaceFastGlobalCallable(implicit context:
- Context)(
- regexp: FastJSRegExp, string: String, replaceFn: Callable): String {
- regexp.lastIndex = 0;
-
- const kInitialCapacity: intptr = 16;
- const kInitialLength: Smi = 0;
- const result: Null|JSArray = RegExpExecMultiple(
- regexp, string, GetRegExpLastMatchInfo(),
- AllocateJSArray(
- ElementsKind::PACKED_ELEMENTS, GetFastPackedElementsJSArrayMap(),
- kInitialCapacity, kInitialLength));
-
- regexp.lastIndex = 0;
-
- // If no matches, return the subject string.
- if (result == Null) return string;
-
- const matches: JSArray = UnsafeCast<JSArray>(result);
- const matchesLength: Smi = Cast<Smi>(matches.length) otherwise unreachable;
- const matchesLengthInt: intptr = Convert<intptr>(matchesLength);
- const matchesElements: FixedArray =
- UnsafeCast<FixedArray>(matches.elements);
-
- // Reload last match info since it might have changed.
- const nofCaptures: Smi = GetRegExpLastMatchInfo().NumberOfCaptures();
-
- // If the number of captures is two then there are no explicit captures in
- // the regexp, just the implicit capture that captures the whole match. In
- // this case we can simplify quite a bit and end up with something faster.
- if (nofCaptures == 2) {
- RegExpReplaceCallableNoExplicitCaptures(
- matchesElements, matchesLengthInt, string, replaceFn);
- } else {
- RegExpReplaceCallableWithExplicitCaptures(
- matchesElements, matchesLengthInt, replaceFn);
- }
-
- return StringBuilderConcat(matches, matchesLength, string);
+transitioning macro RegExpReplaceFastGlobalCallable(implicit context: Context)(
+ regexp: FastJSRegExp, string: String, replaceFn: Callable): String {
+ regexp.lastIndex = 0;
+
+ const kInitialCapacity: intptr = 16;
+ const kInitialLength: Smi = 0;
+ const result: Null|JSArray = RegExpExecMultiple(
+ regexp, string, GetRegExpLastMatchInfo(),
+ AllocateJSArray(
+ ElementsKind::PACKED_ELEMENTS, GetFastPackedElementsJSArrayMap(),
+ kInitialCapacity, kInitialLength));
+
+ regexp.lastIndex = 0;
+
+ // If no matches, return the subject string.
+ if (result == Null) return string;
+
+ const matches: JSArray = UnsafeCast<JSArray>(result);
+ const matchesLength: Smi = Cast<Smi>(matches.length) otherwise unreachable;
+ const matchesLengthInt: intptr = Convert<intptr>(matchesLength);
+ const matchesElements: FixedArray = UnsafeCast<FixedArray>(matches.elements);
+
+ // Reload last match info since it might have changed.
+ const nofCaptures: Smi = GetRegExpLastMatchInfo().NumberOfCaptures();
+
+ // If the number of captures is two then there are no explicit captures in
+ // the regexp, just the implicit capture that captures the whole match. In
+ // this case we can simplify quite a bit and end up with something faster.
+ if (nofCaptures == 2) {
+ RegExpReplaceCallableNoExplicitCaptures(
+ matchesElements, matchesLengthInt, string, replaceFn);
+ } else {
+ RegExpReplaceCallableWithExplicitCaptures(
+ matchesElements, matchesLengthInt, replaceFn);
}
- transitioning macro RegExpReplaceFastString(implicit context: Context)(
- regexp: JSRegExp, string: String, replaceString: String): String {
- // The fast path is reached only if {receiver} is an unmodified JSRegExp
- // instance, {replace_value} is non-callable, and ToString({replace_value})
- // does not contain '$', i.e. we're doing a simple string replacement.
- let result: String = kEmptyString;
- let lastMatchEnd: Smi = 0;
- let unicode: bool = false;
- const replaceLength: Smi = replaceString.length_smi;
- const fastRegexp = UnsafeCast<FastJSRegExp>(regexp);
- const global: bool = fastRegexp.global;
-
- if (global) {
- unicode = fastRegexp.unicode;
- fastRegexp.lastIndex = 0;
- }
-
- while (true) {
- const match: RegExpMatchInfo =
- RegExpPrototypeExecBodyWithoutResultFast(regexp, string)
- otherwise break;
- const matchStart: Smi = match.GetStartOfCapture(0);
- const matchEnd: Smi = match.GetEndOfCapture(0);
-
- // TODO(jgruber): We could skip many of the checks that using SubString
- // here entails.
- result = result + SubString(string, lastMatchEnd, matchStart);
- lastMatchEnd = matchEnd;
-
- if (replaceLength != 0) result = result + replaceString;
-
- // Non-global case ends here after the first replacement.
- if (!global) break;
-
- // If match is the empty string, we have to increment lastIndex.
- if (matchEnd == matchStart) {
- typeswitch (regexp) {
- case (fastRegexp: FastJSRegExp): {
- fastRegexp.lastIndex =
- AdvanceStringIndexFast(string, fastRegexp.lastIndex, unicode);
- }
- case (Object): {
- const lastIndex: JSAny = SlowLoadLastIndex(regexp);
- const thisIndex: Number = ToLength_Inline(lastIndex);
- const nextIndex: Number =
- AdvanceStringIndexSlow(string, thisIndex, unicode);
- SlowStoreLastIndex(regexp, nextIndex);
- }
- }
- }
- }
+ return StringBuilderConcat(matches, matchesLength, string);
+}
- return result + SubString(string, lastMatchEnd, string.length_smi);
+transitioning macro RegExpReplaceFastString(implicit context: Context)(
+ regexp: JSRegExp, string: String, replaceString: String): String {
+ // The fast path is reached only if {receiver} is an unmodified JSRegExp
+ // instance, {replace_value} is non-callable, and ToString({replace_value})
+ // does not contain '$', i.e. we're doing a simple string replacement.
+ let result: String = kEmptyString;
+ let lastMatchEnd: Smi = 0;
+ let unicode: bool = false;
+ const replaceLength: Smi = replaceString.length_smi;
+ const fastRegexp = UnsafeCast<FastJSRegExp>(regexp);
+ const global: bool = fastRegexp.global;
+
+ if (global) {
+ unicode = fastRegexp.unicode;
+ fastRegexp.lastIndex = 0;
}
- transitioning builtin RegExpReplace(implicit context: Context)(
- regexp: FastJSRegExp, string: String, replaceValue: JSAny): String {
- // TODO(pwong): Remove assert when all callers (StringPrototypeReplace) are
- // from Torque.
- assert(Is<FastJSRegExp>(regexp));
-
- // 2. Is {replace_value} callable?
- typeswitch (replaceValue) {
- case (replaceFn: Callable): {
- return regexp.global ?
- RegExpReplaceFastGlobalCallable(regexp, string, replaceFn) :
- StringReplaceNonGlobalRegExpWithFunction(string, regexp, replaceFn);
- }
- case (JSAny): {
- const stableRegexp: JSRegExp = regexp;
- const replaceString: String = ToString_Inline(replaceValue);
-
- try {
- // ToString(replaceValue) could potentially change the shape of the
- // RegExp object. Recheck that we are still on the fast path and bail
- // to runtime otherwise.
- const fastRegexp = Cast<FastJSRegExp>(stableRegexp) otherwise Runtime;
- if (StringIndexOf(
- replaceString, SingleCharacterStringConstant('$'), 0) != -1) {
- goto Runtime;
- }
-
- return RegExpReplaceFastString(fastRegexp, string, replaceString);
+ while (true) {
+ const match: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(regexp, string)
+ otherwise break;
+ const matchStart: Smi = match.GetStartOfCapture(0);
+ const matchEnd: Smi = match.GetEndOfCapture(0);
+
+ // TODO(jgruber): We could skip many of the checks that using SubString
+ // here entails.
+ result = result + SubString(string, lastMatchEnd, matchStart);
+ lastMatchEnd = matchEnd;
+
+ if (replaceLength != 0) result = result + replaceString;
+
+ // Non-global case ends here after the first replacement.
+ if (!global) break;
+
+ // If match is the empty string, we have to increment lastIndex.
+ if (matchEnd == matchStart) {
+ typeswitch (regexp) {
+ case (fastRegexp: FastJSRegExp): {
+ fastRegexp.lastIndex =
+ AdvanceStringIndexFast(string, fastRegexp.lastIndex, unicode);
}
- label Runtime deferred {
- return RegExpReplaceRT(context, stableRegexp, string, replaceString);
+ case (Object): {
+ const lastIndex: JSAny = SlowLoadLastIndex(regexp);
+ const thisIndex: Number = ToLength_Inline(lastIndex);
+ const nextIndex: Number =
+ AdvanceStringIndexSlow(string, thisIndex, unicode);
+ SlowStoreLastIndex(regexp, nextIndex);
}
}
}
}
- const kRegExpReplaceCalledOnSlowRegExp: constexpr int31
- generates 'v8::Isolate::kRegExpReplaceCalledOnSlowRegExp';
-
- transitioning javascript builtin RegExpPrototypeReplace(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- const methodName: constexpr string = 'RegExp.prototype.@@replace';
-
- // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
- //
- // if (!IsFastRegExp(receiver)) CallRuntime(RegExpReplace)
- // if (IsCallable(replace)) {
- // if (IsGlobal(receiver)) {
- // // Called 'fast-path' but contains several runtime calls.
- // RegExpReplaceFastGlobalCallable()
- // } else {
- // CallRuntime(StringReplaceNonGlobalRegExpWithFunction)
- // }
- // } else {
- // if (replace.contains("$")) {
- // CallRuntime(RegExpReplace)
- // } else {
- // RegExpReplaceFastString()
- // }
- // }
-
- const string: JSAny = arguments[0];
- const replaceValue: JSAny = arguments[1];
-
- // Let rx be the this value.
- // If Type(rx) is not Object, throw a TypeError exception.
- const rx = Cast<JSReceiver>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, methodName);
-
- // Let S be ? ToString(string).
- const s = ToString_Inline(string);
-
- // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
- try {
- const fastRx: FastJSRegExp = Cast<FastJSRegExp>(rx) otherwise Runtime;
- return RegExpReplace(fastRx, s, replaceValue);
+ return result + SubString(string, lastMatchEnd, string.length_smi);
+}
+
+transitioning builtin RegExpReplace(implicit context: Context)(
+ regexp: FastJSRegExp, string: String, replaceValue: JSAny): String {
+ // TODO(pwong): Remove assert when all callers (StringPrototypeReplace) are
+ // from Torque.
+ assert(Is<FastJSRegExp>(regexp));
+
+ // 2. Is {replace_value} callable?
+ typeswitch (replaceValue) {
+ case (replaceFn: Callable): {
+ return regexp.global ?
+ RegExpReplaceFastGlobalCallable(regexp, string, replaceFn) :
+ StringReplaceNonGlobalRegExpWithFunction(string, regexp, replaceFn);
}
- label Runtime deferred {
- IncrementUseCounter(
- context, SmiConstant(kRegExpReplaceCalledOnSlowRegExp));
- return RegExpReplaceRT(context, rx, s, replaceValue);
+ case (JSAny): {
+ const stableRegexp: JSRegExp = regexp;
+ const replaceString: String = ToString_Inline(replaceValue);
+
+ try {
+ // ToString(replaceValue) could potentially change the shape of the
+ // RegExp object. Recheck that we are still on the fast path and bail
+ // to runtime otherwise.
+ const fastRegexp = Cast<FastJSRegExp>(stableRegexp) otherwise Runtime;
+ if (StringIndexOf(
+ replaceString, SingleCharacterStringConstant('$'), 0) != -1) {
+ goto Runtime;
+ }
+
+ return RegExpReplaceFastString(fastRegexp, string, replaceString);
+ } label Runtime deferred {
+ return RegExpReplaceRT(context, stableRegexp, string, replaceString);
+ }
}
}
+}
+const kRegExpReplaceCalledOnSlowRegExp: constexpr int31
+ generates 'v8::Isolate::kRegExpReplaceCalledOnSlowRegExp';
+
+transitioning javascript builtin RegExpPrototypeReplace(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ const methodName: constexpr string = 'RegExp.prototype.@@replace';
+
+ // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
+ //
+ // if (!IsFastRegExp(receiver)) CallRuntime(RegExpReplace)
+ // if (IsCallable(replace)) {
+ // if (IsGlobal(receiver)) {
+ // // Called 'fast-path' but contains several runtime calls.
+ // RegExpReplaceFastGlobalCallable()
+ // } else {
+ // CallRuntime(StringReplaceNonGlobalRegExpWithFunction)
+ // }
+ // } else {
+ // if (replace.contains("$")) {
+ // CallRuntime(RegExpReplace)
+ // } else {
+ // RegExpReplaceFastString()
+ // }
+ // }
+
+ const string: JSAny = arguments[0];
+ const replaceValue: JSAny = arguments[1];
+
+ // Let rx be the this value.
+ // If Type(rx) is not Object, throw a TypeError exception.
+ const rx = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, methodName);
+
+ // Let S be ? ToString(string).
+ const s = ToString_Inline(string);
+
+ // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
+ try {
+ const fastRx: FastJSRegExp = Cast<FastJSRegExp>(rx) otherwise Runtime;
+ return RegExpReplace(fastRx, s, replaceValue);
+ } label Runtime deferred {
+ IncrementUseCounter(context, SmiConstant(kRegExpReplaceCalledOnSlowRegExp));
+ return RegExpReplaceRT(context, rx, s, replaceValue);
+ }
+}
}
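
RegExpReplaceFastString above is the plain string-replacement loop: copy the unmatched stretch, append the replacement, and for global regexps keep going, bumping lastIndex whenever a match is empty. A non-authoritative JS-level sketch of that shape, leaning on RegExp.prototype.exec instead of the raw match info and on a simple +1 advance instead of AdvanceStringIndex:

    function replaceWithPlainString(re: RegExp, s: string, replacement: string): string {
      let result = "";
      let lastMatchEnd = 0;
      if (re.global) re.lastIndex = 0;
      while (true) {
        const m = re.exec(s);
        if (m === null) break;
        const matchStart = m.index;
        const matchEnd = matchStart + m[0].length;
        result += s.slice(lastMatchEnd, matchStart) + replacement;
        lastMatchEnd = matchEnd;
        // Non-global case ends after the first replacement.
        if (!re.global) break;
        // Empty match: advance lastIndex so the loop makes progress.
        if (matchEnd === matchStart) re.lastIndex += 1;
      }
      return result + s.slice(lastMatchEnd);
    }

    console.log(replaceWithPlainString(/a/g, "banana", "o")); // "bonono"
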
diff --git a/deps/v8/src/builtins/regexp-search.tq b/deps/v8/src/builtins/regexp-search.tq
index 14fb9f9b03..b70d23a0dd 100644
--- a/deps/v8/src/builtins/regexp-search.tq
+++ b/deps/v8/src/builtins/regexp-search.tq
@@ -6,103 +6,101 @@
namespace regexp {
- transitioning macro
- RegExpPrototypeSearchBodyFast(implicit context: Context)(
- regexp: JSRegExp, string: String): JSAny {
- assert(IsFastRegExpPermissive(regexp));
-
- // Grab the initial value of last index.
- const previousLastIndex: Smi = FastLoadLastIndex(regexp);
-
- // Ensure last index is 0.
- FastStoreLastIndex(regexp, 0);
-
- // Call exec.
- try {
- const matchIndices: RegExpMatchInfo =
- RegExpPrototypeExecBodyWithoutResultFast(
- UnsafeCast<JSRegExp>(regexp), string)
- otherwise DidNotMatch;
-
- // Successful match.
- // Reset last index.
- FastStoreLastIndex(regexp, previousLastIndex);
-
- // Return the index of the match.
- return UnsafeCast<Smi>(
- matchIndices.objects[kRegExpMatchInfoFirstCaptureIndex]);
- }
- label DidNotMatch {
- // Reset last index and return -1.
- FastStoreLastIndex(regexp, previousLastIndex);
- return SmiConstant(-1);
- }
- }
+transitioning macro
+RegExpPrototypeSearchBodyFast(implicit context: Context)(
+ regexp: JSRegExp, string: String): JSAny {
+ assert(IsFastRegExpPermissive(regexp));
+
+ // Grab the initial value of last index.
+ const previousLastIndex: Smi = FastLoadLastIndex(regexp);
+
+ // Ensure last index is 0.
+ FastStoreLastIndex(regexp, 0);
+
+ // Call exec.
+ try {
+ const matchIndices: RegExpMatchInfo =
+ RegExpPrototypeExecBodyWithoutResultFast(
+ UnsafeCast<JSRegExp>(regexp), string)
+ otherwise DidNotMatch;
- extern macro RegExpBuiltinsAssembler::BranchIfRegExpResult(
- implicit context: Context)(Object): never labels IsUnmodified,
- IsModified;
+ // Successful match.
+ // Reset last index.
+ FastStoreLastIndex(regexp, previousLastIndex);
- macro
- IsRegExpResult(implicit context: Context)(execResult: HeapObject): bool {
- BranchIfRegExpResult(execResult) otherwise return true, return false;
+ // Return the index of the match.
+ return UnsafeCast<Smi>(
+ matchIndices.objects[kRegExpMatchInfoFirstCaptureIndex]);
+ } label DidNotMatch {
+ // Reset last index and return -1.
+ FastStoreLastIndex(regexp, previousLastIndex);
+ return SmiConstant(-1);
}
+}
- transitioning macro RegExpPrototypeSearchBodySlow(implicit context: Context)(
- regexp: JSReceiver, string: String): JSAny {
- // Grab the initial value of last index.
- const previousLastIndex = SlowLoadLastIndex(regexp);
- const smiZero: Smi = 0;
+extern macro RegExpBuiltinsAssembler::BranchIfRegExpResult(
+ implicit context: Context)(Object): never labels IsUnmodified,
+ IsModified;
- // Ensure last index is 0.
- if (!SameValue(previousLastIndex, smiZero)) {
- SlowStoreLastIndex(regexp, smiZero);
- }
+macro
+IsRegExpResult(implicit context: Context)(execResult: HeapObject): bool {
+ BranchIfRegExpResult(execResult) otherwise return true, return false;
+}
- // Call exec.
- const execResult = RegExpExec(regexp, string);
+transitioning macro RegExpPrototypeSearchBodySlow(implicit context: Context)(
+ regexp: JSReceiver, string: String): JSAny {
+ // Grab the initial value of last index.
+ const previousLastIndex = SlowLoadLastIndex(regexp);
+ const smiZero: Smi = 0;
- // Reset last index if necessary.
- const currentLastIndex = SlowLoadLastIndex(regexp);
- if (!SameValue(currentLastIndex, previousLastIndex)) {
- SlowStoreLastIndex(regexp, previousLastIndex);
- }
+ // Ensure last index is 0.
+ if (!SameValue(previousLastIndex, smiZero)) {
+ SlowStoreLastIndex(regexp, smiZero);
+ }
- // Return -1 if no match was found.
- if (execResult == Null) {
- return SmiConstant(-1);
- }
+ // Call exec.
+ const execResult = RegExpExec(regexp, string);
- // Return the index of the match.
- const fastExecResult = Cast<JSRegExpResult>(execResult)
- otherwise return GetProperty(execResult, 'index');
- return fastExecResult.index;
+ // Reset last index if necessary.
+ const currentLastIndex = SlowLoadLastIndex(regexp);
+ if (!SameValue(currentLastIndex, previousLastIndex)) {
+ SlowStoreLastIndex(regexp, previousLastIndex);
}
- // Helper that skips a few initial checks. and assumes...
- // 1) receiver is a "fast permissive" RegExp
- // 2) pattern is a string
- transitioning builtin RegExpSearchFast(implicit context: Context)(
- receiver: JSRegExp, string: String): JSAny {
- return RegExpPrototypeSearchBodyFast(receiver, string);
+ // Return -1 if no match was found.
+ if (execResult == Null) {
+ return SmiConstant(-1);
}
- // ES#sec-regexp.prototype-@@search
- // RegExp.prototype [ @@search ] ( string )
- transitioning javascript builtin RegExpPrototypeSearch(
- js-implicit context: NativeContext,
- receiver: JSAny)(string: JSAny): JSAny {
- ThrowIfNotJSReceiver(
- receiver, MessageTemplate::kIncompatibleMethodReceiver,
- 'RegExp.prototype.@@search');
- const receiver = UnsafeCast<JSReceiver>(receiver);
- const string: String = ToString_Inline(string);
-
- if (IsFastRegExpPermissive(receiver)) {
- // TODO(pwong): Could be optimized to remove the overhead of calling the
- // builtin (at the cost of a larger builtin).
- return RegExpSearchFast(UnsafeCast<JSRegExp>(receiver), string);
- }
- return RegExpPrototypeSearchBodySlow(receiver, string);
+ // Return the index of the match.
+ const fastExecResult = Cast<JSRegExpResult>(execResult)
+ otherwise return GetProperty(execResult, 'index');
+ return fastExecResult.index;
+}
+
+// Helper that skips a few initial checks. and assumes...
+// 1) receiver is a "fast permissive" RegExp
+// 2) pattern is a string
+transitioning builtin RegExpSearchFast(implicit context: Context)(
+ receiver: JSRegExp, string: String): JSAny {
+ return RegExpPrototypeSearchBodyFast(receiver, string);
+}
+
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+transitioning javascript builtin RegExpPrototypeSearch(
+ js-implicit context: NativeContext, receiver: JSAny)(string: JSAny): JSAny {
+ ThrowIfNotJSReceiver(
+ receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ 'RegExp.prototype.@@search');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+ const string: String = ToString_Inline(string);
+
+ if (IsFastRegExpPermissive(receiver)) {
+ // TODO(pwong): Could be optimized to remove the overhead of calling the
+ // builtin (at the cost of a larger builtin).
+ return RegExpSearchFast(UnsafeCast<JSRegExp>(receiver), string);
}
+ return RegExpPrototypeSearchBodySlow(receiver, string);
+}
}
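
Both search bodies above follow the same protocol: remember lastIndex, run exec from position 0, restore lastIndex, and report the match position or -1. A compact sketch in JS terms (the slow path's SameValue checks before storing are omitted):

    function regExpSearch(re: RegExp, s: string): number {
      const previousLastIndex = re.lastIndex;
      re.lastIndex = 0;
      const result = re.exec(s);
      re.lastIndex = previousLastIndex;
      return result === null ? -1 : result.index;
    }
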
diff --git a/deps/v8/src/builtins/regexp-source.tq b/deps/v8/src/builtins/regexp-source.tq
index 009e5181dc..5f9c6b22c3 100644
--- a/deps/v8/src/builtins/regexp-source.tq
+++ b/deps/v8/src/builtins/regexp-source.tq
@@ -6,22 +6,22 @@
namespace regexp {
- // ES6 21.2.5.10.
- // ES #sec-get-regexp.prototype.source
- transitioning javascript builtin RegExpPrototypeSourceGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- typeswitch (receiver) {
- case (receiver: JSRegExp): {
- return receiver.source;
- }
- case (Object): {
- }
+// ES6 21.2.5.10.
+// ES #sec-get-regexp.prototype.source
+transitioning javascript builtin RegExpPrototypeSourceGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ typeswitch (receiver) {
+ case (receiver: JSRegExp): {
+ return receiver.source;
}
- if (!IsReceiverInitialRegExpPrototype(receiver)) {
- const methodName: constexpr string = 'RegExp.prototype.source';
- ThrowTypeError(MessageTemplate::kRegExpNonRegExp, methodName);
+ case (Object): {
}
- IncrementUseCounter(context, SmiConstant(kRegExpPrototypeSourceGetter));
- return '(?:)';
}
+ if (!IsReceiverInitialRegExpPrototype(receiver)) {
+ const methodName: constexpr string = 'RegExp.prototype.source';
+ ThrowTypeError(MessageTemplate::kRegExpNonRegExp, methodName);
+ }
+ IncrementUseCounter(context, SmiConstant(kRegExpPrototypeSourceGetter));
+ return '(?:)';
+}
}
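
The getter above has three outcomes: a real JSRegExp receiver returns its source, the initial RegExp.prototype returns the placeholder '(?:)' (with a use-counter bump), and any other receiver throws. A small example of the observable behaviour:

    const getSource = Object.getOwnPropertyDescriptor(RegExp.prototype, "source")!.get!;
    console.log(getSource.call(/ab+c/));           // "ab+c"
    console.log(getSource.call(RegExp.prototype)); // "(?:)"
    // getSource.call({}) throws a TypeError (kRegExpNonRegExp).
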
diff --git a/deps/v8/src/builtins/regexp-split.tq b/deps/v8/src/builtins/regexp-split.tq
index e4092803ee..47ff214130 100644
--- a/deps/v8/src/builtins/regexp-split.tq
+++ b/deps/v8/src/builtins/regexp-split.tq
@@ -5,70 +5,68 @@
#include 'src/builtins/builtins-regexp-gen.h'
namespace runtime {
- extern transitioning runtime
- RegExpSplit(implicit context: Context)(JSReceiver, String, Object): JSAny;
+extern transitioning runtime
+RegExpSplit(implicit context: Context)(JSReceiver, String, Object): JSAny;
} // namespace runtime
namespace regexp {
- const kMaxValueSmi: constexpr int31
- generates 'Smi::kMaxValue';
+const kMaxValueSmi: constexpr int31
+ generates 'Smi::kMaxValue';
- extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
- implicit context: Context)(JSRegExp, String, Smi): JSArray;
+extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
+ implicit context: Context)(JSRegExp, String, Smi): JSArray;
- // Helper that skips a few initial checks.
- transitioning builtin
- RegExpSplit(implicit context: Context)(
- regexp: FastJSRegExp, string: String, limit: JSAny): JSAny {
- let sanitizedLimit: Smi;
+// Helper that skips a few initial checks.
+transitioning builtin
+RegExpSplit(implicit context: Context)(
+ regexp: FastJSRegExp, string: String, limit: JSAny): JSAny {
+ let sanitizedLimit: Smi;
- // We need to be extra-strict and require the given limit to be either
- // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
- // that might move us onto the slow path, resulting in ordering spec
- // violations (see https://crbug.com/801171).
+ // We need to be extra-strict and require the given limit to be either
+ // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
+ // that might move us onto the slow path, resulting in ordering spec
+ // violations (see https://crbug.com/801171).
- if (limit == Undefined) {
- // TODO(jgruber): In this case, we can probably avoid generation of limit
- // checks in Generate_RegExpPrototypeSplitBody.
- sanitizedLimit = SmiConstant(kMaxValueSmi);
- } else if (!TaggedIsPositiveSmi(limit)) {
- return runtime::RegExpSplit(regexp, string, limit);
- } else {
- sanitizedLimit = UnsafeCast<Smi>(limit);
- }
-
- // Due to specific shortcuts we take on the fast path (specifically, we
- // don't allocate a new regexp instance as specced), we need to ensure that
- // the given regexp is non-sticky to avoid invalid results. See
- // crbug.com/v8/6706.
+ if (limit == Undefined) {
+ // TODO(jgruber): In this case, we can probably avoid generation of limit
+ // checks in Generate_RegExpPrototypeSplitBody.
+ sanitizedLimit = SmiConstant(kMaxValueSmi);
+ } else if (!TaggedIsPositiveSmi(limit)) {
+ return runtime::RegExpSplit(regexp, string, limit);
+ } else {
+ sanitizedLimit = UnsafeCast<Smi>(limit);
+ }
- if (FastFlagGetter(regexp, Flag::kSticky)) {
- return runtime::RegExpSplit(regexp, string, sanitizedLimit);
- }
+ // Due to specific shortcuts we take on the fast path (specifically, we
+ // don't allocate a new regexp instance as specced), we need to ensure that
+ // the given regexp is non-sticky to avoid invalid results. See
+ // crbug.com/v8/6706.
- // We're good to go on the fast path, which is inlined here.
- return RegExpPrototypeSplitBody(regexp, string, sanitizedLimit);
+ if (FastFlagGetter(regexp, Flag::kSticky)) {
+ return runtime::RegExpSplit(regexp, string, sanitizedLimit);
}
- // ES#sec-regexp.prototype-@@split
- // RegExp.prototype [ @@split ] ( string, limit )
- transitioning javascript builtin RegExpPrototypeSplit(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- ThrowIfNotJSReceiver(
- receiver, MessageTemplate::kIncompatibleMethodReceiver,
- 'RegExp.prototype.@@split');
- const receiver = UnsafeCast<JSReceiver>(receiver);
- const string: String = ToString_Inline(arguments[0]);
- const limit = arguments[1];
+ // We're good to go on the fast path, which is inlined here.
+ return RegExpPrototypeSplitBody(regexp, string, sanitizedLimit);
+}
- // Strict: Reads the flags property.
- // TODO(jgruber): Handle slow flag accesses on the fast path and make this
- // permissive.
- const fastRegExp = Cast<FastJSRegExp>(receiver)
- otherwise return runtime::RegExpSplit(receiver, string, limit);
- return RegExpSplit(fastRegExp, string, limit);
- }
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+transitioning javascript builtin RegExpPrototypeSplit(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ ThrowIfNotJSReceiver(
+ receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ 'RegExp.prototype.@@split');
+ const receiver = UnsafeCast<JSReceiver>(receiver);
+ const string: String = ToString_Inline(arguments[0]);
+ const limit = arguments[1];
+ // Strict: Reads the flags property.
+ // TODO(jgruber): Handle slow flag accesses on the fast path and make this
+ // permissive.
+ const fastRegExp = Cast<FastJSRegExp>(receiver)
+ otherwise return runtime::RegExpSplit(receiver, string, limit);
+ return RegExpSplit(fastRegExp, string, limit);
+}
}
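
The limit handling above is deliberately strict: undefined means "no limit" (Smi::kMaxValue in the builtin), and anything that is not already a non-negative small integer bails to the runtime so the ToUint32 conversion keeps its specced ordering; sticky regexps bail as well. A rough sketch of just the sanitization step; the Smi maximum used here is an assumption for illustration:

    const K_MAX_VALUE_SMI = 2 ** 30 - 1; // stand-in for Smi::kMaxValue

    function sanitizeSplitLimit(limit: unknown): number | "bail-to-runtime" {
      if (limit === undefined) return K_MAX_VALUE_SMI;
      const isPositiveSmi = typeof limit === "number" && Number.isInteger(limit) &&
          limit >= 0 && limit <= K_MAX_VALUE_SMI;
      return isPositiveSmi ? limit : "bail-to-runtime"; // i.e. runtime::RegExpSplit
    }
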
diff --git a/deps/v8/src/builtins/regexp-test.tq b/deps/v8/src/builtins/regexp-test.tq
index cd41823975..c83afd602d 100644
--- a/deps/v8/src/builtins/regexp-test.tq
+++ b/deps/v8/src/builtins/regexp-test.tq
@@ -6,30 +6,29 @@
namespace regexp {
- // ES#sec-regexp.prototype.test
- // RegExp.prototype.test ( S )
- transitioning javascript builtin RegExpPrototypeTest(
- js-implicit context: NativeContext,
- receiver: JSAny)(string: JSAny): JSAny {
- const methodName: constexpr string = 'RegExp.prototype.test';
- const receiver = Cast<JSReceiver>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, methodName);
- const str: String = ToString_Inline(string);
- if (IsFastRegExpPermissive(receiver)) {
- RegExpPrototypeExecBodyWithoutResultFast(
- UnsafeCast<JSRegExp>(receiver), str)
- otherwise return False;
- return True;
- }
- const matchIndices = RegExpExec(receiver, str);
- return SelectBooleanConstant(matchIndices != Null);
- }
-
- transitioning builtin RegExpPrototypeTestFast(implicit context: Context)(
- receiver: JSRegExp, string: String): Object {
- RegExpPrototypeExecBodyWithoutResultFast(receiver, string)
+// ES#sec-regexp.prototype.test
+// RegExp.prototype.test ( S )
+transitioning javascript builtin RegExpPrototypeTest(
+ js-implicit context: NativeContext, receiver: JSAny)(string: JSAny): JSAny {
+ const methodName: constexpr string = 'RegExp.prototype.test';
+ const receiver = Cast<JSReceiver>(receiver)
+ otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, methodName);
+ const str: String = ToString_Inline(string);
+ if (IsFastRegExpPermissive(receiver)) {
+ RegExpPrototypeExecBodyWithoutResultFast(
+ UnsafeCast<JSRegExp>(receiver), str)
otherwise return False;
return True;
}
+ const matchIndices = RegExpExec(receiver, str);
+ return SelectBooleanConstant(matchIndices != Null);
+}
+
+transitioning builtin RegExpPrototypeTestFast(implicit context: Context)(
+ receiver: JSRegExp, string: String): Object {
+ RegExpPrototypeExecBodyWithoutResultFast(receiver, string)
+ otherwise return False;
+ return True;
+}
}
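
Both paths of RegExpPrototypeTest above reduce to the same question: did exec produce a match. In JS terms:

    function regExpTest(re: RegExp, s: string): boolean {
      return re.exec(s) !== null;
    }
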
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index a48dce3863..21577b4763 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -6,417 +6,415 @@
namespace regexp {
- extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
- implicit context: Context)(HeapObject): never labels IsFast,
- IsSlow;
- macro IsFastRegExpStrict(implicit context: Context)(o: HeapObject): bool {
- BranchIfFastRegExp_Strict(o) otherwise return true, return false;
- }
+extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Strict(
+ implicit context: Context)(HeapObject): never labels IsFast,
+ IsSlow;
+macro IsFastRegExpStrict(implicit context: Context)(o: HeapObject): bool {
+ BranchIfFastRegExp_Strict(o) otherwise return true, return false;
+}
- extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive(
- implicit context: Context)(HeapObject): never labels IsFast,
- IsSlow;
+extern macro RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive(
+ implicit context: Context)(HeapObject): never labels IsFast,
+ IsSlow;
- @export
- macro IsFastRegExpPermissive(implicit context: Context)(o: HeapObject): bool {
- BranchIfFastRegExp_Permissive(o) otherwise return true, return false;
- }
+@export
+macro IsFastRegExpPermissive(implicit context: Context)(o: HeapObject): bool {
+ BranchIfFastRegExp_Permissive(o) otherwise return true, return false;
+}
- // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
- @export
- transitioning macro RegExpExec(implicit context: Context)(
- receiver: JSReceiver, string: String): JSAny {
- // Take the slow path of fetching the exec property, calling it, and
- // verifying its return value.
-
- const exec = GetProperty(receiver, 'exec');
-
- // Is {exec} callable?
- typeswitch (exec) {
- case (execCallable: Callable): {
- const result = Call(context, execCallable, receiver, string);
- if (result != Null) {
- ThrowIfNotJSReceiver(
- result, MessageTemplate::kInvalidRegExpExecResult, '');
- }
- return result;
- }
- case (Object): {
- const regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'RegExp.prototype.exec', receiver);
- return RegExpPrototypeExecSlow(regexp, string);
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+@export
+transitioning macro RegExpExec(implicit context: Context)(
+ receiver: JSReceiver, string: String): JSAny {
+ // Take the slow path of fetching the exec property, calling it, and
+ // verifying its return value.
+
+ const exec = GetProperty(receiver, 'exec');
+
+ // Is {exec} callable?
+ typeswitch (exec) {
+ case (execCallable: Callable): {
+ const result = Call(context, execCallable, receiver, string);
+ if (result != Null) {
+ ThrowIfNotJSReceiver(
+ result, MessageTemplate::kInvalidRegExpExecResult, '');
}
+ return result;
}
- }
-
- extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
- implicit context: Context)(JSRegExp, RegExpMatchInfo, String, Number):
- JSRegExpResult;
-
- const kGlobalOrSticky: constexpr int31
- generates 'JSRegExp::kGlobal | JSRegExp::kSticky';
-
- extern macro RegExpBuiltinsAssembler::RegExpExecInternal(
- implicit context: Context)(JSRegExp, String, Number, RegExpMatchInfo):
- HeapObject;
-
- // ES#sec-regexp.prototype.exec
- // RegExp.prototype.exec ( string )
- // Implements the core of RegExp.prototype.exec but without actually
- // constructing the JSRegExpResult. Returns a fixed array containing match
- // indices as returned by RegExpExecStub on successful match, and jumps to
- // IfDidNotMatch otherwise.
- transitioning macro RegExpPrototypeExecBodyWithoutResult(implicit context:
- Context)(
- regexp: JSRegExp, string: String, regexpLastIndex: Number,
- isFastPath: constexpr bool): RegExpMatchInfo labels IfDidNotMatch {
- if (isFastPath) {
- assert(HasInitialRegExpMap(regexp));
- } else {
- IncrementUseCounter(context, SmiConstant(kRegExpExecCalledOnSlowRegExp));
+ case (Object): {
+ const regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, 'RegExp.prototype.exec',
+ receiver);
+ return RegExpPrototypeExecSlow(regexp, string);
}
+ }
+}
- let lastIndex = regexpLastIndex;
+extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
+ implicit context: Context)(
+ JSRegExp, RegExpMatchInfo, String, Number): JSRegExpResult;
+
+const kGlobalOrSticky: constexpr int31
+ generates 'JSRegExp::kGlobal | JSRegExp::kSticky';
+
+extern macro RegExpBuiltinsAssembler::RegExpExecInternal(
+ implicit context: Context)(
+ JSRegExp, String, Number, RegExpMatchInfo): HeapObject;
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+// Implements the core of RegExp.prototype.exec but without actually
+// constructing the JSRegExpResult. Returns a fixed array containing match
+// indices as returned by RegExpExecStub on successful match, and jumps to
+// IfDidNotMatch otherwise.
+transitioning macro RegExpPrototypeExecBodyWithoutResult(
+ implicit context: Context)(
+ regexp: JSRegExp, string: String, regexpLastIndex: Number,
+ isFastPath: constexpr bool): RegExpMatchInfo labels IfDidNotMatch {
+ if (isFastPath) {
+ assert(HasInitialRegExpMap(regexp));
+ } else {
+ IncrementUseCounter(context, SmiConstant(kRegExpExecCalledOnSlowRegExp));
+ }
- // Check whether the regexp is global or sticky, which determines whether we
- // update last index later on.
- const flags = UnsafeCast<Smi>(regexp.flags);
- const isGlobalOrSticky: intptr =
- SmiUntag(flags) & IntPtrConstant(kGlobalOrSticky);
- const shouldUpdateLastIndex: bool = isGlobalOrSticky != 0;
+ let lastIndex = regexpLastIndex;
- // Grab and possibly update last index.
- if (shouldUpdateLastIndex) {
- if (!TaggedIsSmi(lastIndex) || (lastIndex > string.length_smi)) {
- StoreLastIndex(regexp, SmiConstant(0), isFastPath);
- goto IfDidNotMatch;
- }
- } else {
- lastIndex = SmiConstant(0);
- }
+ // Check whether the regexp is global or sticky, which determines whether we
+ // update last index later on.
+ const flags = UnsafeCast<Smi>(regexp.flags);
+ const isGlobalOrSticky: intptr =
+ SmiUntag(flags) & IntPtrConstant(kGlobalOrSticky);
+ const shouldUpdateLastIndex: bool = isGlobalOrSticky != 0;
- const lastMatchInfo: RegExpMatchInfo = GetRegExpLastMatchInfo();
-
- const matchIndices =
- RegExpExecInternal(regexp, string, lastIndex, lastMatchInfo);
-
- // {match_indices} is either null or the RegExpMatchInfo array.
- // Return early if exec failed, possibly updating last index.
- if (matchIndices != Null) {
- const matchIndicesRegExpMatchInfo =
- UnsafeCast<RegExpMatchInfo>(matchIndices);
- if (shouldUpdateLastIndex) {
- // Update the new last index from {match_indices}.
- const newLastIndex: Smi =
- matchIndicesRegExpMatchInfo.GetEndOfCapture(0);
- StoreLastIndex(regexp, newLastIndex, isFastPath);
- }
- return matchIndicesRegExpMatchInfo;
- }
- if (shouldUpdateLastIndex) {
+ // Grab and possibly update last index.
+ if (shouldUpdateLastIndex) {
+ if (!TaggedIsSmi(lastIndex) || (lastIndex > string.length_smi)) {
StoreLastIndex(regexp, SmiConstant(0), isFastPath);
+ goto IfDidNotMatch;
}
- goto IfDidNotMatch;
+ } else {
+ lastIndex = SmiConstant(0);
}
- @export
- transitioning macro RegExpPrototypeExecBodyWithoutResultFast(
- implicit context: Context)(regexp: JSRegExp, string: String):
- RegExpMatchInfo labels IfDidNotMatch {
- const lastIndex = LoadLastIndexAsLength(regexp, true);
- return RegExpPrototypeExecBodyWithoutResult(regexp, string, lastIndex, true)
- otherwise IfDidNotMatch;
- }
+ const lastMatchInfo: RegExpMatchInfo = GetRegExpLastMatchInfo();
- transitioning macro RegExpPrototypeExecBodyWithoutResultFast(
- implicit context:
- Context)(regexp: JSRegExp, string: String, lastIndex: Number):
- RegExpMatchInfo labels IfDidNotMatch {
- return RegExpPrototypeExecBodyWithoutResult(regexp, string, lastIndex, true)
- otherwise IfDidNotMatch;
- }
+ const matchIndices =
+ RegExpExecInternal(regexp, string, lastIndex, lastMatchInfo);
- // ES#sec-regexp.prototype.exec
- // RegExp.prototype.exec ( string )
- transitioning macro RegExpPrototypeExecBody(implicit context: Context)(
- receiver: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
- let regexp: JSRegExp;
- if constexpr (isFastPath) {
- regexp = UnsafeCast<JSRegExp>(receiver);
- } else {
- regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, 'RegExp.prototype.exec',
- receiver);
+ // {match_indices} is either null or the RegExpMatchInfo array.
+ // Return early if exec failed, possibly updating last index.
+ if (matchIndices != Null) {
+ const matchIndicesRegExpMatchInfo =
+ UnsafeCast<RegExpMatchInfo>(matchIndices);
+ if (shouldUpdateLastIndex) {
+ // Update the new last index from {match_indices}.
+ const newLastIndex: Smi = matchIndicesRegExpMatchInfo.GetEndOfCapture(0);
+ StoreLastIndex(regexp, newLastIndex, isFastPath);
}
- const lastIndex = LoadLastIndexAsLength(regexp, isFastPath);
- const matchIndices: RegExpMatchInfo = RegExpPrototypeExecBodyWithoutResult(
- regexp, string, lastIndex, isFastPath) otherwise return Null;
- return ConstructNewResultFromMatchInfo(
- regexp, matchIndices, string, lastIndex);
+ return matchIndicesRegExpMatchInfo;
}
-
- macro LoadRegExpFunction(implicit context: Context)(
- nativeContext: NativeContext): JSFunction {
- return UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::REGEXP_FUNCTION_INDEX]);
+ if (shouldUpdateLastIndex) {
+ StoreLastIndex(regexp, SmiConstant(0), isFastPath);
}
+ goto IfDidNotMatch;
+}
- // Note this doesn't guarantee const-ness of object properties, just
- // unchanged object layout.
- macro HasInitialRegExpMap(implicit context: Context)(o: HeapObject): bool {
- const nativeContext = LoadNativeContext(context);
- const function = LoadRegExpFunction(nativeContext);
- const initialMap = UnsafeCast<Map>(function.prototype_or_initial_map);
- return initialMap == o.map;
- }
+@export
+transitioning macro RegExpPrototypeExecBodyWithoutResultFast(
+ implicit context: Context)(regexp: JSRegExp, string: String):
+ RegExpMatchInfo labels IfDidNotMatch {
+ const lastIndex = LoadLastIndexAsLength(regexp, true);
+ return RegExpPrototypeExecBodyWithoutResult(regexp, string, lastIndex, true)
+ otherwise IfDidNotMatch;
+}
- macro IsReceiverInitialRegExpPrototype(implicit context:
- Context)(receiver: Object): bool {
- const nativeContext = LoadNativeContext(context);
- const regexpFun = LoadRegExpFunction(nativeContext);
- const initialMap = UnsafeCast<Map>(regexpFun.prototype_or_initial_map);
- const initialPrototype: HeapObject = initialMap.prototype;
- return TaggedEqual(receiver, initialPrototype);
- }
+transitioning macro RegExpPrototypeExecBodyWithoutResultFast(
+ implicit context: Context)(
+ regexp: JSRegExp, string: String,
+ lastIndex: Number): RegExpMatchInfo labels IfDidNotMatch {
+ return RegExpPrototypeExecBodyWithoutResult(regexp, string, lastIndex, true)
+ otherwise IfDidNotMatch;
+}
- extern enum Flag constexpr 'JSRegExp::Flag' {
- kNone,
- kGlobal,
- kIgnoreCase,
- kMultiline,
- kSticky,
- kUnicode,
- kDotAll,
- kInvalid
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+transitioning macro RegExpPrototypeExecBody(implicit context: Context)(
+ receiver: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
+ let regexp: JSRegExp;
+ if constexpr (isFastPath) {
+ regexp = UnsafeCast<JSRegExp>(receiver);
+ } else {
+ regexp = Cast<JSRegExp>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, 'RegExp.prototype.exec',
+ receiver);
}
+ const lastIndex = LoadLastIndexAsLength(regexp, isFastPath);
+ const matchIndices: RegExpMatchInfo = RegExpPrototypeExecBodyWithoutResult(
+ regexp, string, lastIndex, isFastPath) otherwise return Null;
+ return ConstructNewResultFromMatchInfo(
+ regexp, matchIndices, string, lastIndex);
+}
- const kRegExpPrototypeOldFlagGetter: constexpr int31
- generates 'v8::Isolate::kRegExpPrototypeOldFlagGetter';
- const kRegExpPrototypeStickyGetter: constexpr int31
- generates 'v8::Isolate::kRegExpPrototypeStickyGetter';
- const kRegExpPrototypeUnicodeGetter: constexpr int31
- generates 'v8::Isolate::kRegExpPrototypeUnicodeGetter';
-
- extern macro RegExpBuiltinsAssembler::FastFlagGetter(
- JSRegExp, constexpr Flag): bool;
- extern runtime IncrementUseCounter(Context, Smi): void;
-
- macro FlagGetter(implicit context: Context)(
- receiver: Object, flag: constexpr Flag, counter: constexpr int31,
- methodName: constexpr string): JSAny {
- typeswitch (receiver) {
- case (receiver: JSRegExp): {
- return SelectBooleanConstant(FastFlagGetter(receiver, flag));
- }
- case (Object): {
- }
- }
- if (!IsReceiverInitialRegExpPrototype(receiver)) {
- ThrowTypeError(MessageTemplate::kRegExpNonRegExp, methodName);
+macro LoadRegExpFunction(implicit context: Context)(
+ nativeContext: NativeContext): JSFunction {
+ return UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::REGEXP_FUNCTION_INDEX]);
+}
+
+// Note this doesn't guarantee const-ness of object properties, just
+// unchanged object layout.
+macro HasInitialRegExpMap(implicit context: Context)(o: HeapObject): bool {
+ const nativeContext = LoadNativeContext(context);
+ const function = LoadRegExpFunction(nativeContext);
+ const initialMap = UnsafeCast<Map>(function.prototype_or_initial_map);
+ return initialMap == o.map;
+}
+
+macro IsReceiverInitialRegExpPrototype(implicit context: Context)(
+ receiver: Object): bool {
+ const nativeContext = LoadNativeContext(context);
+ const regexpFun = LoadRegExpFunction(nativeContext);
+ const initialMap = UnsafeCast<Map>(regexpFun.prototype_or_initial_map);
+ const initialPrototype: HeapObject = initialMap.prototype;
+ return TaggedEqual(receiver, initialPrototype);
+}
+
+extern enum Flag constexpr 'JSRegExp::Flag' {
+ kNone,
+ kGlobal,
+ kIgnoreCase,
+ kMultiline,
+ kSticky,
+ kUnicode,
+ kDotAll,
+ kInvalid
+}
+
+const kRegExpPrototypeOldFlagGetter: constexpr int31
+ generates 'v8::Isolate::kRegExpPrototypeOldFlagGetter';
+const kRegExpPrototypeStickyGetter: constexpr int31
+ generates 'v8::Isolate::kRegExpPrototypeStickyGetter';
+const kRegExpPrototypeUnicodeGetter: constexpr int31
+ generates 'v8::Isolate::kRegExpPrototypeUnicodeGetter';
+
+extern macro RegExpBuiltinsAssembler::FastFlagGetter(
+ JSRegExp, constexpr Flag): bool;
+extern runtime IncrementUseCounter(Context, Smi): void;
+
+macro FlagGetter(implicit context: Context)(
+ receiver: Object, flag: constexpr Flag, counter: constexpr int31,
+ methodName: constexpr string): JSAny {
+ typeswitch (receiver) {
+ case (receiver: JSRegExp): {
+ return SelectBooleanConstant(FastFlagGetter(receiver, flag));
}
- if constexpr (counter != -1) {
- IncrementUseCounter(context, SmiConstant(counter));
+ case (Object): {
}
- return Undefined;
}
-
- // ES6 21.2.5.4.
- // ES #sec-get-regexp.prototype.global
- transitioning javascript builtin RegExpPrototypeGlobalGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return FlagGetter(
- receiver, Flag::kGlobal, kRegExpPrototypeOldFlagGetter,
- 'RegExp.prototype.global');
+ if (!IsReceiverInitialRegExpPrototype(receiver)) {
+ ThrowTypeError(MessageTemplate::kRegExpNonRegExp, methodName);
}
-
- // ES6 21.2.5.5.
- // ES #sec-get-regexp.prototype.ignorecase
- transitioning javascript builtin RegExpPrototypeIgnoreCaseGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return FlagGetter(
- receiver, Flag::kIgnoreCase, kRegExpPrototypeOldFlagGetter,
- 'RegExp.prototype.ignoreCase');
+ if constexpr (counter != -1) {
+ IncrementUseCounter(context, SmiConstant(counter));
}
+ return Undefined;
+}
- // ES6 21.2.5.7.
- // ES #sec-get-regexp.prototype.multiline
- transitioning javascript builtin RegExpPrototypeMultilineGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return FlagGetter(
- receiver, Flag::kMultiline, kRegExpPrototypeOldFlagGetter,
- 'RegExp.prototype.multiline');
- }
+// ES6 21.2.5.4.
+// ES #sec-get-regexp.prototype.global
+transitioning javascript builtin RegExpPrototypeGlobalGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kGlobal, kRegExpPrototypeOldFlagGetter,
+ 'RegExp.prototype.global');
+}
- // ES #sec-get-regexp.prototype.dotAll
- transitioning javascript builtin RegExpPrototypeDotAllGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- const kNoCounter: constexpr int31 = -1;
- return FlagGetter(
- receiver, Flag::kDotAll, kNoCounter, 'RegExp.prototype.dotAll');
- }
+// ES6 21.2.5.5.
+// ES #sec-get-regexp.prototype.ignorecase
+transitioning javascript builtin RegExpPrototypeIgnoreCaseGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kIgnoreCase, kRegExpPrototypeOldFlagGetter,
+ 'RegExp.prototype.ignoreCase');
+}
- // ES6 21.2.5.12.
- // ES #sec-get-regexp.prototype.sticky
- transitioning javascript builtin RegExpPrototypeStickyGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return FlagGetter(
- receiver, Flag::kSticky, kRegExpPrototypeStickyGetter,
- 'RegExp.prototype.sticky');
- }
+// ES6 21.2.5.7.
+// ES #sec-get-regexp.prototype.multiline
+transitioning javascript builtin RegExpPrototypeMultilineGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kMultiline, kRegExpPrototypeOldFlagGetter,
+ 'RegExp.prototype.multiline');
+}
- // ES6 21.2.5.15.
- // ES #sec-get-regexp.prototype.unicode
- transitioning javascript builtin RegExpPrototypeUnicodeGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- return FlagGetter(
- receiver, Flag::kUnicode, kRegExpPrototypeUnicodeGetter,
- 'RegExp.prototype.unicode');
- }
+// ES #sec-get-regexp.prototype.dotAll
+transitioning javascript builtin RegExpPrototypeDotAllGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ const kNoCounter: constexpr int31 = -1;
+ return FlagGetter(
+ receiver, Flag::kDotAll, kNoCounter, 'RegExp.prototype.dotAll');
+}
- extern transitioning macro
- RegExpBuiltinsAssembler::FlagsGetter(implicit context: Context)(
- Object, constexpr bool): String;
+// ES6 21.2.5.12.
+// ES #sec-get-regexp.prototype.sticky
+transitioning javascript builtin RegExpPrototypeStickyGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kSticky, kRegExpPrototypeStickyGetter,
+ 'RegExp.prototype.sticky');
+}
- transitioning macro
- FastFlagsGetter(implicit context: Context)(receiver: FastJSRegExp): String {
- return FlagsGetter(receiver, true);
- }
+// ES6 21.2.5.15.
+// ES #sec-get-regexp.prototype.unicode
+transitioning javascript builtin RegExpPrototypeUnicodeGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kUnicode, kRegExpPrototypeUnicodeGetter,
+ 'RegExp.prototype.unicode');
+}
- transitioning macro SlowFlagsGetter(implicit context:
- Context)(receiver: JSAny): String {
- return FlagsGetter(receiver, false);
- }
+extern transitioning macro
+RegExpBuiltinsAssembler::FlagsGetter(implicit context: Context)(
+ Object, constexpr bool): String;
- // ES #sec-get-regexp.prototype.flags
- // TFJ(RegExpPrototypeFlagsGetter, 0, kReceiver) \
- transitioning javascript builtin RegExpPrototypeFlagsGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): String {
- ThrowIfNotJSReceiver(
- receiver, MessageTemplate::kRegExpNonObject, 'RegExp.prototype.flags');
-
- // The check is strict because the following code relies on individual flag
- // getters on the regexp prototype (e.g.: global, sticky, ...). We don't
- // bother to check these individually.
- const fastRegexp = Cast<FastJSRegExp>(receiver)
- otherwise return SlowFlagsGetter(receiver);
- return FastFlagsGetter(fastRegexp);
- }
+transitioning macro
+FastFlagsGetter(implicit context: Context)(receiver: FastJSRegExp): String {
+ return FlagsGetter(receiver, true);
+}
+
+transitioning macro SlowFlagsGetter(implicit context: Context)(receiver: JSAny):
+ String {
+ return FlagsGetter(receiver, false);
+}
- extern transitioning macro RegExpBuiltinsAssembler::SlowLoadLastIndex(
- implicit context: Context)(JSAny): JSAny;
- extern transitioning macro RegExpBuiltinsAssembler::SlowStoreLastIndex(
- implicit context: Context)(JSAny, JSAny): void;
+// ES #sec-get-regexp.prototype.flags
+// TFJ(RegExpPrototypeFlagsGetter, 0, kReceiver) \
+transitioning javascript builtin RegExpPrototypeFlagsGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): String {
+ ThrowIfNotJSReceiver(
+ receiver, MessageTemplate::kRegExpNonObject, 'RegExp.prototype.flags');
+
+ // The check is strict because the following code relies on individual flag
+ // getters on the regexp prototype (e.g.: global, sticky, ...). We don't
+ // bother to check these individually.
+ const fastRegexp = Cast<FastJSRegExp>(receiver)
+ otherwise return SlowFlagsGetter(receiver);
+ return FastFlagsGetter(fastRegexp);
+}
- extern macro RegExpBuiltinsAssembler::FastLoadLastIndex(JSRegExp): Smi;
- extern macro RegExpBuiltinsAssembler::FastStoreLastIndex(JSRegExp, Smi): void;
+extern transitioning macro RegExpBuiltinsAssembler::SlowLoadLastIndex(
+ implicit context: Context)(JSAny): JSAny;
+extern transitioning macro RegExpBuiltinsAssembler::SlowStoreLastIndex(
+ implicit context: Context)(JSAny, JSAny): void;
- @export
- transitioning macro LoadLastIndex(implicit context: Context)(
- regexp: JSAny, isFastPath: constexpr bool): JSAny {
- return isFastPath ? FastLoadLastIndex(UnsafeCast<JSRegExp>(regexp)) :
- SlowLoadLastIndex(regexp);
- }
+extern macro RegExpBuiltinsAssembler::FastLoadLastIndex(JSRegExp): Smi;
+extern macro RegExpBuiltinsAssembler::FastStoreLastIndex(JSRegExp, Smi): void;
- @export
- transitioning macro LoadLastIndexAsLength(implicit context: Context)(
- regexp: JSRegExp, isFastPath: constexpr bool): Number {
- const lastIndex = LoadLastIndex(regexp, isFastPath);
- if (isFastPath) {
- // ToLength on a positive smi is a nop and can be skipped.
- return UnsafeCast<PositiveSmi>(lastIndex);
- } else {
- // Omit ToLength if last_index is a non-negative smi.
- typeswitch (lastIndex) {
- case (i: PositiveSmi): {
- return i;
- }
- case (o: JSAny): {
- return ToLength_Inline(o);
- }
+@export
+transitioning macro LoadLastIndex(implicit context: Context)(
+ regexp: JSAny, isFastPath: constexpr bool): JSAny {
+ return isFastPath ? FastLoadLastIndex(UnsafeCast<JSRegExp>(regexp)) :
+ SlowLoadLastIndex(regexp);
+}
+
+@export
+transitioning macro LoadLastIndexAsLength(implicit context: Context)(
+ regexp: JSRegExp, isFastPath: constexpr bool): Number {
+ const lastIndex = LoadLastIndex(regexp, isFastPath);
+ if (isFastPath) {
+ // ToLength on a positive smi is a nop and can be skipped.
+ return UnsafeCast<PositiveSmi>(lastIndex);
+ } else {
+ // Omit ToLength if last_index is a non-negative smi.
+ typeswitch (lastIndex) {
+ case (i: PositiveSmi): {
+ return i;
+ }
+ case (o: JSAny): {
+ return ToLength_Inline(o);
}
}
}
+}
- @export
- transitioning macro StoreLastIndex(implicit context: Context)(
- regexp: JSAny, value: Number, isFastPath: constexpr bool): void {
- if (isFastPath) {
- FastStoreLastIndex(UnsafeCast<JSRegExp>(regexp), UnsafeCast<Smi>(value));
- } else {
- SlowStoreLastIndex(regexp, value);
- }
+@export
+transitioning macro StoreLastIndex(implicit context: Context)(
+ regexp: JSAny, value: Number, isFastPath: constexpr bool): void {
+ if (isFastPath) {
+ FastStoreLastIndex(UnsafeCast<JSRegExp>(regexp), UnsafeCast<Smi>(value));
+ } else {
+ SlowStoreLastIndex(regexp, value);
}
+}
- extern builtin
- StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
-
- extern macro RegExpBuiltinsAssembler::AdvanceStringIndex(
- String, Number, bool, constexpr bool): Number;
- extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
- extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexSlow(String, Number, bool): Smi;
-
- type UseCounterFeature extends int31
- constexpr 'v8::Isolate::UseCounterFeature';
- const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature
- generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp';
- const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature
- generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp';
- const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature
- generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
- const kRegExpExecCalledOnSlowRegExp: constexpr UseCounterFeature
- generates 'v8::Isolate::kRegExpExecCalledOnSlowRegExp';
-
- // ES#sec-isregexp IsRegExp ( argument )
- @export
- transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
- const receiver = Cast<JSReceiver>(obj) otherwise return false;
-
- // Check @match.
- const value = GetProperty(receiver, MatchSymbolConstant());
- if (value == Undefined) {
- return Is<JSRegExp>(receiver);
- }
-
- assert(value != Undefined);
- // The common path. Symbol.match exists, equals the RegExpPrototypeMatch
- // function (and is thus trueish), and the receiver is a JSRegExp.
- if (ToBoolean(value)) {
- if (!Is<JSRegExp>(receiver)) {
- IncrementUseCounter(
- context, SmiConstant(kRegExpMatchIsTrueishOnNonJSRegExp));
- }
- return true;
- }
+extern builtin
+StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
+
+extern macro RegExpBuiltinsAssembler::AdvanceStringIndex(
+ String, Number, bool, constexpr bool): Number;
+extern macro
+RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
+extern macro
+RegExpBuiltinsAssembler::AdvanceStringIndexSlow(String, Number, bool): Smi;
+
+type UseCounterFeature extends int31
+constexpr 'v8::Isolate::UseCounterFeature';
+const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp';
+const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp';
+const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpPrototypeSourceGetter';
+const kRegExpExecCalledOnSlowRegExp: constexpr UseCounterFeature
+ generates 'v8::Isolate::kRegExpExecCalledOnSlowRegExp';
+
+// ES#sec-isregexp IsRegExp ( argument )
+@export
+transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
+ const receiver = Cast<JSReceiver>(obj) otherwise return false;
+
+ // Check @match.
+ const value = GetProperty(receiver, MatchSymbolConstant());
+ if (value == Undefined) {
+ return Is<JSRegExp>(receiver);
+ }
- assert(!ToBoolean(value));
- if (Is<JSRegExp>(receiver)) {
+ assert(value != Undefined);
+ // The common path. Symbol.match exists, equals the RegExpPrototypeMatch
+ // function (and is thus trueish), and the receiver is a JSRegExp.
+ if (ToBoolean(value)) {
+ if (!Is<JSRegExp>(receiver)) {
IncrementUseCounter(
- context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp));
+ context, SmiConstant(kRegExpMatchIsTrueishOnNonJSRegExp));
}
- return false;
+ return true;
}
- extern runtime RegExpInitializeAndCompile(Context, JSRegExp, String, String):
- JSAny;
-
- @export
- transitioning macro RegExpCreate(implicit context: Context)(
- nativeContext: NativeContext, maybeString: JSAny, flags: String): JSAny {
- const regexpFun = LoadRegExpFunction(nativeContext);
- const initialMap = UnsafeCast<Map>(regexpFun.prototype_or_initial_map);
- return RegExpCreate(initialMap, maybeString, flags);
+ assert(!ToBoolean(value));
+ if (Is<JSRegExp>(receiver)) {
+ IncrementUseCounter(context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp));
}
+ return false;
+}
- @export
- transitioning macro RegExpCreate(implicit context: Context)(
- initialMap: Map, maybeString: JSAny, flags: String): JSAny {
- const pattern: String =
- maybeString == Undefined ? kEmptyString : ToString_Inline(maybeString);
- const regexp =
- UnsafeCast<JSRegExp>(AllocateFastOrSlowJSObjectFromMap(initialMap));
- return RegExpInitializeAndCompile(context, regexp, pattern, flags);
- }
+extern runtime RegExpInitializeAndCompile(
+ Context, JSRegExp, String, String): JSAny;
+
+@export
+transitioning macro RegExpCreate(implicit context: Context)(
+ nativeContext: NativeContext, maybeString: JSAny, flags: String): JSAny {
+ const regexpFun = LoadRegExpFunction(nativeContext);
+ const initialMap = UnsafeCast<Map>(regexpFun.prototype_or_initial_map);
+ return RegExpCreate(initialMap, maybeString, flags);
+}
+
+@export
+transitioning macro RegExpCreate(implicit context: Context)(
+ initialMap: Map, maybeString: JSAny, flags: String): JSAny {
+ const pattern: String =
+ maybeString == Undefined ? kEmptyString : ToString_Inline(maybeString);
+ const regexp =
+ UnsafeCast<JSRegExp>(AllocateFastOrSlowJSObjectFromMap(initialMap));
+ return RegExpInitializeAndCompile(context, regexp, pattern, flags);
+}
}
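For reference, the lastIndex protocol that the exec-body macros above implement is the standard one for global and sticky regexps: read lastIndex before the match, reset it to 0 on failure, and store the end of the match on success. A minimal plain-JavaScript illustration, not part of the patch:

  const re = /a/g;
  re.exec('banana');  // match at index 1, re.lastIndex becomes 2
  re.exec('banana');  // match at index 3, re.lastIndex becomes 4
  re.exec('xyz');     // lastIndex (4) exceeds the string length: no match, lastIndex resets to 0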
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index 6941728b39..b9615b1149 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -3,79 +3,78 @@
// found in the LICENSE file.
namespace string {
- macro TryFastStringCompareSequence(
- string: String, searchStr: String, start: uintptr,
- searchLength: uintptr): Boolean labels Slow {
- const directString = Cast<DirectString>(string) otherwise Slow;
- const directSearchStr = Cast<DirectString>(searchStr) otherwise Slow;
+macro TryFastStringCompareSequence(
+ string: String, searchStr: String, start: uintptr,
+ searchLength: uintptr): Boolean labels Slow {
+ const directString = Cast<DirectString>(string) otherwise Slow;
+ const directSearchStr = Cast<DirectString>(searchStr) otherwise Slow;
- let searchIndex: uintptr = 0;
- let stringIndex: uintptr = start;
+ let searchIndex: uintptr = 0;
+ let stringIndex: uintptr = start;
- while (searchIndex < searchLength) {
- if (StringCharCodeAt(directSearchStr, searchIndex) !=
- StringCharCodeAt(directString, stringIndex)) {
- return False;
- }
-
- searchIndex++;
- stringIndex++;
+ while (searchIndex < searchLength) {
+ if (StringCharCodeAt(directSearchStr, searchIndex) !=
+ StringCharCodeAt(directString, stringIndex)) {
+ return False;
}
- return True;
+
+ searchIndex++;
+ stringIndex++;
}
+ return True;
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.endswith
- transitioning javascript builtin StringPrototypeEndsWith(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): Boolean {
- const searchString: JSAny = arguments[0];
- const endPosition: JSAny = arguments[1];
- const kBuiltinName: constexpr string = 'String.prototype.endsWith';
+// https://tc39.github.io/ecma262/#sec-string.prototype.endswith
+transitioning javascript builtin StringPrototypeEndsWith(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): Boolean {
+ const searchString: JSAny = arguments[0];
+ const endPosition: JSAny = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.endsWith';
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const string: String = ToThisString(receiver, kBuiltinName);
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, kBuiltinName);
- // 3. Let isRegExp be ? IsRegExp(searchString).
- // 4. If isRegExp is true, throw a TypeError exception.
- if (regexp::IsRegExp(searchString)) {
- ThrowTypeError(MessageTemplate::kFirstArgumentNotRegExp, kBuiltinName);
- }
+ // 3. Let isRegExp be ? IsRegExp(searchString).
+ // 4. If isRegExp is true, throw a TypeError exception.
+ if (regexp::IsRegExp(searchString)) {
+ ThrowTypeError(MessageTemplate::kFirstArgumentNotRegExp, kBuiltinName);
+ }
- // 5. Let searchStr be ? ToString(searchString).
- const searchStr: String = ToString_Inline(searchString);
+ // 5. Let searchStr be ? ToString(searchString).
+ const searchStr: String = ToString_Inline(searchString);
- // 6. Let len be the length of S.
- const len: uintptr = string.length_uintptr;
+ // 6. Let len be the length of S.
+ const len: uintptr = string.length_uintptr;
- // 7. If endPosition is undefined, let pos be len,
- // else let pos be ? ToInteger(endPosition).
- // 8. Let end be min(max(pos, 0), len).
- const end: uintptr =
- (endPosition != Undefined) ? ClampToIndexRange(endPosition, len) : len;
+ // 7. If endPosition is undefined, let pos be len,
+ // else let pos be ? ToInteger(endPosition).
+ // 8. Let end be min(max(pos, 0), len).
+ const end: uintptr =
+ (endPosition != Undefined) ? ClampToIndexRange(endPosition, len) : len;
- // 9. Let searchLength be the length of searchStr.
- const searchLength: uintptr = searchStr.length_uintptr;
+ // 9. Let searchLength be the length of searchStr.
+ const searchLength: uintptr = searchStr.length_uintptr;
- // 10. Let start be end - searchLength.
- const start: uintptr = end - searchLength;
+ // 10. Let start be end - searchLength.
+ const start: uintptr = end - searchLength;
- // 11. If start is less than 0, return false.
- if (Signed(start) < 0) return False;
+ // 11. If start is less than 0, return false.
+ if (Signed(start) < 0) return False;
- // 12. If the sequence of code units of S starting at start of length
- // searchLength is the same as the full code unit sequence of searchStr,
- // return true.
- // 13. Otherwise, return false.
- try {
- // Fast Path: If both strings are direct and relevant indices are Smis.
- return TryFastStringCompareSequence(
- string, searchStr, start, searchLength) otherwise Slow;
- }
- label Slow {
-      // Slow Path: If either of the strings is indirect, bail into runtime.
- return StringCompareSequence(
- context, string, searchStr, Convert<Number>(start));
- }
+ // 12. If the sequence of code units of S starting at start of length
+ // searchLength is the same as the full code unit sequence of searchStr,
+ // return true.
+ // 13. Otherwise, return false.
+ try {
+ // Fast Path: If both strings are direct and relevant indices are Smis.
+ return TryFastStringCompareSequence(string, searchStr, start, searchLength)
+ otherwise Slow;
+ } label Slow {
+    // Slow Path: If either of the strings is indirect, bail into runtime.
+ return StringCompareSequence(
+ context, string, searchStr, Convert<Number>(start));
}
}
+}
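The endPosition clamping walked through in the spec-step comments above is directly observable; a small JavaScript illustration, not part of the patch:

  'foobar'.endsWith('bar');     // true
  'foobar'.endsWith('foo', 3);  // true, only the first 3 code units are considered
  'foobar'.endsWith('bar', 3);  // false
  'foobar'.endsWith(/bar/);     // throws TypeError per step 4 (RegExp search values are rejected)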
diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq
index f12c2dd22f..8b3e01342e 100644
--- a/deps/v8/src/builtins/string-html.tq
+++ b/deps/v8/src/builtins/string-html.tq
@@ -3,127 +3,124 @@
// found in the LICENSE file.
namespace string {
- extern runtime StringEscapeQuotes(Context, String): String;
-
- // https://tc39.github.io/ecma262/#sec-createhtml
- transitioning builtin CreateHTML(implicit context: Context)(
- receiver: JSAny, methodName: String, tagName: String, attr: String,
- attrValue: JSAny): String {
- const tagContents: String = ToThisString(receiver, methodName);
- let result = '<' + tagName;
- if (attr != kEmptyString) {
- const attrStringValue: String =
- StringEscapeQuotes(context, ToString_Inline(attrValue));
- result = result + ' ' + attr + '=\"' + attrStringValue + '\"';
- }
-
- return result + '>' + tagContents + '</' + tagName + '>';
+extern runtime StringEscapeQuotes(Context, String): String;
+
+// https://tc39.github.io/ecma262/#sec-createhtml
+transitioning builtin CreateHTML(implicit context: Context)(
+ receiver: JSAny, methodName: String, tagName: String, attr: String,
+ attrValue: JSAny): String {
+ const tagContents: String = ToThisString(receiver, methodName);
+ let result = '<' + tagName;
+ if (attr != kEmptyString) {
+ const attrStringValue: String =
+ StringEscapeQuotes(context, ToString_Inline(attrValue));
+ result = result + ' ' + attr + '=\"' + attrStringValue + '\"';
}
- // https://tc39.github.io/ecma262/#sec-string.prototype.anchor
- transitioning javascript builtin StringPrototypeAnchor(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]);
- }
+ return result + '>' + tagContents + '</' + tagName + '>';
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.big
- transitioning javascript builtin
- StringPrototypeBig(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.anchor
+transitioning javascript builtin StringPrototypeAnchor(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.blink
- transitioning javascript builtin
- StringPrototypeBlink(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.blink', 'blink', kEmptyString,
- kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.big
+transitioning javascript builtin
+StringPrototypeBig(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.bold
- transitioning javascript builtin
- StringPrototypeBold(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.blink
+transitioning javascript builtin
+StringPrototypeBlink(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.blink', 'blink', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor
- transitioning javascript builtin
- StringPrototypeFontcolor(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.bold
+transitioning javascript builtin
+StringPrototypeBold(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.fontsize
- transitioning javascript builtin
- StringPrototypeFontsize(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor
+transitioning javascript builtin
+StringPrototypeFontcolor(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.fixed
- transitioning javascript builtin
- StringPrototypeFixed(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.fontsize
+transitioning javascript builtin
+StringPrototypeFontsize(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.italics
- transitioning javascript builtin
- StringPrototypeItalics(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.fixed
+transitioning javascript builtin
+StringPrototypeFixed(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.link
- transitioning javascript builtin
- StringPrototypeLink(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.italics
+transitioning javascript builtin
+StringPrototypeItalics(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.small
- transitioning javascript builtin
- StringPrototypeSmall(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.small', 'small', kEmptyString,
- kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.link
+transitioning javascript builtin
+StringPrototypeLink(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.strike
- transitioning javascript builtin
- StringPrototypeStrike(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.strike', 'strike', kEmptyString,
- kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.small
+transitioning javascript builtin
+StringPrototypeSmall(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.small', 'small', kEmptyString, kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.sub
- transitioning javascript builtin
- StringPrototypeSub(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.strike
+transitioning javascript builtin
+StringPrototypeStrike(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.strike', 'strike', kEmptyString,
+ kEmptyString);
+}
- // https://tc39.github.io/ecma262/#sec-string.prototype.sup
- transitioning javascript builtin
- StringPrototypeSup(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- return CreateHTML(
- receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.sub
+transitioning javascript builtin
+StringPrototypeSub(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
+}
+
+// https://tc39.github.io/ecma262/#sec-string.prototype.sup
+transitioning javascript builtin
+StringPrototypeSup(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ return CreateHTML(
+ receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
+}
}
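All of these legacy String HTML helpers funnel into CreateHTML above, which also escapes double quotes in the attribute value via StringEscapeQuotes; the observable behavior in plain JavaScript, illustrative only:

  'hello'.bold();          // '<b>hello</b>'
  'hello'.anchor('top');   // '<a name="top">hello</a>'
  'hello'.fontsize('7"x'); // '<font size="7&quot;x">hello</font>' (the quote is escaped)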
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index 79032e0e28..eea7a621f0 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -4,43 +4,43 @@
namespace string {
- macro NewJSStringIterator(implicit context: Context)(
- string: String, nextIndex: Smi): JSStringIterator {
- return new JSStringIterator{
- map: GetInitialStringIteratorMap(),
- properties_or_hash: kEmptyFixedArray,
- elements: kEmptyFixedArray,
- string: string,
- index: nextIndex
- };
- }
+macro NewJSStringIterator(implicit context: Context)(
+ string: String, nextIndex: Smi): JSStringIterator {
+ return new JSStringIterator{
+ map: GetInitialStringIteratorMap(),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ string: string,
+ index: nextIndex
+ };
+}
- // ES6 #sec-string.prototype-@@iterator
- transitioning javascript builtin StringPrototypeIterator(
- js-implicit context: NativeContext, receiver: JSAny)(): JSStringIterator {
- const name: String =
- ToThisString(receiver, 'String.prototype[Symbol.iterator]');
- const index: Smi = 0;
- return NewJSStringIterator(name, index);
- }
+// ES6 #sec-string.prototype-@@iterator
+transitioning javascript builtin StringPrototypeIterator(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSStringIterator {
+ const name: String =
+ ToThisString(receiver, 'String.prototype[Symbol.iterator]');
+ const index: Smi = 0;
+ return NewJSStringIterator(name, index);
+}
- // ES6 #sec-%stringiteratorprototype%.next
- transitioning javascript builtin StringIteratorPrototypeNext(
- js-implicit context: NativeContext, receiver: JSAny)(): JSObject {
- const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'String Iterator.prototype.next', receiver);
- const string = iterator.string;
- const position: intptr = SmiUntag(iterator.index);
- const length: intptr = string.length_intptr;
- if (position >= length) {
- return AllocateJSIteratorResult(Undefined, True);
- }
- // Move to next codepoint.
- const encoding = UnicodeEncoding::UTF16;
- const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
- const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch);
- iterator.index = SmiTag(position + value.length_intptr);
- return AllocateJSIteratorResult(value, False);
+// ES6 #sec-%stringiteratorprototype%.next
+transitioning javascript builtin StringIteratorPrototypeNext(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSObject {
+ const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ 'String Iterator.prototype.next', receiver);
+ const string = iterator.string;
+ const position: intptr = SmiUntag(iterator.index);
+ const length: intptr = string.length_intptr;
+ if (position >= length) {
+ return AllocateJSIteratorResult(Undefined, True);
}
+ // Move to next codepoint.
+ const encoding = UnicodeEncoding::UTF16;
+ const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
+ const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch);
+ iterator.index = SmiTag(position + value.length_intptr);
+ return AllocateJSIteratorResult(value, False);
+}
}
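StringIteratorPrototypeNext above advances by Unicode code point (LoadSurrogatePairAt), not by UTF-16 code unit, so the number of iteration steps can be smaller than .length; illustrative JavaScript, not part of the patch:

  const s = 'a\u{1F600}b';  // U+1F600 is one code point but two UTF-16 code units
  s.length;                 // 4
  [...s].length;            // 3, the iterator yields 'a', '\u{1F600}', 'b'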
diff --git a/deps/v8/src/builtins/string-pad.tq b/deps/v8/src/builtins/string-pad.tq
index 4a4c370406..b95e68628a 100644
--- a/deps/v8/src/builtins/string-pad.tq
+++ b/deps/v8/src/builtins/string-pad.tq
@@ -6,106 +6,106 @@
namespace string {
- extern transitioning builtin
- StringSubstring(implicit context: Context)(String, intptr, intptr): String;
+extern transitioning builtin
+StringSubstring(implicit context: Context)(String, intptr, intptr): String;
- const kStringPadStart: constexpr int31 = 0;
- const kStringPadEnd: constexpr int31 = 1;
+const kStringPadStart: constexpr int31 = 0;
+const kStringPadEnd: constexpr int31 = 1;
- transitioning macro StringPad(implicit context: Context)(
- receiver: JSAny, arguments: Arguments, methodName: constexpr string,
- variant: constexpr int31): String {
- const receiverString: String = ToThisString(receiver, methodName);
- const stringLength: Smi = receiverString.length_smi;
+transitioning macro StringPad(implicit context: Context)(
+ receiver: JSAny, arguments: Arguments, methodName: constexpr string,
+ variant: constexpr int31): String {
+ const receiverString: String = ToThisString(receiver, methodName);
+ const stringLength: Smi = receiverString.length_smi;
- if (arguments.length == 0) {
- return receiverString;
- }
- const maxLength: Number = ToLength_Inline(arguments[0]);
- assert(IsNumberNormalized(maxLength));
-
- typeswitch (maxLength) {
- case (smiMaxLength: Smi): {
- if (smiMaxLength <= stringLength) {
- return receiverString;
- }
- }
- case (Number): {
- }
- }
+ if (arguments.length == 0) {
+ return receiverString;
+ }
+ const maxLength: Number = ToLength_Inline(arguments[0]);
+ assert(IsNumberNormalized(maxLength));
- let fillString: String = ' ';
- let fillLength: intptr = 1;
-
- if (arguments.length != 1) {
- const fill = arguments[1];
- if (fill != Undefined) {
- fillString = ToString_Inline(fill);
- fillLength = fillString.length_intptr;
- if (fillLength == 0) {
- return receiverString;
- }
+ typeswitch (maxLength) {
+ case (smiMaxLength: Smi): {
+ if (smiMaxLength <= stringLength) {
+ return receiverString;
}
}
-
- // Pad.
- assert(fillLength > 0);
- // Throw if max_length is greater than String::kMaxLength.
- if (!TaggedIsSmi(maxLength)) {
- ThrowInvalidStringLength(context);
+ case (Number): {
}
+ }
- const smiMaxLength: Smi = UnsafeCast<Smi>(maxLength);
- if (smiMaxLength > SmiConstant(kStringMaxLength)) {
- ThrowInvalidStringLength(context);
- }
- assert(smiMaxLength > stringLength);
- const padLength: Smi = smiMaxLength - stringLength;
-
- let padding: String;
- if (fillLength == 1) {
- // Single char fill.
- // Fast path for a single character fill. No need to calculate number of
- // repetitions or remainder.
- padding = StringRepeat(context, fillString, padLength);
- } else {
- // Multi char fill.
- const fillLengthWord32: int32 = TruncateIntPtrToInt32(fillLength);
- const padLengthWord32: int32 = Convert<int32>(padLength);
- const repetitionsWord32: int32 = padLengthWord32 / fillLengthWord32;
- const remainingWord32: int32 = padLengthWord32 % fillLengthWord32;
- padding =
- StringRepeat(context, fillString, Convert<Smi>(repetitionsWord32));
-
- if (remainingWord32 != 0) {
- const remainderString =
- StringSubstring(fillString, 0, Convert<intptr>(remainingWord32));
- padding = padding + remainderString;
+ let fillString: String = ' ';
+ let fillLength: intptr = 1;
+
+ if (arguments.length != 1) {
+ const fill = arguments[1];
+ if (fill != Undefined) {
+ fillString = ToString_Inline(fill);
+ fillLength = fillString.length_intptr;
+ if (fillLength == 0) {
+ return receiverString;
}
}
+ }
- // Return result.
- assert(padLength == padding.length_smi);
- if (variant == kStringPadStart) {
- return padding + receiverString;
- }
- assert(variant == kStringPadEnd);
- return receiverString + padding;
+ // Pad.
+ assert(fillLength > 0);
+ // Throw if max_length is greater than String::kMaxLength.
+ if (!TaggedIsSmi(maxLength)) {
+ ThrowInvalidStringLength(context);
}
- // ES6 #sec-string.prototype.padstart
- transitioning javascript builtin
- StringPrototypePadStart(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- const methodName: constexpr string = 'String.prototype.padStart';
- return StringPad(receiver, arguments, methodName, kStringPadStart);
+ const smiMaxLength: Smi = UnsafeCast<Smi>(maxLength);
+ if (smiMaxLength > SmiConstant(kStringMaxLength)) {
+ ThrowInvalidStringLength(context);
+ }
+ assert(smiMaxLength > stringLength);
+ const padLength: Smi = smiMaxLength - stringLength;
+
+ let padding: String;
+ if (fillLength == 1) {
+ // Single char fill.
+ // Fast path for a single character fill. No need to calculate number of
+ // repetitions or remainder.
+ padding = StringRepeat(context, fillString, padLength);
+ } else {
+ // Multi char fill.
+ const fillLengthWord32: int32 = TruncateIntPtrToInt32(fillLength);
+ const padLengthWord32: int32 = Convert<int32>(padLength);
+ const repetitionsWord32: int32 = padLengthWord32 / fillLengthWord32;
+ const remainingWord32: int32 = padLengthWord32 % fillLengthWord32;
+ padding =
+ StringRepeat(context, fillString, Convert<Smi>(repetitionsWord32));
+
+ if (remainingWord32 != 0) {
+ const remainderString =
+ StringSubstring(fillString, 0, Convert<intptr>(remainingWord32));
+ padding = padding + remainderString;
+ }
}
- // ES6 #sec-string.prototype.padend
- transitioning javascript builtin
- StringPrototypePadEnd(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): String {
- const methodName: constexpr string = 'String.prototype.padEnd';
- return StringPad(receiver, arguments, methodName, kStringPadEnd);
+ // Return result.
+ assert(padLength == padding.length_smi);
+ if (variant == kStringPadStart) {
+ return padding + receiverString;
}
+ assert(variant == kStringPadEnd);
+ return receiverString + padding;
+}
+
+// ES6 #sec-string.prototype.padstart
+transitioning javascript builtin
+StringPrototypePadStart(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.padStart';
+ return StringPad(receiver, arguments, methodName, kStringPadStart);
+}
+
+// ES6 #sec-string.prototype.padend
+transitioning javascript builtin
+StringPrototypePadEnd(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.padEnd';
+ return StringPad(receiver, arguments, methodName, kStringPadEnd);
+}
}
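The repetition and remainder arithmetic in StringPad above yields the familiar padStart/padEnd results; a quick JavaScript illustration, not part of the patch:

  '7'.padStart(3);          // '  7' (default single-space fill)
  '7'.padStart(5, 'ab');    // 'abab7' (padLength 4: two repetitions, no remainder)
  'x'.padEnd(4, 'ab');      // 'xaba' (padLength 3: one repetition plus a 1-char remainder)
  'long'.padStart(2, '*');  // 'long' (maxLength does not exceed the length, returned unchanged)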
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index e3e72ae7b5..e1e33eb53a 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -3,76 +3,72 @@
// found in the LICENSE file.
namespace string {
- const kBuiltinName: constexpr string = 'String.prototype.repeat';
+const kBuiltinName: constexpr string = 'String.prototype.repeat';
- builtin StringRepeat(implicit context: Context)(string: String, count: Smi):
- String {
- assert(count >= 0);
- assert(string != kEmptyString);
+builtin StringRepeat(implicit context: Context)(
+ string: String, count: Smi): String {
+ assert(count >= 0);
+ assert(string != kEmptyString);
- let result: String = kEmptyString;
- let powerOfTwoRepeats: String = string;
- let n: intptr = Convert<intptr>(count);
+ let result: String = kEmptyString;
+ let powerOfTwoRepeats: String = string;
+ let n: intptr = Convert<intptr>(count);
- while (true) {
- if ((n & 1) == 1) result = result + powerOfTwoRepeats;
+ while (true) {
+ if ((n & 1) == 1) result = result + powerOfTwoRepeats;
- n = n >> 1;
- if (n == 0) break;
+ n = n >> 1;
+ if (n == 0) break;
- powerOfTwoRepeats = powerOfTwoRepeats + powerOfTwoRepeats;
- }
-
- return result;
+ powerOfTwoRepeats = powerOfTwoRepeats + powerOfTwoRepeats;
}
- // https://tc39.github.io/ecma262/#sec-string.prototype.repeat
- transitioning javascript builtin StringPrototypeRepeat(
- js-implicit context: NativeContext,
- receiver: JSAny)(count: JSAny): String {
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const s: String = ToThisString(receiver, kBuiltinName);
+ return result;
+}
- try {
- // 3. Let n be ? ToInteger(count).
- typeswitch (ToInteger_Inline(count)) {
- case (n: Smi): {
- // 4. If n < 0, throw a RangeError exception.
- if (n < 0) goto InvalidCount;
+// https://tc39.github.io/ecma262/#sec-string.prototype.repeat
+transitioning javascript builtin StringPrototypeRepeat(
+ js-implicit context: NativeContext, receiver: JSAny)(count: JSAny): String {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const s: String = ToThisString(receiver, kBuiltinName);
- // 6. If n is 0, return the empty String.
- if (n == 0 || s.length_uint32 == 0) goto EmptyString;
+ try {
+ // 3. Let n be ? ToInteger(count).
+ typeswitch (ToInteger_Inline(count)) {
+ case (n: Smi): {
+ // 4. If n < 0, throw a RangeError exception.
+ if (n < 0) goto InvalidCount;
- if (n > kStringMaxLength) goto InvalidStringLength;
+ // 6. If n is 0, return the empty String.
+ if (n == 0 || s.length_uint32 == 0) goto EmptyString;
- // 7. Return the String value that is made from n copies of S appended
- // together.
- return StringRepeat(s, n);
- }
- case (heapNum: HeapNumber): deferred {
- assert(IsNumberNormalized(heapNum));
- const n = LoadHeapNumberValue(heapNum);
+ if (n > kStringMaxLength) goto InvalidStringLength;
+
+ // 7. Return the String value that is made from n copies of S appended
+ // together.
+ return StringRepeat(s, n);
+ }
+ case (heapNum: HeapNumber): deferred {
+ assert(IsNumberNormalized(heapNum));
+ const n = LoadHeapNumberValue(heapNum);
- // 4. If n < 0, throw a RangeError exception.
-          // 5. If n is +∞, throw a RangeError exception.
- if (n == V8_INFINITY || n < 0.0) goto InvalidCount;
+ // 4. If n < 0, throw a RangeError exception.
+        // 5. If n is +∞, throw a RangeError exception.
+ if (n == V8_INFINITY || n < 0.0) goto InvalidCount;
- // 6. If n is 0, return the empty String.
- if (s.length_uint32 == 0) goto EmptyString;
+ // 6. If n is 0, return the empty String.
+ if (s.length_uint32 == 0) goto EmptyString;
- goto InvalidStringLength;
- }
+ goto InvalidStringLength;
}
}
- label EmptyString {
- return kEmptyString;
- }
- label InvalidCount deferred {
- ThrowRangeError(MessageTemplate::kInvalidCountValue, count);
- }
- label InvalidStringLength deferred {
- ThrowInvalidStringLength(context);
- }
+ } label EmptyString {
+ return kEmptyString;
+ } label InvalidCount deferred {
+ ThrowRangeError(MessageTemplate::kInvalidCountValue, count);
+ } label InvalidStringLength deferred {
+ ThrowInvalidStringLength(context);
}
}
+}
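StringRepeat above builds its result by repeated doubling (a binary decomposition of the count) instead of appending the string count times; the observable behavior in plain JavaScript, illustrative only:

  'ab'.repeat(3);  // 'ababab' (3 = 0b11: the single copy plus one doubling)
  'ab'.repeat(0);  // ''
  // 'ab'.repeat(-1) throws a RangeError, as do counts that would exceed
  // the maximum string length.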
diff --git a/deps/v8/src/builtins/string-replaceall.tq b/deps/v8/src/builtins/string-replaceall.tq
index c7589f18a6..9211304b34 100644
--- a/deps/v8/src/builtins/string-replaceall.tq
+++ b/deps/v8/src/builtins/string-replaceall.tq
@@ -5,218 +5,216 @@
#include 'src/builtins/builtins-string-gen.h'
namespace string {
- extern macro ReplaceSymbolConstant(): Symbol;
-
- extern macro StringBuiltinsAssembler::GetSubstitution(
- implicit context: Context)(String, Smi, Smi, String): String;
-
- extern builtin
- StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
-
- macro TryFastAbstractStringIndexOf(implicit context: Context)(
- string: String, searchString: String, fromIndex: Smi): Smi labels Slow {
- const stringLen = string.length_uintptr;
- const searchLen = searchString.length_uintptr;
- const directString = Cast<DirectString>(string) otherwise Slow;
- const directSearchStr = Cast<DirectString>(searchString) otherwise Slow;
- const fromIndexUint = Unsigned(SmiUntag(fromIndex));
-
- for (let i: uintptr = fromIndexUint; i < stringLen; i++) {
- let j = i;
- let k: uintptr = 0;
- while (j < stringLen && k < searchLen &&
- StringCharCodeAt(directString, j) ==
- StringCharCodeAt(directSearchStr, k)) {
- j++;
- k++;
- }
- if (k == searchLen) {
- return SmiTag(Signed(i));
- }
+extern macro ReplaceSymbolConstant(): Symbol;
+
+extern macro StringBuiltinsAssembler::GetSubstitution(
+ implicit context: Context)(String, Smi, Smi, String): String;
+
+extern builtin
+StringIndexOf(implicit context: Context)(String, String, Smi): Smi;
+
+macro TryFastAbstractStringIndexOf(implicit context: Context)(
+ string: String, searchString: String, fromIndex: Smi): Smi labels Slow {
+ const stringLen = string.length_uintptr;
+ const searchLen = searchString.length_uintptr;
+ const directString = Cast<DirectString>(string) otherwise Slow;
+ const directSearchStr = Cast<DirectString>(searchString) otherwise Slow;
+ const fromIndexUint = Unsigned(SmiUntag(fromIndex));
+
+ for (let i: uintptr = fromIndexUint; i < stringLen; i++) {
+ let j = i;
+ let k: uintptr = 0;
+ while (j < stringLen && k < searchLen &&
+ StringCharCodeAt(directString, j) ==
+ StringCharCodeAt(directSearchStr, k)) {
+ j++;
+ k++;
+ }
+ if (k == searchLen) {
+ return SmiTag(Signed(i));
}
- return -1;
}
+ return -1;
+}
- macro AbstractStringIndexOf(implicit context: Context)(
- string: String, searchString: String, fromIndex: Smi): Smi {
- // Special case the empty string.
- const searchStringLength = searchString.length_intptr;
- const stringLength = string.length_intptr;
- if (searchStringLength == 0 && SmiUntag(fromIndex) <= stringLength) {
- return fromIndex;
- }
+macro AbstractStringIndexOf(implicit context: Context)(
+ string: String, searchString: String, fromIndex: Smi): Smi {
+ // Special case the empty string.
+ const searchStringLength = searchString.length_intptr;
+ const stringLength = string.length_intptr;
+ if (searchStringLength == 0 && SmiUntag(fromIndex) <= stringLength) {
+ return fromIndex;
+ }
- // Don't bother to search if the searchString would go past the end
- // of the string. This is actually necessary because of runtime
- // checks.
- if (SmiUntag(fromIndex) + searchStringLength > stringLength) {
- return -1;
- }
+ // Don't bother to search if the searchString would go past the end
+ // of the string. This is actually necessary because of runtime
+ // checks.
+ if (SmiUntag(fromIndex) + searchStringLength > stringLength) {
+ return -1;
+ }
- try {
- return TryFastAbstractStringIndexOf(string, searchString, fromIndex)
- otherwise Slow;
- }
- label Slow {
- for (let i: intptr = SmiUntag(fromIndex);
- i + searchStringLength <= stringLength; i++) {
- if (StringCompareSequence(
- context, string, searchString, Convert<Number>(SmiTag(i))) ==
- True) {
- return SmiTag(i);
- }
+ try {
+ return TryFastAbstractStringIndexOf(string, searchString, fromIndex)
+ otherwise Slow;
+ } label Slow {
+ for (let i: intptr = SmiUntag(fromIndex);
+ i + searchStringLength <= stringLength; i++) {
+ if (StringCompareSequence(
+ context, string, searchString, Convert<Number>(SmiTag(i))) ==
+ True) {
+ return SmiTag(i);
}
- return -1;
}
+ return -1;
}
+}
- transitioning macro
- ThrowIfNotGlobal(implicit context: Context)(searchValue: JSAny): void {
- let shouldThrow: bool;
- typeswitch (searchValue) {
- case (fastRegExp: FastJSRegExp): {
- shouldThrow = !fastRegExp.global;
- }
- case (Object): {
- const flags = GetProperty(searchValue, 'flags');
- RequireObjectCoercible(flags, 'String.prototype.replaceAll');
- shouldThrow =
- StringIndexOf(ToString_Inline(flags), StringConstant('g'), 0) == -1;
- }
+transitioning macro
+ThrowIfNotGlobal(implicit context: Context)(searchValue: JSAny): void {
+ let shouldThrow: bool;
+ typeswitch (searchValue) {
+ case (fastRegExp: FastJSRegExp): {
+ shouldThrow = !fastRegExp.global;
}
- if (shouldThrow) {
- ThrowTypeError(
- MessageTemplate::kRegExpGlobalInvokedOnNonGlobal,
- 'String.prototype.replaceAll');
+ case (Object): {
+ const flags = GetProperty(searchValue, 'flags');
+ RequireObjectCoercible(flags, 'String.prototype.replaceAll');
+ shouldThrow =
+ StringIndexOf(ToString_Inline(flags), StringConstant('g'), 0) == -1;
}
}
+ if (shouldThrow) {
+ ThrowTypeError(
+ MessageTemplate::kRegExpGlobalInvokedOnNonGlobal,
+ 'String.prototype.replaceAll');
+ }
+}
- // https://tc39.es/ecma262/#sec-string.prototype.replaceall
- transitioning javascript builtin StringPrototypeReplaceAll(
- js-implicit context: NativeContext,
- receiver: JSAny)(searchValue: JSAny, replaceValue: JSAny): JSAny {
- // 1. Let O be ? RequireObjectCoercible(this value).
- RequireObjectCoercible(receiver, 'String.prototype.replaceAll');
-
- // 2. If searchValue is neither undefined nor null, then
- if (searchValue != Undefined && searchValue != Null) {
- // a. Let isRegExp be ? IsRegExp(searchString).
- // b. If isRegExp is true, then
- // i. Let flags be ? Get(searchValue, "flags").
- // ii. Perform ? RequireObjectCoercible(flags).
- // iii. If ? ToString(flags) does not contain "g", throw a
- // TypeError exception.
- if (regexp::IsRegExp(searchValue)) {
- ThrowIfNotGlobal(searchValue);
- }
-
- // TODO(joshualitt): We could easily add fast paths for string
- // searchValues and potential FastRegExps.
- // c. Let replacer be ? GetMethod(searchValue, @@replace).
- // d. If replacer is not undefined, then
-      //   i. Return ? Call(replacer, searchValue, « O, replaceValue »).
- try {
- const replacer = GetMethod(searchValue, ReplaceSymbolConstant())
- otherwise ReplaceSymbolIsNullOrUndefined;
- return Call(context, replacer, searchValue, receiver, replaceValue);
- }
- label ReplaceSymbolIsNullOrUndefined {}
+// https://tc39.es/ecma262/#sec-string.prototype.replaceall
+transitioning javascript builtin StringPrototypeReplaceAll(
+ js-implicit context: NativeContext, receiver: JSAny)(
+ searchValue: JSAny, replaceValue: JSAny): JSAny {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ RequireObjectCoercible(receiver, 'String.prototype.replaceAll');
+
+ // 2. If searchValue is neither undefined nor null, then
+ if (searchValue != Undefined && searchValue != Null) {
+ // a. Let isRegExp be ? IsRegExp(searchString).
+ // b. If isRegExp is true, then
+ // i. Let flags be ? Get(searchValue, "flags").
+ // ii. Perform ? RequireObjectCoercible(flags).
+ // iii. If ? ToString(flags) does not contain "g", throw a
+ // TypeError exception.
+ if (regexp::IsRegExp(searchValue)) {
+ ThrowIfNotGlobal(searchValue);
}
- // 3. Let string be ? ToString(O).
- const string = ToString_Inline(receiver);
+ // TODO(joshualitt): We could easily add fast paths for string
+ // searchValues and potential FastRegExps.
+ // c. Let replacer be ? GetMethod(searchValue, @@replace).
+ // d. If replacer is not undefined, then
+    //   i. Return ? Call(replacer, searchValue, « O, replaceValue »).
+ try {
+ const replacer = GetMethod(searchValue, ReplaceSymbolConstant())
+ otherwise ReplaceSymbolIsNullOrUndefined;
+ return Call(context, replacer, searchValue, receiver, replaceValue);
+ } label ReplaceSymbolIsNullOrUndefined {}
+ }
- // 4. Let searchString be ? ToString(searchValue).
- const searchString = ToString_Inline(searchValue);
+ // 3. Let string be ? ToString(O).
+ const string = ToString_Inline(receiver);
- // 5. Let functionalReplace be IsCallable(replaceValue).
- let replaceValueArg = replaceValue;
- const functionalReplace = TaggedIsCallable(replaceValue);
+ // 4. Let searchString be ? ToString(searchValue).
+ const searchString = ToString_Inline(searchValue);
- // 6. If functionalReplace is false, then
- if (!functionalReplace) {
- // a. Let replaceValue be ? ToString(replaceValue).
- replaceValueArg = ToString_Inline(replaceValue);
- }
+ // 5. Let functionalReplace be IsCallable(replaceValue).
+ let replaceValueArg = replaceValue;
+ const functionalReplace = Is<Callable>(replaceValue);
- // 7. Let searchLength be the length of searchString.
- const searchLength = searchString.length_smi;
-
- // 8. Let advanceBy be max(1, searchLength).
- const advanceBy = SmiMax(1, searchLength);
-
- // We combine the two loops from the spec into one to avoid
- // needing a growable array.
- //
- // 9. Let matchPositions be a new empty List.
- // 10. Let position be ! StringIndexOf(string, searchString, 0).
- // 11. Repeat, while position is not -1
- // a. Append position to the end of matchPositions.
- // b. Let position be ! StringIndexOf(string, searchString,
- // position + advanceBy).
- // 12. Let endOfLastMatch be 0.
- // 13. Let result be the empty string value.
- // 14. For each position in matchPositions, do
- let endOfLastMatch: Smi = 0;
- let result: String = kEmptyString;
- let position = AbstractStringIndexOf(string, searchString, 0);
- while (position != -1) {
- // a. If functionalReplace is true, then
- // b. Else,
- let replacement: String;
- if (functionalReplace) {
- // i. Let replacement be ? ToString(? Call(replaceValue, undefined,
-       //     « searchString, position,
-       //     string »).
- replacement = ToString_Inline(Call(
- context, UnsafeCast<Callable>(replaceValueArg), Undefined,
- searchString, position, string));
- } else {
- // i. Assert: Type(replaceValue) is String.
- const replaceValueString = UnsafeCast<String>(replaceValueArg);
-
- // ii. Let captures be a new empty List.
- // iii. Let replacement be GetSubstitution(searchString,
- // string, position, captures,
- // undefined, replaceValue).
- // Note: Instead we just call a simpler GetSubstitution primitive.
- const matchEndPosition = position + searchLength;
- replacement = GetSubstitution(
- string, position, matchEndPosition, replaceValueString);
- }
+ // 6. If functionalReplace is false, then
+ if (!functionalReplace) {
+ // a. Let replaceValue be ? ToString(replaceValue).
+ replaceValueArg = ToString_Inline(replaceValue);
+ }
- // c. Let stringSlice be the substring of string consisting of the code
- // units from endOfLastMatch (inclusive) up through position
- // (exclusive).
- const stringSlice = string::SubString(
- string, Unsigned(SmiUntag(endOfLastMatch)),
- Unsigned(SmiUntag(position)));
+ // 7. Let searchLength be the length of searchString.
+ const searchLength = searchString.length_smi;
+
+ // 8. Let advanceBy be max(1, searchLength).
+ const advanceBy = SmiMax(1, searchLength);
+
+ // We combine the two loops from the spec into one to avoid
+ // needing a growable array.
+ //
+ // 9. Let matchPositions be a new empty List.
+ // 10. Let position be ! StringIndexOf(string, searchString, 0).
+ // 11. Repeat, while position is not -1
+ // a. Append position to the end of matchPositions.
+ // b. Let position be ! StringIndexOf(string, searchString,
+ // position + advanceBy).
+ // 12. Let endOfLastMatch be 0.
+ // 13. Let result be the empty string value.
+ // 14. For each position in matchPositions, do
+ let endOfLastMatch: Smi = 0;
+ let result: String = kEmptyString;
+ let position = AbstractStringIndexOf(string, searchString, 0);
+ while (position != -1) {
+ // a. If functionalReplace is true, then
+ // b. Else,
+ let replacement: String;
+ if (functionalReplace) {
+ // i. Let replacement be ? ToString(? Call(replaceValue, undefined,
+      //     « searchString, position,
+      //     string »).
+ replacement = ToString_Inline(Call(
+ context, UnsafeCast<Callable>(replaceValueArg), Undefined,
+ searchString, position, string));
+ } else {
+ // i. Assert: Type(replaceValue) is String.
+ const replaceValueString = UnsafeCast<String>(replaceValueArg);
+
+ // ii. Let captures be a new empty List.
+ // iii. Let replacement be GetSubstitution(searchString,
+ // string, position, captures,
+ // undefined, replaceValue).
+ // Note: Instead we just call a simpler GetSubstitution primitive.
+ const matchEndPosition = position + searchLength;
+ replacement = GetSubstitution(
+ string, position, matchEndPosition, replaceValueString);
+ }
- // d. Let result be the string-concatenation of result, stringSlice,
- // and replacement.
- // TODO(joshualitt): This leaves a completely degenerate ConsString tree.
- // We could be smarter here.
- result = result + stringSlice + replacement;
+ // c. Let stringSlice be the substring of string consisting of the code
+ // units from endOfLastMatch (inclusive) up through position
+ // (exclusive).
+ const stringSlice = string::SubString(
+ string, Unsigned(SmiUntag(endOfLastMatch)),
+ Unsigned(SmiUntag(position)));
- // e. Let endOfLastMatch be position + searchLength.
- endOfLastMatch = position + searchLength;
+ // d. Let result be the string-concatenation of result, stringSlice,
+ // and replacement.
+ // TODO(joshualitt): This leaves a completely degenerate ConsString tree.
+ // We could be smarter here.
+ result = result + stringSlice + replacement;
- position =
- AbstractStringIndexOf(string, searchString, position + advanceBy);
- }
+ // e. Let endOfLastMatch be position + searchLength.
+ endOfLastMatch = position + searchLength;
- // 15. If endOfLastMatch < the length of string, then
- if (endOfLastMatch < string.length_smi) {
- // a. Let result be the string-concatenation of result and the substring
- // of string consisting of the code units from endOfLastMatch
- // (inclusive) up through the final code unit of string (inclusive).
- result = result +
- string::SubString(
- string, Unsigned(SmiUntag(endOfLastMatch)),
- Unsigned(string.length_intptr));
- }
+ position =
+ AbstractStringIndexOf(string, searchString, position + advanceBy);
+ }
- // 16. Return result.
- return result;
+ // 15. If endOfLastMatch < the length of string, then
+ if (endOfLastMatch < string.length_smi) {
+ // a. Let result be the string-concatenation of result and the substring
+ // of string consisting of the code units from endOfLastMatch
+ // (inclusive) up through the final code unit of string (inclusive).
+ result = result +
+ string::SubString(
+ string, Unsigned(SmiUntag(endOfLastMatch)),
+ Unsigned(string.length_intptr));
}
+
+ // 16. Return result.
+ return result;
+}
}
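
For reference, the observable String.prototype.replaceAll behavior that the Torque builtin above encodes (per the quoted spec steps) is sketched below from the JavaScript side; this is spec-level behavior on an ES2021+ engine, not anything V8-specific.

  // Non-global RegExp search values are rejected up front (step 2.b).
  // 'abc'.replaceAll(/b/, 'x')  ->  TypeError
  console.log('abc'.replaceAll(/b/g, 'x'));                        // "axc"

  // A plain-string search replaces every occurrence, not just the first.
  console.log('x.y.z'.replaceAll('.', '/'));                       // "x/y/z"

  // A functional replaceValue is called with (match, position, string).
  console.log('x.y.z'.replaceAll('.', (_m, pos) => String(pos)));  // "x1y3z"

  // An empty search string matches before every code unit and at the end,
  // because advanceBy = max(1, searchLength) = 1.
  console.log('ab'.replaceAll('', '-'));                           // "-a-b-"
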
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
index ea95d44a82..71442a28fa 100644
--- a/deps/v8/src/builtins/string-slice.tq
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -3,33 +3,32 @@
// found in the LICENSE file.
namespace string {
- // ES6 #sec-string.prototype.slice ( start, end )
- // https://tc39.github.io/ecma262/#sec-string.prototype.slice
- transitioning javascript builtin StringPrototypeSlice(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): String {
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const string: String = ToThisString(receiver, 'String.prototype.slice');
+// ES6 #sec-string.prototype.slice ( start, end )
+// https://tc39.github.io/ecma262/#sec-string.prototype.slice
+transitioning javascript builtin StringPrototypeSlice(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, 'String.prototype.slice');
- // 3. Let len be the number of elements in S.
- const length: uintptr = string.length_uintptr;
+ // 3. Let len be the number of elements in S.
+ const length: uintptr = string.length_uintptr;
- // Convert {start} to a relative index.
- const arg0 = arguments[0];
- const start: uintptr =
- arg0 != Undefined ? ConvertToRelativeIndex(arg0, length) : 0;
+ // Convert {start} to a relative index.
+ const arg0 = arguments[0];
+ const start: uintptr =
+ arg0 != Undefined ? ConvertToRelativeIndex(arg0, length) : 0;
- // 5. If end is undefined, let intEnd be len;
- // else Convert {end} to a relative index.
- const arg1 = arguments[1];
- const end: uintptr =
- arg1 != Undefined ? ConvertToRelativeIndex(arg1, length) : length;
+ // 5. If end is undefined, let intEnd be len;
+ // else Convert {end} to a relative index.
+ const arg1 = arguments[1];
+ const end: uintptr =
+ arg1 != Undefined ? ConvertToRelativeIndex(arg1, length) : length;
- if (end <= start) {
- return kEmptyString;
- }
-
- return SubString(string, start, end);
+ if (end <= start) {
+ return kEmptyString;
}
+
+ return SubString(string, start, end);
+}
}
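
As a reminder of the semantics encoded above, String.prototype.slice resolves negative indices relative to the length and returns the empty string when end <= start; for example (engine-agnostic TypeScript):

  const s = 'hello';
  console.log(s.slice(1, 4));   // "ell"
  console.log(s.slice(-3));     // "llo"  (start resolves to len - 3)
  console.log(s.slice(1, -1));  // "ell"  (end resolves to len - 1)
  console.log(s.slice(3, 1));   // ""     (end <= start)
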
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
index 045722dd82..a1f99df17b 100644
--- a/deps/v8/src/builtins/string-startswith.tq
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -5,57 +5,56 @@
#include 'src/builtins/builtins-regexp-gen.h'
namespace string {
- // https://tc39.github.io/ecma262/#sec-string.prototype.startswith
- transitioning javascript builtin StringPrototypeStartsWith(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): Boolean {
- const searchString: JSAny = arguments[0];
- const position: JSAny = arguments[1];
- const kBuiltinName: constexpr string = 'String.prototype.startsWith';
-
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const string: String = ToThisString(receiver, kBuiltinName);
-
- // 3. Let isRegExp be ? IsRegExp(searchString).
- // 4. If isRegExp is true, throw a TypeError exception.
- if (regexp::IsRegExp(searchString)) {
- ThrowTypeError(MessageTemplate::kFirstArgumentNotRegExp, kBuiltinName);
- }
-
- // 5. Let searchStr be ? ToString(searchString).
- const searchStr: String = ToString_Inline(searchString);
-
- // 8. Let len be the length of S.
- const len: uintptr = string.length_uintptr;
-
- // 6. Let pos be ? ToInteger(position).
- // 7. Assert: If position is undefined, then pos is 0.
- // 9. Let start be min(max(pos, 0), len).
- const start: uintptr =
- (position != Undefined) ? ClampToIndexRange(position, len) : 0;
-
- // 10. Let searchLength be the length of searchStr.
- const searchLength: uintptr = searchStr.length_uintptr;
-
- // 11. If searchLength + start is greater than len, return false.
- // The comparison is rephrased to be overflow-friendly with unsigned
- // indices.
- if (searchLength > len - start) return False;
-
- // 12. If the sequence of code units of S starting at start of length
- // searchLength is the same as the full code unit sequence of searchStr,
- // return true.
- // 13. Otherwise, return false.
- try {
- // Fast Path: If both strings are direct and relevant indices are Smis.
- return TryFastStringCompareSequence(
- string, searchStr, start, searchLength) otherwise Slow;
- }
- label Slow {
- // Slow Path: If either of the string is indirect, bail into runtime.
- return StringCompareSequence(
- context, string, searchStr, Convert<Number>(start));
- }
+// https://tc39.github.io/ecma262/#sec-string.prototype.startswith
+transitioning javascript builtin StringPrototypeStartsWith(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): Boolean {
+ const searchString: JSAny = arguments[0];
+ const position: JSAny = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.startsWith';
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, kBuiltinName);
+
+ // 3. Let isRegExp be ? IsRegExp(searchString).
+ // 4. If isRegExp is true, throw a TypeError exception.
+ if (regexp::IsRegExp(searchString)) {
+ ThrowTypeError(MessageTemplate::kFirstArgumentNotRegExp, kBuiltinName);
}
+
+ // 5. Let searchStr be ? ToString(searchString).
+ const searchStr: String = ToString_Inline(searchString);
+
+ // 8. Let len be the length of S.
+ const len: uintptr = string.length_uintptr;
+
+ // 6. Let pos be ? ToInteger(position).
+ // 7. Assert: If position is undefined, then pos is 0.
+ // 9. Let start be min(max(pos, 0), len).
+ const start: uintptr =
+ (position != Undefined) ? ClampToIndexRange(position, len) : 0;
+
+ // 10. Let searchLength be the length of searchStr.
+ const searchLength: uintptr = searchStr.length_uintptr;
+
+ // 11. If searchLength + start is greater than len, return false.
+ // The comparison is rephrased to be overflow-friendly with unsigned
+ // indices.
+ if (searchLength > len - start) return False;
+
+ // 12. If the sequence of code units of S starting at start of length
+ // searchLength is the same as the full code unit sequence of searchStr,
+ // return true.
+ // 13. Otherwise, return false.
+ try {
+ // Fast Path: If both strings are direct and relevant indices are Smis.
+ return TryFastStringCompareSequence(string, searchStr, start, searchLength)
+ otherwise Slow;
+ } label Slow {
+ // Slow Path: If either of the string is indirect, bail into runtime.
+ return StringCompareSequence(
+ context, string, searchStr, Convert<Number>(start));
+ }
+}
}
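
The fast/slow split above is purely an implementation detail; observable String.prototype.startsWith behavior stays the same, e.g.:

  const s = 'hello';
  console.log(s.startsWith('he'));         // true
  console.log(s.startsWith('ell', 1));     // true   (comparison starts at index 1)
  console.log(s.startsWith('hello!', 1));  // false  (search longer than the remaining text)
  // Per steps 3-4 quoted above, a RegExp search string throws:
  // s.startsWith(/he/)  ->  TypeError
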
diff --git a/deps/v8/src/builtins/string-substr.tq b/deps/v8/src/builtins/string-substr.tq
index 917bee691e..068c4437ca 100644
--- a/deps/v8/src/builtins/string-substr.tq
+++ b/deps/v8/src/builtins/string-substr.tq
@@ -4,40 +4,39 @@
namespace string {
- // String.prototype.substr ( start, length )
- // ES6 #sec-string.prototype.substr
- transitioning javascript builtin StringPrototypeSubstr(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): String {
- const methodName: constexpr string = 'String.prototype.substr';
- // 1. Let O be ? RequireObjectCoercible(this value).
- // 2. Let S be ? ToString(O).
- const string: String = ToThisString(receiver, methodName);
+// String.prototype.substr ( start, length )
+// ES6 #sec-string.prototype.substr
+transitioning javascript builtin StringPrototypeSubstr(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.substr';
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, methodName);
- // 5. Let size be the number of code units in S.
- const size: uintptr = string.length_uintptr;
+ // 5. Let size be the number of code units in S.
+ const size: uintptr = string.length_uintptr;
- // 3. Let intStart be ? ToInteger(start).
- // 6. If intStart < 0, set intStart to max(size + intStart, 0).
- const start = arguments[0];
- const initStart: uintptr =
- start != Undefined ? ConvertToRelativeIndex(start, size) : 0;
+ // 3. Let intStart be ? ToInteger(start).
+ // 6. If intStart < 0, set intStart to max(size + intStart, 0).
+ const start = arguments[0];
+ const initStart: uintptr =
+ start != Undefined ? ConvertToRelativeIndex(start, size) : 0;
- // 4. If length is undefined,
-   //    let end be +∞; otherwise let end be ? ToInteger(length).
- // 7. Let resultLength be min(max(end, 0), size - intStart).
- const length = arguments[1];
- const lengthLimit = size - initStart;
- assert(lengthLimit <= size);
- const resultLength: uintptr = length != Undefined ?
- ClampToIndexRange(length, lengthLimit) :
- lengthLimit;
+ // 4. If length is undefined,
+  //    let end be +∞; otherwise let end be ? ToInteger(length).
+ // 7. Let resultLength be min(max(end, 0), size - intStart).
+ const length = arguments[1];
+ const lengthLimit = size - initStart;
+ assert(lengthLimit <= size);
+ const resultLength: uintptr = length != Undefined ?
+ ClampToIndexRange(length, lengthLimit) :
+ lengthLimit;
-   // 8. If resultLength ≤ 0, return the empty String "".
- if (resultLength == 0) return EmptyStringConstant();
+  // 8. If resultLength ≤ 0, return the empty String "".
+ if (resultLength == 0) return EmptyStringConstant();
- // 9. Return the String value containing resultLength consecutive code units
- // from S beginning with the code unit at index intStart.
- return SubString(string, initStart, initStart + resultLength);
- }
+ // 9. Return the String value containing resultLength consecutive code units
+ // from S beginning with the code unit at index intStart.
+ return SubString(string, initStart, initStart + resultLength);
+}
}
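
String.prototype.substr (Annex B) resolves a negative start relative to the end of the string and clamps the requested length to what remains, as in:

  const s = 'hello';
  console.log(s.substr(1));       // "ello"
  console.log(s.substr(-3, 2));   // "ll"   (start resolves to len - 3)
  console.log(s.substr(2, 100));  // "llo"  (length clamped to size - intStart)
  console.log(s.substr(4, 0));    // ""     (resultLength == 0)
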
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
index e4e7d70000..099a28b505 100644
--- a/deps/v8/src/builtins/string-substring.tq
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -4,28 +4,26 @@
namespace string {
- // ES6 #sec-string.prototype.substring
- transitioning javascript builtin StringPrototypeSubstring(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): String {
- // Check that {receiver} is coercible to Object and convert it to a String.
- const string: String = ToThisString(receiver, 'String.prototype.substring');
- const length: uintptr = string.length_uintptr;
+// ES6 #sec-string.prototype.substring
+transitioning javascript builtin StringPrototypeSubstring(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ const string: String = ToThisString(receiver, 'String.prototype.substring');
+ const length: uintptr = string.length_uintptr;
- // Conversion and bounds-checks for {start}.
- const arg0 = arguments[0];
- let start: uintptr =
- arg0 != Undefined ? ClampToIndexRange(arg0, length) : 0;
+ // Conversion and bounds-checks for {start}.
+ const arg0 = arguments[0];
+ let start: uintptr = arg0 != Undefined ? ClampToIndexRange(arg0, length) : 0;
- // Conversion and bounds-checks for {end}.
- const arg1 = arguments[1];
- let end: uintptr =
- arg1 != Undefined ? ClampToIndexRange(arg1, length) : length;
- if (end < start) {
- const tmp: uintptr = end;
- end = start;
- start = tmp;
- }
- return SubString(string, start, end);
+ // Conversion and bounds-checks for {end}.
+ const arg1 = arguments[1];
+ let end: uintptr =
+ arg1 != Undefined ? ClampToIndexRange(arg1, length) : length;
+ if (end < start) {
+ const tmp: uintptr = end;
+ end = start;
+ start = tmp;
}
+ return SubString(string, start, end);
+}
}
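
String.prototype.substring clamps both arguments into [0, length] and swaps them when end < start, which is exactly the tmp swap in the builtin above:

  const s = 'hello';
  console.log(s.substring(1, 4));   // "ell"
  console.log(s.substring(4, 1));   // "ell"  (arguments swapped)
  console.log(s.substring(-2, 2));  // "he"   (negative start clamps to 0)
  console.log(s.substring(3));      // "lo"   (missing end defaults to length)
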
diff --git a/deps/v8/src/builtins/symbol.tq b/deps/v8/src/builtins/symbol.tq
index cda344471f..18bdebd380 100644
--- a/deps/v8/src/builtins/symbol.tq
+++ b/deps/v8/src/builtins/symbol.tq
@@ -3,47 +3,45 @@
// found in the LICENSE file.
namespace symbol {
- extern runtime SymbolDescriptiveString(implicit context: Context)(Symbol):
- String;
+extern runtime SymbolDescriptiveString(implicit context: Context)(Symbol):
+ String;
- transitioning macro ThisSymbolValue(implicit context: Context)(
- receiver: JSAny, method: constexpr string): Symbol {
- return UnsafeCast<Symbol>(
- ToThisValue(receiver, PrimitiveType::kSymbol, method));
- }
+transitioning macro ThisSymbolValue(implicit context: Context)(
+ receiver: JSAny, method: constexpr string): Symbol {
+ return UnsafeCast<Symbol>(
+ ToThisValue(receiver, PrimitiveType::kSymbol, method));
+}
- // ES #sec-symbol.prototype.description
- transitioning javascript builtin SymbolPrototypeDescriptionGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): String|Undefined {
- // 1. Let s be the this value.
- // 2. Let sym be ? thisSymbolValue(s).
- const sym: Symbol =
- ThisSymbolValue(receiver, 'Symbol.prototype.description');
- // 3. Return sym.[[Description]].
- return sym.description;
- }
+// ES #sec-symbol.prototype.description
+transitioning javascript builtin SymbolPrototypeDescriptionGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): String|Undefined {
+ // 1. Let s be the this value.
+ // 2. Let sym be ? thisSymbolValue(s).
+ const sym: Symbol = ThisSymbolValue(receiver, 'Symbol.prototype.description');
+ // 3. Return sym.[[Description]].
+ return sym.description;
+}
- // ES6 #sec-symbol.prototype-@@toprimitive
- transitioning javascript builtin SymbolPrototypeToPrimitive(
- js-implicit context: NativeContext,
- receiver: JSAny)(_hint: JSAny): JSAny {
- // 1. Return ? thisSymbolValue(this value).
- return ThisSymbolValue(receiver, 'Symbol.prototype [ @@toPrimitive ]');
- }
+// ES6 #sec-symbol.prototype-@@toprimitive
+transitioning javascript builtin SymbolPrototypeToPrimitive(
+ js-implicit context: NativeContext, receiver: JSAny)(_hint: JSAny): JSAny {
+ // 1. Return ? thisSymbolValue(this value).
+ return ThisSymbolValue(receiver, 'Symbol.prototype [ @@toPrimitive ]');
+}
- // ES6 #sec-symbol.prototype.tostring
- transitioning javascript builtin SymbolPrototypeToString(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Let sym be ? thisSymbolValue(this value).
- const sym: Symbol = ThisSymbolValue(receiver, 'Symbol.prototype.toString');
- // 2. Return SymbolDescriptiveString(sym).
- return SymbolDescriptiveString(sym);
- }
+// ES6 #sec-symbol.prototype.tostring
+transitioning javascript builtin SymbolPrototypeToString(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Let sym be ? thisSymbolValue(this value).
+ const sym: Symbol = ThisSymbolValue(receiver, 'Symbol.prototype.toString');
+ // 2. Return SymbolDescriptiveString(sym).
+ return SymbolDescriptiveString(sym);
+}
- // ES6 #sec-symbol.prototype.valueof
- transitioning javascript builtin SymbolPrototypeValueOf(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Return ? thisSymbolValue(this value).
- return ThisSymbolValue(receiver, 'Symbol.prototype.valueOf');
- }
+// ES6 #sec-symbol.prototype.valueof
+transitioning javascript builtin SymbolPrototypeValueOf(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Return ? thisSymbolValue(this value).
+ return ThisSymbolValue(receiver, 'Symbol.prototype.valueOf');
+}
}
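
The four Symbol.prototype entries above all unwrap the receiver via ThisSymbolValue and otherwise behave as follows from JavaScript (assuming an ES2019+ environment for the description getter):

  const sym = Symbol('label');
  console.log(sym.description);                              // "label"
  console.log(sym.toString());                               // "Symbol(label)"
  console.log(sym.valueOf() === sym);                        // true
  console.log(sym[Symbol.toPrimitive]('default') === sym);   // true
  // A non-Symbol receiver makes ThisSymbolValue throw:
  // Symbol.prototype.toString.call({})  ->  TypeError
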
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 85c43342cf..d2b107f932 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -3,188 +3,189 @@
// found in the LICENSE file.
namespace torque_internal {
- // Unsafe is a marker that we require to be passed when calling internal APIs
- // that might lead to unsoundness when used incorrectly. Unsafe markers should
- // therefore not be instantiated anywhere outside of this namespace.
- struct Unsafe {}
-
- // Size of a type in memory (on the heap). For class types, this is the size
- // of the pointer, not of the instance.
- intrinsic %SizeOf<T: type>(): constexpr int31;
-
- struct Reference<T: type> {
- const object: HeapObject;
- const offset: intptr;
- unsafeMarker: Unsafe;
- }
- type ConstReference<T: type> extends Reference<T>;
- type MutableReference<T: type> extends ConstReference<T>;
-
- macro UnsafeNewReference<T: type>(object: HeapObject, offset: intptr):&T {
- return %RawDownCast<&T>(
- Reference<T>{object: object, offset: offset, unsafeMarker: Unsafe {}});
- }
-
- struct Slice<T: type> {
- macro TryAtIndex(index: intptr):&T labels OutOfBounds {
- if (Convert<uintptr>(index) < Convert<uintptr>(this.length)) {
- return UnsafeNewReference<T>(
- this.object, this.offset + index * %SizeOf<T>());
- } else {
- goto OutOfBounds;
- }
- }
+// Unsafe is a marker that we require to be passed when calling internal APIs
+// that might lead to unsoundness when used incorrectly. Unsafe markers should
+// therefore not be instantiated anywhere outside of this namespace.
+struct Unsafe {}
+
+// Size of a type in memory (on the heap). For class types, this is the size
+// of the pointer, not of the instance.
+intrinsic %SizeOf<T: type>(): constexpr int31;
+
+struct Reference<T: type> {
+ const object: HeapObject;
+ const offset: intptr;
+ unsafeMarker: Unsafe;
+}
+type ConstReference<T: type> extends Reference<T>;
+type MutableReference<T: type> extends ConstReference<T>;
- macro AtIndex(index: intptr):&T {
- return this.TryAtIndex(index) otherwise unreachable;
- }
+namespace unsafe {
+macro NewReference<T: type>(object: HeapObject, offset: intptr):&T {
+ return %RawDownCast<&T>(
+ Reference<T>{object: object, offset: offset, unsafeMarker: Unsafe {}});
+}
+} // namespace unsafe
- macro AtIndex(index: uintptr):&T {
- return this.TryAtIndex(Convert<intptr>(index)) otherwise unreachable;
+struct Slice<T: type> {
+ macro TryAtIndex(index: intptr):&T labels OutOfBounds {
+ if (Convert<uintptr>(index) < Convert<uintptr>(this.length)) {
+ return unsafe::NewReference<T>(
+ this.object, this.offset + index * %SizeOf<T>());
+ } else {
+ goto OutOfBounds;
}
+ }
- macro AtIndex(index: constexpr int31):&T {
- const i: intptr = Convert<intptr>(index);
- return this.TryAtIndex(i) otherwise unreachable;
- }
+ macro AtIndex(index: intptr):&T {
+ return this.TryAtIndex(index) otherwise unreachable;
+ }
- macro AtIndex(index: Smi):&T {
- const i: intptr = Convert<intptr>(index);
- return this.TryAtIndex(i) otherwise unreachable;
- }
+ macro AtIndex(index: uintptr):&T {
+ return this.TryAtIndex(Convert<intptr>(index)) otherwise unreachable;
+ }
- macro Iterator(): SliceIterator<T> {
- const end = this.offset + this.length * %SizeOf<T>();
- return SliceIterator<T>{
- object: this.object,
- start: this.offset,
- end: end,
- unsafeMarker: Unsafe {}
- };
- }
- macro Iterator(startIndex: intptr, endIndex: intptr): SliceIterator<T> {
- check(
- Convert<uintptr>(endIndex) <= Convert<uintptr>(this.length) &&
- Convert<uintptr>(startIndex) <= Convert<uintptr>(endIndex));
- const start = this.offset + startIndex * %SizeOf<T>();
- const end = this.offset + endIndex * %SizeOf<T>();
- return SliceIterator<T>{
- object: this.object,
- start,
- end,
- unsafeMarker: Unsafe {}
- };
- }
+ macro AtIndex(index: constexpr int31):&T {
+ const i: intptr = Convert<intptr>(index);
+ return this.TryAtIndex(i) otherwise unreachable;
+ }
- const object: HeapObject;
- const offset: intptr;
- const length: intptr;
- unsafeMarker: Unsafe;
+ macro AtIndex(index: Smi):&T {
+ const i: intptr = Convert<intptr>(index);
+ return this.TryAtIndex(i) otherwise unreachable;
}
- macro UnsafeNewSlice<T: type>(
- object: HeapObject, offset: intptr, length: intptr): Slice<T> {
- return Slice<T>{
- object: object,
- offset: offset,
- length: length,
+ macro Iterator(): SliceIterator<T> {
+ const end = this.offset + this.length * %SizeOf<T>();
+ return SliceIterator<T>{
+ object: this.object,
+ start: this.offset,
+ end: end,
+ unsafeMarker: Unsafe {}
+ };
+ }
+ macro Iterator(startIndex: intptr, endIndex: intptr): SliceIterator<T> {
+ check(
+ Convert<uintptr>(endIndex) <= Convert<uintptr>(this.length) &&
+ Convert<uintptr>(startIndex) <= Convert<uintptr>(endIndex));
+ const start = this.offset + startIndex * %SizeOf<T>();
+ const end = this.offset + endIndex * %SizeOf<T>();
+ return SliceIterator<T>{
+ object: this.object,
+ start,
+ end,
unsafeMarker: Unsafe {}
};
}
- struct SliceIterator<T: type> {
- macro Empty(): bool {
- return this.start == this.end;
- }
-
- macro Next(): T labels NoMore {
- return * this.NextReference() otherwise NoMore;
- }
+ const object: HeapObject;
+ const offset: intptr;
+ const length: intptr;
+ unsafeMarker: Unsafe;
+}
- macro NextReference():&T labels NoMore {
- if (this.Empty()) {
- goto NoMore;
- } else {
- const result = UnsafeNewReference<T>(this.object, this.start);
- this.start += %SizeOf<T>();
- return result;
- }
- }
+macro UnsafeNewSlice<T: type>(
+ object: HeapObject, offset: intptr, length: intptr): Slice<T> {
+ return Slice<T>{
+ object: object,
+ offset: offset,
+ length: length,
+ unsafeMarker: Unsafe {}
+ };
+}
- object: HeapObject;
- start: intptr;
- end: intptr;
- unsafeMarker: Unsafe;
+struct SliceIterator<T: type> {
+ macro Empty(): bool {
+ return this.start == this.end;
}
- macro AddIndexedFieldSizeToObjectSize(
- baseSize: intptr, arrayLength: intptr,
- fieldSize: constexpr int32): intptr {
- const arrayLength = Convert<int32>(arrayLength);
- const byteLength = TryInt32Mul(arrayLength, fieldSize)
- otherwise unreachable;
- return TryIntPtrAdd(baseSize, Convert<intptr>(byteLength))
- otherwise unreachable;
+ macro Next(): T labels NoMore {
+ return * this.NextReference() otherwise NoMore;
}
- macro AlignTagged(x: intptr): intptr {
- // Round up to a multiple of kTaggedSize.
- return (x + kObjectAlignmentMask) & ~kObjectAlignmentMask;
+ macro NextReference():&T labels NoMore {
+ if (this.Empty()) {
+ goto NoMore;
+ } else {
+ const result = unsafe::NewReference<T>(this.object, this.start);
+ this.start += %SizeOf<T>();
+ return result;
+ }
}
- macro IsTaggedAligned(x: intptr): bool {
- return (x & kObjectAlignmentMask) == 0;
- }
+ object: HeapObject;
+ start: intptr;
+ end: intptr;
+ unsafeMarker: Unsafe;
+}
- macro ValidAllocationSize(sizeInBytes: intptr, map: Map): bool {
- if (sizeInBytes <= 0) return false;
- if (!IsTaggedAligned(sizeInBytes)) return false;
- const instanceSizeInWords = Convert<intptr>(map.instance_size_in_words);
- return instanceSizeInWords == kVariableSizeSentinel ||
- instanceSizeInWords * kTaggedSize == sizeInBytes;
- }
+macro AddIndexedFieldSizeToObjectSize(
+ baseSize: intptr, arrayLength: intptr, fieldSize: constexpr int32): intptr {
+ const arrayLength = Convert<int32>(arrayLength);
+ const byteLength = TryInt32Mul(arrayLength, fieldSize)
+ otherwise unreachable;
+ return TryIntPtrAdd(baseSize, Convert<intptr>(byteLength))
+ otherwise unreachable;
+}
- type UninitializedHeapObject extends HeapObject;
+macro AlignTagged(x: intptr): intptr {
+ // Round up to a multiple of kTaggedSize.
+ return (x + kObjectAlignmentMask) & ~kObjectAlignmentMask;
+}
- extern macro AllocateAllowLOS(intptr): UninitializedHeapObject;
- extern macro GetInstanceTypeMap(constexpr InstanceType): Map;
+macro IsTaggedAligned(x: intptr): bool {
+ return (x & kObjectAlignmentMask) == 0;
+}
- macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject {
- assert(ValidAllocationSize(sizeInBytes, map));
- return AllocateAllowLOS(sizeInBytes);
- }
+macro ValidAllocationSize(sizeInBytes: intptr, map: Map): bool {
+ if (sizeInBytes <= 0) return false;
+ if (!IsTaggedAligned(sizeInBytes)) return false;
+ const instanceSizeInWords = Convert<intptr>(map.instance_size_in_words);
+ return instanceSizeInWords == kVariableSizeSentinel ||
+ instanceSizeInWords * kTaggedSize == sizeInBytes;
+}
- macro InitializeFieldsFromIterator<T: type, Iterator: type>(
- target: Slice<T>, originIterator: Iterator) {
- let targetIterator = target.Iterator();
- let originIterator = originIterator;
- while (true) {
- const ref:&T = targetIterator.NextReference() otherwise break;
- * ref = originIterator.Next() otherwise unreachable;
- }
- }
- // Dummy implementations: do not initialize for UninitializedIterator.
- InitializeFieldsFromIterator<char8, UninitializedIterator>(
- _target: Slice<char8>, _originIterator: UninitializedIterator) {}
- InitializeFieldsFromIterator<char16, UninitializedIterator>(
- _target: Slice<char16>, _originIterator: UninitializedIterator) {}
-
- extern macro IsDoubleHole(HeapObject, intptr): bool;
- extern macro StoreDoubleHole(HeapObject, intptr);
-
- macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
- return float64_or_hole{
- is_hole: IsDoubleHole(r.object, r.offset - kHeapObjectTag),
- value: * UnsafeNewReference<float64>(r.object, r.offset)
- };
+type UninitializedHeapObject extends HeapObject;
+
+extern macro AllocateAllowLOS(intptr): UninitializedHeapObject;
+extern macro GetInstanceTypeMap(constexpr InstanceType): Map;
+
+macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject {
+ assert(ValidAllocationSize(sizeInBytes, map));
+ return AllocateAllowLOS(sizeInBytes);
+}
+
+macro InitializeFieldsFromIterator<T: type, Iterator: type>(
+ target: Slice<T>, originIterator: Iterator) {
+ let targetIterator = target.Iterator();
+ let originIterator = originIterator;
+ while (true) {
+ const ref:&T = targetIterator.NextReference() otherwise break;
+ * ref = originIterator.Next() otherwise unreachable;
}
- macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
- if (value.is_hole) {
- StoreDoubleHole(r.object, r.offset - kHeapObjectTag);
- } else {
- * UnsafeNewReference<float64>(r.object, r.offset) = value.value;
- }
+}
+// Dummy implementations: do not initialize for UninitializedIterator.
+InitializeFieldsFromIterator<char8, UninitializedIterator>(
+ _target: Slice<char8>, _originIterator: UninitializedIterator) {}
+InitializeFieldsFromIterator<char16, UninitializedIterator>(
+ _target: Slice<char16>, _originIterator: UninitializedIterator) {}
+
+extern macro IsDoubleHole(HeapObject, intptr): bool;
+extern macro StoreDoubleHole(HeapObject, intptr);
+
+macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
+ return float64_or_hole{
+ is_hole: IsDoubleHole(r.object, r.offset - kHeapObjectTag),
+ value: * unsafe::NewReference<float64>(r.object, r.offset)
+ };
+}
+macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
+ if (value.is_hole) {
+ StoreDoubleHole(r.object, r.offset - kHeapObjectTag);
+ } else {
+ * unsafe::NewReference<float64>(r.object, r.offset) = value.value;
}
+}
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
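
The Slice<T>/Reference<T> machinery above is Torque-internal (it addresses raw object+offset pairs), but its shape is essentially a bounds-checked view plus an iterator. A loose, hypothetical TypeScript analogue, with a plain array standing in for the backing store, might look like:

  // Hypothetical analogue of torque_internal::Slice<T>; not a V8 API.
  class ArraySlice<T> {
    constructor(private readonly backing: readonly T[],
                private readonly offset: number,
                readonly length: number) {}

    // Counterpart of TryAtIndex: undefined plays the role of the OutOfBounds label.
    tryAtIndex(index: number): T | undefined {
      return index >= 0 && index < this.length ?
          this.backing[this.offset + index] :
          undefined;
    }

    // Counterpart of Iterator() / SliceIterator<T>.Next().
    *[Symbol.iterator](): IterableIterator<T> {
      for (let i = 0; i < this.length; i++) {
        yield this.backing[this.offset + i] as T;
      }
    }
  }

  const view = new ArraySlice(['a', 'b', 'c', 'd'], 1, 2);
  console.log(view.tryAtIndex(1));  // "c"
  console.log(view.tryAtIndex(2));  // undefined (out of bounds)
  console.log([...view]);           // ["b", "c"]
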
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index e5398fc50a..ec51efc00a 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -5,452 +5,431 @@
#include 'src/builtins/builtins-constructor-gen.h'
namespace typed_array {
- extern builtin IterableToListMayPreserveHoles(Context, Object, Callable):
- JSArray;
-
- extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- implicit context: Context)(uintptr): JSArrayBuffer;
- extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
- extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
- implicit context: Context)(JSTypedArray): JSFunction;
- extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
- JSTypedArray): void;
-
- extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
- Map, String): never;
-
- transitioning macro AllocateTypedArray(implicit context: Context)(
- isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
- byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
- let elements: ByteArray;
- if constexpr (isOnHeap) {
- elements = AllocateByteArray(byteLength);
- } else {
- elements = kEmptyByteArray;
-
- // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
- // platforms are self-limiting, because we can't allocate an array bigger
- // than our 32-bit arithmetic range anyway. 64 bit platforms could
- // theoretically have an offset up to 2^35 - 1.
- const backingStore: uintptr = Convert<uintptr>(buffer.backing_store);
-
- // Assert no overflow has occurred. Only assert if the mock array buffer
- // allocator is NOT used. When the mock array buffer is used, impossibly
- // large allocations are allowed that would erroneously cause an overflow
- // and this assertion to fail.
- assert(
- IsMockArrayBufferAllocatorFlag() ||
- (backingStore + byteOffset) >= backingStore);
- }
-
- // We can't just build the new object with "new JSTypedArray" here because
- // Torque doesn't know its full size including embedder fields, so use CSA
- // for the allocation step.
- const typedArray =
- UnsafeCast<JSTypedArray>(AllocateFastOrSlowJSObjectFromMap(map));
- typedArray.elements = elements;
- typedArray.buffer = buffer;
- typedArray.byte_offset = byteOffset;
- typedArray.byte_length = byteLength;
- typedArray.length = length;
- if constexpr (isOnHeap) {
- typed_array::SetJSTypedArrayOnHeapDataPtr(
- typedArray, elements, byteOffset);
- } else {
- typed_array::SetJSTypedArrayOffHeapDataPtr(
- typedArray, buffer.backing_store, byteOffset);
- assert(
- typedArray.data_ptr ==
- (buffer.backing_store + Convert<intptr>(byteOffset)));
- }
- SetupTypedArrayEmbedderFields(typedArray);
- return typedArray;
+extern builtin IterableToListMayPreserveHoles(
+ Context, Object, Callable): JSArray;
+
+extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
+ implicit context: Context)(uintptr): JSArrayBuffer;
+extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
+extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+ implicit context: Context)(JSTypedArray): JSFunction;
+extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
+ JSTypedArray): void;
+
+extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
+ Map, String): never;
+
+transitioning macro AllocateTypedArray(implicit context: Context)(
+ isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
+ byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
+ let elements: ByteArray;
+ if constexpr (isOnHeap) {
+ elements = AllocateByteArray(byteLength);
+ } else {
+ elements = kEmptyByteArray;
+
+ // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
+ // platforms are self-limiting, because we can't allocate an array bigger
+ // than our 32-bit arithmetic range anyway. 64 bit platforms could
+ // theoretically have an offset up to 2^35 - 1.
+ const backingStore: uintptr = Convert<uintptr>(buffer.backing_store_ptr);
+
+ // Assert no overflow has occurred. Only assert if the mock array buffer
+ // allocator is NOT used. When the mock array buffer is used, impossibly
+ // large allocations are allowed that would erroneously cause an overflow
+ // and this assertion to fail.
+ assert(
+ IsMockArrayBufferAllocatorFlag() ||
+ (backingStore + byteOffset) >= backingStore);
}
- transitioning macro TypedArrayInitialize(implicit context: Context)(
- initialize: constexpr bool, map: Map, length: uintptr,
- elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): JSTypedArray labels IfRangeError {
- const byteLength = elementsInfo.CalculateByteLength(length)
- otherwise IfRangeError;
- const byteLengthNum = Convert<Number>(byteLength);
- const defaultConstructor = GetArrayBufferFunction();
- const byteOffset: uintptr = 0;
-
- try {
- if (bufferConstructor != defaultConstructor) {
- goto AttachOffHeapBuffer(ConstructWithTarget(
- defaultConstructor, bufferConstructor, byteLengthNum));
- }
+ // We can't just build the new object with "new JSTypedArray" here because
+ // Torque doesn't know its full size including embedder fields, so use CSA
+ // for the allocation step.
+ const typedArray =
+ UnsafeCast<JSTypedArray>(AllocateFastOrSlowJSObjectFromMap(map));
+ typedArray.elements = elements;
+ typedArray.buffer = buffer;
+ typedArray.byte_offset = byteOffset;
+ typedArray.byte_length = byteLength;
+ typedArray.length = length;
+ if constexpr (isOnHeap) {
+ typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
+ } else {
+ typed_array::SetJSTypedArrayOffHeapDataPtr(
+ typedArray, buffer.backing_store_ptr, byteOffset);
+ assert(
+ typedArray.data_ptr ==
+ (buffer.backing_store_ptr + Convert<intptr>(byteOffset)));
+ }
+ SetupTypedArrayEmbedderFields(typedArray);
+ return typedArray;
+}
- if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
+transitioning macro TypedArrayInitialize(implicit context: Context)(
+ initialize: constexpr bool, map: Map, length: uintptr,
+ elementsInfo: typed_array::TypedArrayElementsInfo,
+ bufferConstructor: JSReceiver): JSTypedArray labels IfRangeError {
+ const byteLength = elementsInfo.CalculateByteLength(length)
+ otherwise IfRangeError;
+ const byteLengthNum = Convert<Number>(byteLength);
+ const defaultConstructor = GetArrayBufferFunction();
+ const byteOffset: uintptr = 0;
+
+ try {
+ if (bufferConstructor != defaultConstructor) {
+ goto AttachOffHeapBuffer(ConstructWithTarget(
+ defaultConstructor, bufferConstructor, byteLengthNum));
+ }
- const buffer = AllocateEmptyOnHeapBuffer(byteLength);
+ if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
- const isOnHeap: constexpr bool = true;
- const typedArray = AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+ const buffer = AllocateEmptyOnHeapBuffer(byteLength);
- if constexpr (initialize) {
- const backingStore = typedArray.data_ptr;
- typed_array::CallCMemset(backingStore, 0, byteLength);
- }
+ const isOnHeap: constexpr bool = true;
+ const typedArray = AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength, length);
- return typedArray;
+ if constexpr (initialize) {
+ const backingStore = typedArray.data_ptr;
+ typed_array::CallCMemset(backingStore, 0, byteLength);
}
- label AllocateOffHeap {
- if constexpr (initialize) {
- goto AttachOffHeapBuffer(Construct(defaultConstructor, byteLengthNum));
- } else {
- goto AttachOffHeapBuffer(Call(
- context, GetArrayBufferNoInitFunction(), Undefined, byteLengthNum));
- }
- }
- label AttachOffHeapBuffer(bufferObj: Object) {
- const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
- const isOnHeap: constexpr bool = false;
- return AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+
+ return typedArray;
+ } label AllocateOffHeap {
+ if constexpr (initialize) {
+ goto AttachOffHeapBuffer(Construct(defaultConstructor, byteLengthNum));
+ } else {
+ goto AttachOffHeapBuffer(Call(
+ context, GetArrayBufferNoInitFunction(), Undefined, byteLengthNum));
}
+ } label AttachOffHeapBuffer(bufferObj: Object) {
+ const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength, length);
}
+}
- // 22.2.4.2 TypedArray ( length )
- // ES #sec-typedarray-length
- transitioning macro ConstructByLength(implicit context: Context)(
- map: Map, lengthObj: JSAny,
- elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
- try {
- const length: uintptr = ToIndex(lengthObj) otherwise RangeError;
- const defaultConstructor: Constructor = GetArrayBufferFunction();
- const initialize: constexpr bool = true;
- return TypedArrayInitialize(
- initialize, map, length, elementsInfo, defaultConstructor)
- otherwise RangeError;
- }
- label RangeError deferred {
- ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, lengthObj);
- }
+// 22.2.4.2 TypedArray ( length )
+// ES #sec-typedarray-length
+transitioning macro ConstructByLength(implicit context: Context)(
+ map: Map, lengthObj: JSAny,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
+ try {
+ const length: uintptr = ToIndex(lengthObj) otherwise RangeError;
+ const defaultConstructor: Constructor = GetArrayBufferFunction();
+ const initialize: constexpr bool = true;
+ return TypedArrayInitialize(
+ initialize, map, length, elementsInfo, defaultConstructor)
+ otherwise RangeError;
+ } label RangeError deferred {
+ ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, lengthObj);
}
+}
+
+// 22.2.4.4 TypedArray ( object )
+// ES #sec-typedarray-object
+transitioning macro ConstructByArrayLike(implicit context: Context)(
+ map: Map, arrayLike: HeapObject, length: uintptr,
+ elementsInfo: typed_array::TypedArrayElementsInfo,
+ bufferConstructor: JSReceiver): JSTypedArray {
+ try {
+ const initialize: constexpr bool = false;
+ const typedArray = TypedArrayInitialize(
+ initialize, map, length, elementsInfo, bufferConstructor)
+ otherwise RangeError;
- // 22.2.4.4 TypedArray ( object )
- // ES #sec-typedarray-object
- transitioning macro ConstructByArrayLike(implicit context: Context)(
- map: Map, arrayLike: HeapObject, length: uintptr,
- elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): JSTypedArray {
try {
- const initialize: constexpr bool = false;
- const typedArray = TypedArrayInitialize(
- initialize, map, length, elementsInfo, bufferConstructor)
- otherwise RangeError;
-
- try {
- const src: JSTypedArray =
- Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
-
- if (IsDetachedBuffer(src.buffer)) {
- ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
-
- } else if (src.elements_kind != elementsInfo.kind) {
- goto IfSlow;
-
- } else if (length > 0) {
- const byteLength = typedArray.byte_length;
- assert(byteLength <= kArrayBufferMaxByteLength);
- typed_array::CallCMemcpy(
- typedArray.data_ptr, src.data_ptr, byteLength);
- }
+ const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
+
+ if (IsDetachedBuffer(src.buffer)) {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
+
+ } else if (src.elements_kind != elementsInfo.kind) {
+ goto IfSlow;
+
+ } else if (length > 0) {
+ const byteLength = typedArray.byte_length;
+ assert(byteLength <= kArrayBufferMaxByteLength);
+ typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength);
}
- label IfSlow deferred {
- if (length > 0) {
- TypedArrayCopyElements(
- context, typedArray, arrayLike, Convert<Number>(length));
- }
+ } label IfSlow deferred {
+ if (length > 0) {
+ TypedArrayCopyElements(
+ context, typedArray, arrayLike, Convert<Number>(length));
}
- return typedArray;
- }
- label RangeError deferred {
- ThrowRangeError(
- MessageTemplate::kInvalidTypedArrayLength, Convert<Number>(length));
}
+ return typedArray;
+ } label RangeError deferred {
+ ThrowRangeError(
+ MessageTemplate::kInvalidTypedArrayLength, Convert<Number>(length));
}
+}
- // 22.2.4.4 TypedArray ( object )
- // ES #sec-typedarray-object
- transitioning macro ConstructByIterable(implicit context: Context)(
- iterable: JSReceiver, iteratorFn: Callable): never
- labels IfConstructByArrayLike(JSArray, uintptr, JSReceiver) {
- const array: JSArray =
- IterableToListMayPreserveHoles(context, iterable, iteratorFn);
- // Max JSArray length is a valid JSTypedArray length so we just use it.
- goto IfConstructByArrayLike(
- array, array.length_uintptr, GetArrayBufferFunction());
- }
+// 22.2.4.4 TypedArray ( object )
+// ES #sec-typedarray-object
+transitioning macro ConstructByIterable(implicit context: Context)(
+ iterable: JSReceiver, iteratorFn: Callable): never
+ labels IfConstructByArrayLike(JSArray, uintptr, JSReceiver) {
+ const array: JSArray =
+ IterableToListMayPreserveHoles(context, iterable, iteratorFn);
+ // Max JSArray length is a valid JSTypedArray length so we just use it.
+ goto IfConstructByArrayLike(
+ array, array.length_uintptr, GetArrayBufferFunction());
+}
- // 22.2.4.3 TypedArray ( typedArray )
- // ES #sec-typedarray-typedarray
- transitioning macro ConstructByTypedArray(implicit context: Context)(
- srcTypedArray: JSTypedArray): never
- labels IfConstructByArrayLike(JSTypedArray, uintptr, JSReceiver) {
- let bufferConstructor: JSReceiver = GetArrayBufferFunction();
- const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
+// 22.2.4.3 TypedArray ( typedArray )
+// ES #sec-typedarray-typedarray
+transitioning macro ConstructByTypedArray(implicit context: Context)(
+ srcTypedArray: JSTypedArray): never
+ labels IfConstructByArrayLike(JSTypedArray, uintptr, JSReceiver) {
+ let bufferConstructor: JSReceiver = GetArrayBufferFunction();
+ const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
+ // TODO(petermarshall): Throw on detached typedArray.
+ let length: uintptr = IsDetachedBuffer(srcBuffer) ? 0 : srcTypedArray.length;
+
+ // The spec requires that constructing a typed array using a SAB-backed
+ // typed array use the ArrayBuffer constructor, not the species constructor.
+ // See https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
+ if (!IsSharedArrayBuffer(srcBuffer)) {
+ bufferConstructor = SpeciesConstructor(srcBuffer, bufferConstructor);
// TODO(petermarshall): Throw on detached typedArray.
- let length: uintptr =
- IsDetachedBuffer(srcBuffer) ? 0 : srcTypedArray.length;
-
- // The spec requires that constructing a typed array using a SAB-backed
- // typed array use the ArrayBuffer constructor, not the species constructor.
- // See https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
- if (!IsSharedArrayBuffer(srcBuffer)) {
- bufferConstructor = SpeciesConstructor(srcBuffer, bufferConstructor);
- // TODO(petermarshall): Throw on detached typedArray.
- if (IsDetachedBuffer(srcBuffer)) length = 0;
- }
- goto IfConstructByArrayLike(srcTypedArray, length, bufferConstructor);
+ if (IsDetachedBuffer(srcBuffer)) length = 0;
}
+ goto IfConstructByArrayLike(srcTypedArray, length, bufferConstructor);
+}
- // 22.2.4.5 TypedArray ( buffer, byteOffset, length )
- // ES #sec-typedarray-buffer-byteoffset-length
- transitioning macro ConstructByArrayBuffer(implicit context: Context)(
- map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny,
- elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
- try {
- // 6. Let offset be ? ToIndex(byteOffset).
- const offset: uintptr = ToIndex(byteOffset) otherwise IfInvalidOffset;
+// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
+// ES #sec-typedarray-buffer-byteoffset-length
+transitioning macro ConstructByArrayBuffer(implicit context: Context)(
+ map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
+ try {
+ // 6. Let offset be ? ToIndex(byteOffset).
+ const offset: uintptr = ToIndex(byteOffset) otherwise IfInvalidOffset;
+
+    // 7. If offset modulo elementSize ≠ 0, throw a RangeError exception.
+ if (elementsInfo.IsUnaligned(offset)) {
+ goto IfInvalidAlignment('start offset');
+ }
-      // 7. If offset modulo elementSize ≠ 0, throw a RangeError exception.
- if (elementsInfo.IsUnaligned(offset)) {
- goto IfInvalidAlignment('start offset');
- }
+ // 8. If length is present and length is not undefined, then
+ // a. Let newLength be ? ToIndex(length).
+ let newLength: uintptr = ToIndex(length) otherwise IfInvalidLength;
+ let newByteLength: uintptr;
- // 8. If length is present and length is not undefined, then
- // a. Let newLength be ? ToIndex(length).
- let newLength: uintptr = ToIndex(length) otherwise IfInvalidLength;
- let newByteLength: uintptr;
+ // 9. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (IsDetachedBuffer(buffer)) {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
+ }
- // 9. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct');
- }
+ // 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
+ const bufferByteLength: uintptr = buffer.byte_length;
- // 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
- const bufferByteLength: uintptr = buffer.byte_length;
-
- // 11. If length is either not present or undefined, then
- if (length == Undefined) {
-      //   a. If bufferByteLength modulo elementSize ≠ 0, throw a RangeError
- // exception.
- if (elementsInfo.IsUnaligned(bufferByteLength)) {
- goto IfInvalidAlignment('byte length');
- }
-
- // b. Let newByteLength be bufferByteLength - offset.
- // c. If newByteLength < 0, throw a RangeError exception.
- if (bufferByteLength < offset) goto IfInvalidOffset;
-
- // Spec step 16 length calculated here to avoid recalculating the length
- // in the step 12 branch.
- newByteLength = bufferByteLength - offset;
- newLength = elementsInfo.CalculateLength(newByteLength)
- otherwise IfInvalidOffset;
-
- // 12. Else,
- } else {
-        // a. Let newByteLength be newLength × elementSize.
- newByteLength = elementsInfo.CalculateByteLength(newLength)
- otherwise IfInvalidLength;
-
- // b. If offset + newByteLength > bufferByteLength, throw a RangeError
- // exception.
- if ((bufferByteLength < newByteLength) ||
- (offset > bufferByteLength - newByteLength))
- goto IfInvalidLength;
+ // 11. If length is either not present or undefined, then
+ if (length == Undefined) {
+      // a. If bufferByteLength modulo elementSize ≠ 0, throw a RangeError
+ // exception.
+ if (elementsInfo.IsUnaligned(bufferByteLength)) {
+ goto IfInvalidAlignment('byte length');
}
- const isOnHeap: constexpr bool = false;
- return AllocateTypedArray(
- isOnHeap, map, buffer, offset, newByteLength, newLength);
- }
- label IfInvalidAlignment(problemString: String) deferred {
- ThrowInvalidTypedArrayAlignment(map, problemString);
- }
- label IfInvalidLength deferred {
- ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
- }
- label IfInvalidOffset deferred {
- ThrowRangeError(MessageTemplate::kInvalidOffset, byteOffset);
- }
- }
+ // b. Let newByteLength be bufferByteLength - offset.
+ // c. If newByteLength < 0, throw a RangeError exception.
+ if (bufferByteLength < offset) goto IfInvalidOffset;
- // 22.2.4.6 TypedArrayCreate ( constructor, argumentList )
- // ES #typedarray-create
- @export
- transitioning macro TypedArrayCreateByLength(implicit context: Context)(
- constructor: Constructor, length: Number, methodName: constexpr string):
- JSTypedArray {
- assert(IsSafeInteger(length));
+ // Spec step 16 length calculated here to avoid recalculating the length
+ // in the step 12 branch.
+ newByteLength = bufferByteLength - offset;
+ newLength = elementsInfo.CalculateLength(newByteLength)
+ otherwise IfInvalidOffset;
- // 1. Let newTypedArray be ? Construct(constructor, argumentList).
- const newTypedArrayObj = Construct(constructor, length);
+ // 12. Else,
+ } else {
+      // a. Let newByteLength be newLength × elementSize.
+ newByteLength = elementsInfo.CalculateByteLength(newLength)
+ otherwise IfInvalidLength;
+
+ // b. If offset + newByteLength > bufferByteLength, throw a RangeError
+ // exception.
+ if ((bufferByteLength < newByteLength) ||
+ (offset > bufferByteLength - newByteLength))
+ goto IfInvalidLength;
+ }
- // 2. Perform ? ValidateTypedArray(newTypedArray).
- // ValidateTypedArray currently returns the array, not the ViewBuffer.
- const newTypedArray: JSTypedArray =
- ValidateTypedArray(context, newTypedArrayObj, methodName);
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, offset, newByteLength, newLength);
+ } label IfInvalidAlignment(problemString: String) deferred {
+ ThrowInvalidTypedArrayAlignment(map, problemString);
+ } label IfInvalidLength deferred {
+ ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
+ } label IfInvalidOffset deferred {
+ ThrowRangeError(MessageTemplate::kInvalidOffset, byteOffset);
+ }
+}
- if (IsDetachedBuffer(newTypedArray.buffer)) deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, methodName);
- }
+// 22.2.4.6 TypedArrayCreate ( constructor, argumentList )
+// ES #typedarray-create
+@export
+transitioning macro TypedArrayCreateByLength(implicit context: Context)(
+ constructor: Constructor, length: Number, methodName: constexpr string):
+ JSTypedArray {
+ assert(IsSafeInteger(length));
- // 3. If argumentList is a List of a single Number, then
- // a. If newTypedArray.[[ArrayLength]] < argumentList[0], throw a
- // TypeError exception.
- if (newTypedArray.length < Convert<uintptr>(length)) deferred {
- ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
- }
+ // 1. Let newTypedArray be ? Construct(constructor, argumentList).
+ const newTypedArrayObj = Construct(constructor, length);
- // 4. Return newTypedArray.
- return newTypedArray;
- }
+ // 2. Perform ? ValidateTypedArray(newTypedArray).
+ // ValidateTypedArray currently returns the array, not the ViewBuffer.
+ const newTypedArray: JSTypedArray =
+ ValidateTypedArray(context, newTypedArrayObj, methodName);
- transitioning macro ConstructByJSReceiver(implicit context:
- Context)(obj: JSReceiver): never
- labels IfConstructByArrayLike(JSReceiver, uintptr, JSReceiver) {
- try {
- // TODO(v8:8906): Use iterator::GetIteratorMethod() once it supports
- // labels.
- const iteratorMethod = GetMethod(obj, IteratorSymbolConstant())
- otherwise IfIteratorUndefined, IfIteratorNotCallable;
- ConstructByIterable(obj, iteratorMethod)
- otherwise IfConstructByArrayLike;
- }
- label IfIteratorUndefined {
- const lengthObj: JSAny = GetProperty(obj, kLengthString);
- const lengthNumber: Number = ToLength_Inline(lengthObj);
- // Throw RangeError here if the length does not fit in uintptr because
- // such a length will not pass bounds checks in ConstructByArrayLike()
- // anyway.
- const length: uintptr = ChangeSafeIntegerNumberToUintPtr(lengthNumber)
- otherwise goto IfInvalidLength(lengthNumber);
- goto IfConstructByArrayLike(obj, length, GetArrayBufferFunction());
- }
- label IfInvalidLength(length: Number) {
- ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
+ if (IsDetachedBuffer(newTypedArray.buffer)) deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, methodName);
}
- label IfIteratorNotCallable(_value: JSAny) deferred {
- ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
+
+ // 3. If argumentList is a List of a single Number, then
+ // a. If newTypedArray.[[ArrayLength]] < argumentList[0], throw a
+ // TypeError exception.
+ if (newTypedArray.length < Convert<uintptr>(length)) deferred {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
}
- }
- // 22.2.4 The TypedArray Constructors
- // ES #sec-typedarray-constructors
- transitioning builtin CreateTypedArray(
- context: Context, target: JSFunction, newTarget: JSReceiver, arg1: JSAny,
- arg2: JSAny, arg3: JSAny): JSTypedArray {
- assert(IsConstructor(target));
- // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
- // "%TypedArrayPrototype%").
- const map = GetDerivedMap(target, newTarget);
+ // 4. Return newTypedArray.
+ return newTypedArray;
+}
- // 5. Let elementSize be the Number value of the Element Size value in Table
- // 56 for constructorName.
- const elementsInfo = GetTypedArrayElementsInfo(map);
+transitioning macro ConstructByJSReceiver(implicit context: Context)(
+ obj: JSReceiver): never
+ labels IfConstructByArrayLike(JSReceiver, uintptr, JSReceiver) {
+ try {
+ // TODO(v8:8906): Use iterator::GetIteratorMethod() once it supports
+ // labels.
+ const iteratorMethod = GetMethod(obj, IteratorSymbolConstant())
+ otherwise IfIteratorUndefined, IfIteratorNotCallable;
+ ConstructByIterable(obj, iteratorMethod)
+ otherwise IfConstructByArrayLike;
+ } label IfIteratorUndefined {
+ const lengthObj: JSAny = GetProperty(obj, kLengthString);
+ const lengthNumber: Number = ToLength_Inline(lengthObj);
+ // Throw RangeError here if the length does not fit in uintptr because
+ // such a length will not pass bounds checks in ConstructByArrayLike()
+ // anyway.
+ const length: uintptr = ChangeSafeIntegerNumberToUintPtr(lengthNumber)
+ otherwise goto IfInvalidLength(lengthNumber);
+ goto IfConstructByArrayLike(obj, length, GetArrayBufferFunction());
+ } label IfInvalidLength(length: Number) {
+ ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
+ } label IfIteratorNotCallable(_value: JSAny) deferred {
+ ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
+ }
+}
- try {
- typeswitch (arg1) {
- case (length: Smi): {
- goto IfConstructByLength(length);
- }
- case (buffer: JSArrayBuffer): {
- return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
- }
- case (typedArray: JSTypedArray): {
- ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
- }
- case (obj: JSReceiver): {
- ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike;
- }
- // The first argument was a number or fell through and is treated as
- // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
- case (lengthObj: JSAny): {
- goto IfConstructByLength(lengthObj);
- }
+// 22.2.4 The TypedArray Constructors
+// ES #sec-typedarray-constructors
+transitioning builtin CreateTypedArray(
+ context: Context, target: JSFunction, newTarget: JSReceiver, arg1: JSAny,
+ arg2: JSAny, arg3: JSAny): JSTypedArray {
+ assert(IsConstructor(target));
+ // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
+ // "%TypedArrayPrototype%").
+ const map = GetDerivedMap(target, newTarget);
+
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
+
+ try {
+ typeswitch (arg1) {
+ case (length: Smi): {
+ goto IfConstructByLength(length);
+ }
+ case (buffer: JSArrayBuffer): {
+ return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
+ }
+ case (typedArray: JSTypedArray): {
+ ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
+ }
+ case (obj: JSReceiver): {
+ ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike;
+ }
+ // The first argument was a number or fell through and is treated as
+ // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
+ case (lengthObj: JSAny): {
+ goto IfConstructByLength(lengthObj);
}
}
- label IfConstructByLength(length: JSAny) {
- return ConstructByLength(map, length, elementsInfo);
- }
- label IfConstructByArrayLike(
- arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) {
- return ConstructByArrayLike(
- map, arrayLike, length, elementsInfo, bufferConstructor);
- }
+ } label IfConstructByLength(length: JSAny) {
+ return ConstructByLength(map, length, elementsInfo);
+ } label IfConstructByArrayLike(
+ arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) {
+ return ConstructByArrayLike(
+ map, arrayLike, length, elementsInfo, bufferConstructor);
}
+}
- transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
- methodName: constexpr string, numArgs: constexpr int31,
- exemplar: JSTypedArray, arg0: JSAny, arg1: JSAny,
- arg2: JSAny): JSTypedArray {
- const defaultConstructor = GetDefaultConstructor(exemplar);
+transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
+ methodName: constexpr string, numArgs: constexpr int31,
+ exemplar: JSTypedArray, arg0: JSAny, arg1: JSAny,
+ arg2: JSAny): JSTypedArray {
+ const defaultConstructor = GetDefaultConstructor(exemplar);
- try {
- if (!IsPrototypeTypedArrayPrototype(exemplar.map)) goto IfSlow;
- if (IsTypedArraySpeciesProtectorCellInvalid()) goto IfSlow;
+ try {
+ if (!IsPrototypeTypedArrayPrototype(exemplar.map)) goto IfSlow;
+ if (IsTypedArraySpeciesProtectorCellInvalid()) goto IfSlow;
- const typedArray = CreateTypedArray(
- context, defaultConstructor, defaultConstructor, arg0, arg1, arg2);
+ const typedArray = CreateTypedArray(
+ context, defaultConstructor, defaultConstructor, arg0, arg1, arg2);
- // It is assumed that the CreateTypedArray builtin does not produce a
- // typed array that fails ValidateTypedArray
- assert(!IsDetachedBuffer(typedArray.buffer));
+ // It is assumed that the CreateTypedArray builtin does not produce a
+ // typed array that fails ValidateTypedArray
+ assert(!IsDetachedBuffer(typedArray.buffer));
- return typedArray;
+ return typedArray;
+ } label IfSlow deferred {
+ const constructor =
+ Cast<Constructor>(SpeciesConstructor(exemplar, defaultConstructor))
+ otherwise unreachable;
+
+ // TODO(pwong): Simplify and remove numArgs when varargs are supported in
+ // macros.
+ let newObj: JSAny = Undefined;
+ if constexpr (numArgs == 1) {
+ newObj = Construct(constructor, arg0);
+ } else {
+ assert(numArgs == 3);
+ newObj = Construct(constructor, arg0, arg1, arg2);
}
- label IfSlow deferred {
- const constructor =
- Cast<Constructor>(SpeciesConstructor(exemplar, defaultConstructor))
- otherwise unreachable;
-
- // TODO(pwong): Simplify and remove numArgs when varargs are supported in
- // macros.
- let newObj: JSAny = Undefined;
- if constexpr (numArgs == 1) {
- newObj = Construct(constructor, arg0);
- } else {
- assert(numArgs == 3);
- newObj = Construct(constructor, arg0, arg1, arg2);
- }
- return ValidateTypedArray(context, newObj, methodName);
- }
+ return ValidateTypedArray(context, newObj, methodName);
}
+}
- @export
- transitioning macro TypedArraySpeciesCreateByLength(implicit context:
- Context)(
- methodName: constexpr string, exemplar: JSTypedArray, length: uintptr):
- JSTypedArray {
- const numArgs: constexpr int31 = 1;
- // TODO(v8:4153): pass length further as uintptr.
- const typedArray: JSTypedArray = TypedArraySpeciesCreate(
- methodName, numArgs, exemplar, Convert<Number>(length), Undefined,
- Undefined);
- if (typedArray.length < length) deferred {
- ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
- }
- return typedArray;
- }
+@export
+transitioning macro TypedArraySpeciesCreateByLength(implicit context: Context)(
+ methodName: constexpr string, exemplar: JSTypedArray, length: uintptr):
+ JSTypedArray {
+ const numArgs: constexpr int31 = 1;
+ // TODO(v8:4153): pass length further as uintptr.
+ const typedArray: JSTypedArray = TypedArraySpeciesCreate(
+ methodName, numArgs, exemplar, Convert<Number>(length), Undefined,
+ Undefined);
+ if (typedArray.length < length) deferred {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooShort);
+ }
+ return typedArray;
+}
- transitioning macro TypedArraySpeciesCreateByBuffer(implicit context:
- Context)(
- methodName: constexpr string, exemplar: JSTypedArray,
- buffer: JSArrayBuffer, beginByteOffset: uintptr,
- newLength: uintptr): JSTypedArray {
- const numArgs: constexpr int31 = 3;
- // TODO(v8:4153): pass length further as uintptr.
- const typedArray: JSTypedArray = TypedArraySpeciesCreate(
- methodName, numArgs, exemplar, buffer, Convert<Number>(beginByteOffset),
- Convert<Number>(newLength));
- return typedArray;
- }
+transitioning macro TypedArraySpeciesCreateByBuffer(implicit context: Context)(
+ methodName: constexpr string, exemplar: JSTypedArray, buffer: JSArrayBuffer,
+ beginByteOffset: uintptr, newLength: uintptr): JSTypedArray {
+ const numArgs: constexpr int31 = 3;
+ // TODO(v8:4153): pass length further as uintptr.
+ const typedArray: JSTypedArray = TypedArraySpeciesCreate(
+ methodName, numArgs, exemplar, buffer, Convert<Number>(beginByteOffset),
+ Convert<Number>(newLength));
+ return typedArray;
+}
}
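Note on the species-create helpers above: TypedArraySpeciesCreate consults the exemplar's @@species constructor, and TypedArraySpeciesCreateByLength additionally enforces that the constructed array is at least as long as requested (kTypedArrayTooShort). A minimal JavaScript sketch of that observable contract, using a hypothetical Shorter subclass purely to trigger the check (illustrative usage only, not part of this patch):

    // A species constructor that under-allocates on purpose.
    class Shorter extends Float64Array {
      constructor(length) {
        super(Math.max(0, length - 1));
      }
    }
    const src = new Shorter(4).fill(1);      // length 3
    try {
      src.filter(() => true);                // species-created result is too short
    } catch (e) {
      console.log(e instanceof TypeError);   // true (kTypedArrayTooShort)
    }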
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index cba688244b..fdd4961dee 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -5,52 +5,49 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
+const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
- transitioning macro EveryAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
- witness.GetStable());
- if (!ToBoolean(result)) {
- return False;
- }
+transitioning macro EveryAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: JSAny): Boolean {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result = Call(
+ context, callbackfn, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
+ if (!ToBoolean(result)) {
+ return False;
}
- return True;
}
+ return True;
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
- transitioning javascript builtin
- TypedArrayPrototypeEvery(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = thisArg
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
+transitioning javascript builtin
+TypedArrayPrototypeEvery(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const thisArg = arguments[1];
- return EveryAllElements(uarray, callbackfn, thisArg);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEvery);
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEvery);
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return EveryAllElements(uarray, callbackfn, thisArg);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEvery);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEvery);
}
}
+}
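For orientation, EveryAllElements above implements the usual short-circuit semantics of %TypedArray%.prototype.every: it returns False as soon as the callback yields a falsy value. A small JavaScript usage sketch (illustrative only, not part of this patch):

    const a = new Int32Array([2, 4, 6]);
    console.log(a.every((v) => v % 2 === 0));  // true
    console.log(a.every((_, i) => i < 2));     // false: callback fails at index 2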
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index dd3b4900dc..15d40f92eb 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -3,85 +3,81 @@
// found in the LICENSE file.
namespace typed_array {
- const kBuiltinNameFilter: constexpr string = '%TypedArray%.prototype.filter';
+const kBuiltinNameFilter: constexpr string = '%TypedArray%.prototype.filter';
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
- transitioning javascript builtin TypedArrayPrototypeFilter(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = thisArg
- try {
- // 1. Let O be the this value.
- // 2. Perform ? ValidateTypedArray(O).
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kNotTypedArray, kBuiltinNameFilter);
- const src = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
+transitioning javascript builtin TypedArrayPrototypeFilter(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise ThrowTypeError(
+ MessageTemplate::kNotTypedArray, kBuiltinNameFilter);
+ const src = typed_array::EnsureAttached(array) otherwise IsDetached;
- // 3. Let len be O.[[ArrayLength]].
- const len: uintptr = src.length;
+ // 3. Let len be O.[[ArrayLength]].
+ const len: uintptr = src.length;
- // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
- const callbackfn = Cast<Callable>(arguments[0])
- otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, arguments[0]);
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ const callbackfn = Cast<Callable>(arguments[0])
+ otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- // 5. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg: JSAny = arguments[1];
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ const thisArg: JSAny = arguments[1];
- // 6. Let kept be a new empty List.
- // TODO(v8:4153): Support huge TypedArrays here. (growable fixed arrays
- // can't be longer than kMaxSmiValue).
- let kept = growable_fixed_array::NewGrowableFixedArray();
- let witness = typed_array::NewAttachedJSTypedArrayWitness(src);
+ // 6. Let kept be a new empty List.
+ // TODO(v8:4153): Support huge TypedArrays here. (growable fixed arrays
+ // can't be longer than kMaxSmiValue).
+ let kept = growable_fixed_array::NewGrowableFixedArray();
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(src);
- // 7. Let k be 0.
- // 8. Let captured be 0.
- // 9. Repeat, while k < len
- for (let k: uintptr = 0; k < len; k++) {
- witness.Recheck() otherwise IsDetached;
+ // 7. Let k be 0.
+ // 8. Let captured be 0.
+ // 9. Repeat, while k < len
+ for (let k: uintptr = 0; k < len; k++) {
+ witness.Recheck() otherwise IsDetached;
- // a. Let Pk be ! ToString(k).
- // b. Let kValue be ? Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(O, Pk).
+ const value: JSAny = witness.Load(k);
- // c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
- // »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const selected: JSAny = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
- witness.GetStable());
+ // c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
+ // »)).
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const selected: JSAny = Call(
+ context, callbackfn, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
- // d. If selected is true, then
- // i. Append kValue to the end of kept.
- // ii. Increase captured by 1.
- if (ToBoolean(selected)) kept.Push(value);
+ // d. If selected is true, then
+ // i. Append kValue to the end of kept.
+ // ii. Increase captured by 1.
+ if (ToBoolean(selected)) kept.Push(value);
- // e.Increase k by 1.
- }
+ // e.Increase k by 1.
+ }
- // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
- const typedArray: JSTypedArray = TypedArraySpeciesCreateByLength(
- kBuiltinNameFilter, array, Unsigned(kept.length));
+ // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
+ const typedArray: JSTypedArray = TypedArraySpeciesCreateByLength(
+ kBuiltinNameFilter, array, Unsigned(kept.length));
- // 11. Let n be 0.
- // 12. For each element e of kept, do
- // a. Perform ! Set(A, ! ToString(n), e, true).
- // b. Increment n by 1.
- // TODO(v8:4153): Consider passing growable typed array directly to
- // TypedArrayCopyElements() to avoid JSArray materialization. Or collect
- // indices instead of values the loop above.
- const lengthNumber = Convert<Number>(Unsigned(kept.length));
- TypedArrayCopyElements(
- context, typedArray, kept.ToJSArray(), lengthNumber);
+ // 11. Let n be 0.
+ // 12. For each element e of kept, do
+ // a. Perform ! Set(A, ! ToString(n), e, true).
+ // b. Increment n by 1.
+ // TODO(v8:4153): Consider passing growable typed array directly to
+ // TypedArrayCopyElements() to avoid JSArray materialization. Or collect
+ // indices instead of values the loop above.
+ const lengthNumber = Convert<Number>(Unsigned(kept.length));
+ TypedArrayCopyElements(context, typedArray, kept.ToJSArray(), lengthNumber);
- // 13. Return A.
- return typedArray;
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFilter);
- }
+ // 13. Return A.
+ return typedArray;
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFilter);
}
}
+}
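The filter path above first collects the selected values in a growable fixed array, then allocates the result via TypedArraySpeciesCreateByLength and copies the values over. Observable behavior in JavaScript (illustrative only, not part of this patch):

    const a = new Uint8Array([1, 2, 3, 4, 5]);
    const evens = a.filter((v) => v % 2 === 0);
    console.log(evens instanceof Uint8Array);  // true (default species)
    console.log(evens);                        // Uint8Array [2, 4]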
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index ef4954274b..24a13dbc23 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -5,52 +5,49 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
+const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
- transitioning macro FindAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): JSAny {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
- witness.GetStable());
- if (ToBoolean(result)) {
- return value;
- }
+transitioning macro FindAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: JSAny): JSAny {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result = Call(
+ context, callbackfn, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
+ if (ToBoolean(result)) {
+ return value;
}
- return Undefined;
}
+ return Undefined;
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
- transitioning javascript builtin
- TypedArrayPrototypeFind(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = thisArg
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
+transitioning javascript builtin
+TypedArrayPrototypeFind(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const thisArg = arguments[1];
- return FindAllElements(uarray, callbackfn, thisArg);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFind);
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFind);
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return FindAllElements(uarray, callbackfn, thisArg);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFind);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFind);
}
}
+}
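FindAllElements returns the first element for which the callback is truthy, or Undefined. Usage sketch (illustrative only, not part of this patch):

    const a = new Float32Array([0.5, 1.5, 2.5]);
    console.log(a.find((v) => v > 1));  // 1.5
    console.log(a.find((v) => v > 9));  // undefined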
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index 5af9aede29..7bb01151f3 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -5,56 +5,50 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameFindIndex: constexpr string =
- '%TypedArray%.prototype.findIndex';
+const kBuiltinNameFindIndex: constexpr string =
+ '%TypedArray%.prototype.findIndex';
- transitioning macro FindIndexAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Number {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const indexNumber: Number = Convert<Number>(k);
- const result = Call(
- context, callbackfn, thisArg, value, indexNumber,
- witness.GetStable());
- if (ToBoolean(result)) {
- return indexNumber;
- }
+transitioning macro FindIndexAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: JSAny): Number {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const indexNumber: Number = Convert<Number>(k);
+ const result = Call(
+ context, callbackfn, thisArg, value, indexNumber, witness.GetStable());
+ if (ToBoolean(result)) {
+ return indexNumber;
}
- return -1;
}
+ return -1;
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex
- transitioning javascript builtin
- TypedArrayPrototypeFindIndex(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = thisArg.
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex
+transitioning javascript builtin
+TypedArrayPrototypeFindIndex(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const thisArg = arguments[1];
- return FindIndexAllElements(uarray, callbackfn, thisArg);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindIndex);
- }
- label IsDetached deferred {
- ThrowTypeError(
- MessageTemplate::kDetachedOperation, kBuiltinNameFindIndex);
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return FindIndexAllElements(uarray, callbackfn, thisArg);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindIndex);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindIndex);
}
}
+}
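FindIndexAllElements mirrors FindAllElements but returns the matching index as a Number, or -1. Usage sketch (illustrative only, not part of this patch):

    const a = new Int16Array([10, 20, 30]);
    console.log(a.findIndex((v) => v >= 20));  // 1
    console.log(a.findIndex((v) => v > 99));   // -1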
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index dd13417456..d696d9c8dd 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -5,52 +5,47 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameForEach: constexpr string =
- '%TypedArray%.prototype.forEach';
+const kBuiltinNameForEach: constexpr string = '%TypedArray%.prototype.forEach';
- transitioning macro ForEachAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Undefined {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
- witness.GetStable());
- }
- return Undefined;
+transitioning macro ForEachAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: JSAny): Undefined {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ Call(
+ context, callbackfn, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
}
+ return Undefined;
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
- transitioning javascript builtin
- TypedArrayPrototypeForEach(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): Undefined {
- // arguments[0] = callback
- // arguments[1] = this_arg.
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
+transitioning javascript builtin
+TypedArrayPrototypeForEach(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): Undefined {
+ // arguments[0] = callback
+ // arguments[1] = this_arg.
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const thisArg = arguments[1];
- return ForEachAllElements(uarray, callbackfn, thisArg);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameForEach);
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameForEach);
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return ForEachAllElements(uarray, callbackfn, thisArg);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameForEach);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameForEach);
}
}
+}
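ForEachAllElements invokes the callback for every index and always returns Undefined. Usage sketch (illustrative only, not part of this patch):

    const a = new Uint32Array([7, 8]);
    let sum = 0;
    console.log(a.forEach((v) => { sum += v; }));  // undefined
    console.log(sum);                              // 15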
diff --git a/deps/v8/src/builtins/typed-array-from.tq b/deps/v8/src/builtins/typed-array-from.tq
index a0c2fa72a8..56d4d1d6cf 100644
--- a/deps/v8/src/builtins/typed-array-from.tq
+++ b/deps/v8/src/builtins/typed-array-from.tq
@@ -5,190 +5,179 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameFrom: constexpr string = '%TypedArray%.from';
+const kBuiltinNameFrom: constexpr string = '%TypedArray%.from';
+
+type BuiltinsName extends int31 constexpr 'Builtins::Name';
+const kTypedArrayPrototypeValues: constexpr BuiltinsName
+ generates 'Builtins::kTypedArrayPrototypeValues';
+
+extern builtin IterableToList(implicit context: Context)(JSAny, JSAny): JSArray;
+
+// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
+// https://tc39.github.io/ecma262/#sec-%typedarray%.from
+transitioning javascript builtin
+TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): JSTypedArray {
+ try {
+ const source: JSAny = arguments[0];
+ const mapfnObj: JSAny = arguments[1];
+ const thisArg = arguments[2];
+
+ // 1. Let C be the this value.
+ // 2. If IsConstructor(C) is false, throw a TypeError exception.
+ const constructor = Cast<Constructor>(receiver) otherwise NotConstructor;
+
+ // 3. If mapfn is undefined, then let mapping be false.
+ // 4. Else,
+ // a. If IsCallable(mapfn) is false, throw a TypeError exception.
+ // b. Let mapping be true.
+ const mapping: bool = mapfnObj != Undefined;
+ if (mapping && !Is<Callable>(mapfnObj)) deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfnObj);
+ }
- type BuiltinsName extends int31 constexpr 'Builtins::Name';
- const kTypedArrayPrototypeValues: constexpr BuiltinsName
- generates 'Builtins::kTypedArrayPrototypeValues';
+ // We split up this builtin differently to the way it is written in the
+ // spec. We already have great code in the elements accessor for copying
+ // from a JSArray into a TypedArray, so we use that when possible. We only
+ // avoid calling into the elements accessor when we have a mapping
+ // function, because we can't handle that. Here, presence of a mapping
+ // function is the slow path. We also combine the two different loops in
+ // the specification (starting at 7.e and 13) because they are essentially
+ // identical. We also save on code-size this way.
- extern builtin IterableToList(implicit context: Context)(JSAny, JSAny):
- JSArray;
+ let finalLength: uintptr;
+ let finalSource: JSAny;
- // %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
- // https://tc39.github.io/ecma262/#sec-%typedarray%.from
- transitioning javascript builtin
- TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSTypedArray {
try {
- const source: JSAny = arguments[0];
-
- // 1. Let C be the this value.
- // 2. If IsConstructor(C) is false, throw a TypeError exception.
- const constructor = Cast<Constructor>(receiver) otherwise NotConstructor;
-
- // 3. If mapfn is present and mapfn is not undefined, then
- // a. If IsCallable(mapfn) is false, throw a TypeError exception.
- // b. Let mapping be true.
- // 4. Else, let mapping be false.
- const mapping: bool = arguments.length > 1;
- const mapfnObj: JSAny = mapping ? arguments[1] : Undefined;
- if (mapping && !TaggedIsCallable(mapfnObj)) deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfnObj);
- }
-
- // 5. If thisArg is present, let T be thisArg; else let T be undefined.
- const thisArg = arguments.length > 2 ? arguments[2] : Undefined;
-
- // We split up this builtin differently to the way it is written in the
- // spec. We already have great code in the elements accessor for copying
- // from a JSArray into a TypedArray, so we use that when possible. We only
- // avoid calling into the elements accessor when we have a mapping
- // function, because we can't handle that. Here, presence of a mapping
- // function is the slow path. We also combine the two different loops in
- // the specification (starting at 7.e and 13) because they are essentially
- // identical. We also save on code-size this way.
-
- let finalLength: uintptr;
- let finalSource: JSAny;
+ // 5. Let usingIterator be ? GetMethod(source, @@iterator).
+ // TODO(v8:8906): Use iterator::GetIteratorMethod() once it supports
+ // labels.
+ const usingIterator = GetMethod(source, IteratorSymbolConstant())
+ otherwise IteratorIsUndefined, IteratorNotCallable;
try {
- // 6. Let usingIterator be ? GetMethod(source, @@iterator).
- // TODO(v8:8906): Use iterator::GetIteratorMethod() once it supports
- // labels.
- const usingIterator = GetMethod(source, IteratorSymbolConstant())
- otherwise IteratorIsUndefined, IteratorNotCallable;
-
- try {
- // TypedArrays have iterators, so normally we would go through the
- // IterableToList case below, which would convert the TypedArray to a
- // JSArray (boxing the values if they won't fit in a Smi).
- //
- // However, if we can guarantee that the source object has the
- // built-in iterator and that the %ArrayIteratorPrototype%.next method
- // has not been overridden, then we know the behavior of the iterator:
- // returning the values in the TypedArray sequentially from index 0 to
- // length-1.
- //
- // In this case, we can avoid creating the intermediate array and the
- // associated HeapNumbers, and use the fast path in
- // TypedArrayCopyElements which uses the same ordering as the default
- // iterator.
- //
- // Drop through to the default check_iterator behavior if any of these
- // checks fail.
- const sourceTypedArray =
- Cast<JSTypedArray>(source) otherwise UseUserProvidedIterator;
- const sourceBuffer = sourceTypedArray.buffer;
- if (IsDetachedBuffer(sourceBuffer)) goto UseUserProvidedIterator;
-
- // Check that the iterator function is exactly
- // Builtins::kTypedArrayPrototypeValues.
- const iteratorFn =
- Cast<JSFunction>(usingIterator) otherwise UseUserProvidedIterator;
- if (!TaggedEqual(
- iteratorFn.shared_function_info.function_data,
- SmiConstant(kTypedArrayPrototypeValues)))
- goto UseUserProvidedIterator;
-
- // Check that the ArrayIterator prototype's "next" method hasn't been
- // overridden.
- if (IsArrayIteratorProtectorCellInvalid())
- goto UseUserProvidedIterator;
-
- // Source is a TypedArray with unmodified iterator behavior. Use the
- // source object directly, taking advantage of the special-case code
- // in TypedArrayCopyElements
- finalLength = sourceTypedArray.length;
- finalSource = source;
- }
- label UseUserProvidedIterator {
- // 7. If usingIterator is not undefined, then
- // a. Let values be ? IterableToList(source, usingIterator).
- // b. Let len be the number of elements in values.
- const values: JSArray = IterableToList(source, usingIterator);
-
- finalLength = Convert<uintptr>(values.length);
- finalSource = values;
- }
- }
- label IteratorIsUndefined {
- // 8. NOTE: source is not an Iterable so assume it is already an
- // array-like object.
-
- // 9. Let arrayLike be ! ToObject(source).
- const arrayLike: JSReceiver = ToObject_Inline(context, source);
-
- // 10. Let len be ? ToLength(? Get(arrayLike, "length")).
- const length = GetLengthProperty(arrayLike);
-
- try {
- finalLength = ChangeSafeIntegerNumberToUintPtr(length)
- otherwise IfInvalidLength;
- finalSource = arrayLike;
- }
- label IfInvalidLength deferred {
- ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
- }
- }
- label IteratorNotCallable(_value: JSAny) deferred {
- ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
+ // TypedArrays have iterators, so normally we would go through the
+ // IterableToList case below, which would convert the TypedArray to a
+ // JSArray (boxing the values if they won't fit in a Smi).
+ //
+ // However, if we can guarantee that the source object has the
+ // built-in iterator and that the %ArrayIteratorPrototype%.next method
+ // has not been overridden, then we know the behavior of the iterator:
+ // returning the values in the TypedArray sequentially from index 0 to
+ // length-1.
+ //
+ // In this case, we can avoid creating the intermediate array and the
+ // associated HeapNumbers, and use the fast path in
+ // TypedArrayCopyElements which uses the same ordering as the default
+ // iterator.
+ //
+ // Drop through to the default check_iterator behavior if any of these
+ // checks fail.
+ const sourceTypedArray =
+ Cast<JSTypedArray>(source) otherwise UseUserProvidedIterator;
+ const sourceBuffer = sourceTypedArray.buffer;
+ if (IsDetachedBuffer(sourceBuffer)) goto UseUserProvidedIterator;
+
+ // Check that the iterator function is exactly
+ // Builtins::kTypedArrayPrototypeValues.
+ const iteratorFn =
+ Cast<JSFunction>(usingIterator) otherwise UseUserProvidedIterator;
+ if (!TaggedEqual(
+ iteratorFn.shared_function_info.function_data,
+ SmiConstant(kTypedArrayPrototypeValues)))
+ goto UseUserProvidedIterator;
+
+ // Check that the ArrayIterator prototype's "next" method hasn't been
+ // overridden.
+ if (IsArrayIteratorProtectorCellInvalid()) goto UseUserProvidedIterator;
+
+ // Source is a TypedArray with unmodified iterator behavior. Use the
+ // source object directly, taking advantage of the special-case code
+ // in TypedArrayCopyElements
+ finalLength = sourceTypedArray.length;
+ finalSource = source;
+ } label UseUserProvidedIterator {
+ // 6. If usingIterator is not undefined, then
+ // a. Let values be ? IterableToList(source, usingIterator).
+ // b. Let len be the number of elements in values.
+ const values: JSArray = IterableToList(source, usingIterator);
+
+ finalLength = Convert<uintptr>(values.length);
+ finalSource = values;
}
+ } label IteratorIsUndefined {
+ // 7. NOTE: source is not an Iterable so assume it is already an
+ // array-like object.
- const finalLengthNum = Convert<Number>(finalLength);
+ // 8. Let arrayLike be ! ToObject(source).
+ const arrayLike: JSReceiver = ToObject_Inline(context, source);
- // 7c/11. Let targetObj be ? TypedArrayCreate(C, «len»).
- const targetObj = TypedArrayCreateByLength(
- constructor, finalLengthNum, kBuiltinNameFrom);
+ // 9. Let len be ? LengthOfArrayLike(arrayLike).
+ const length = GetLengthProperty(arrayLike);
- if (!mapping) {
- // Fast path.
- if (finalLength != 0) {
- // Call runtime.
- TypedArrayCopyElements(
- context, targetObj, finalSource, finalLengthNum);
- }
- return targetObj;
+ try {
+ finalLength = ChangeSafeIntegerNumberToUintPtr(length)
+ otherwise IfInvalidLength;
+ finalSource = arrayLike;
+ } label IfInvalidLength deferred {
+ ThrowRangeError(MessageTemplate::kInvalidTypedArrayLength, length);
}
- // Slow path.
-
- const mapfn: Callable = Cast<Callable>(mapfnObj) otherwise unreachable;
- const accessor: TypedArrayAccessor =
- GetTypedArrayAccessor(targetObj.elements_kind);
-
- // 7d-7e and 12-13.
- // 12. Let k be 0.
- // 13. Repeat, while k < len
- for (let k: uintptr = 0; k < finalLength; k++) {
- // 13a. Let Pk be ! ToString(k).
- const kNum = Convert<Number>(k);
-
- // 13b. Let kValue be ? Get(arrayLike, Pk).
- const kValue: JSAny = GetProperty(finalSource, kNum);
-
- let mappedValue: JSAny;
- // 13c. If mapping is true, then
- if (mapping) {
- // i. Let mappedValue be ? Call(mapfn, T, « kValue, k »).
- mappedValue = Call(context, mapfn, thisArg, kValue, kNum);
- } else {
- // 13d. Else, let mappedValue be kValue.
- mappedValue = kValue;
- }
-
- // 13e. Perform ? Set(targetObj, Pk, mappedValue, true).
- // Buffer may be detached during executing ToNumber/ToBigInt.
- accessor.StoreJSAny(context, targetObj, k, mappedValue)
- otherwise IfDetached;
-
- // 13f. Set k to k + 1. (done by the loop).
+ } label IteratorNotCallable(_value: JSAny) deferred {
+ ThrowTypeError(MessageTemplate::kIteratorSymbolNonCallable);
+ }
+
+ const finalLengthNum = Convert<Number>(finalLength);
+
+ // 6c/10. Let targetObj be ? TypedArrayCreate(C, «len»).
+ const targetObj =
+ TypedArrayCreateByLength(constructor, finalLengthNum, kBuiltinNameFrom);
+
+ if (!mapping) {
+ // Fast path.
+ if (finalLength != 0) {
+ // Call runtime.
+ TypedArrayCopyElements(context, targetObj, finalSource, finalLengthNum);
}
return targetObj;
}
- label NotConstructor deferred {
- ThrowTypeError(MessageTemplate::kNotConstructor, receiver);
- }
- label IfDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFrom);
+ // Slow path.
+
+ const mapfn: Callable = Cast<Callable>(mapfnObj) otherwise unreachable;
+ const accessor: TypedArrayAccessor =
+ GetTypedArrayAccessor(targetObj.elements_kind);
+
+ // 6d-6e and 11-12.
+ // 11. Let k be 0.
+ // 12. Repeat, while k < len
+ for (let k: uintptr = 0; k < finalLength; k++) {
+ // 12a. Let Pk be ! ToString(k).
+ const kNum = Convert<Number>(k);
+
+ // 12b. Let kValue be ? Get(arrayLike, Pk).
+ const kValue: JSAny = GetProperty(finalSource, kNum);
+
+ let mappedValue: JSAny;
+ // 12c. If mapping is true, then
+ if (mapping) {
+ // i. Let mappedValue be ? Call(mapfn, T, « kValue, k »).
+ mappedValue = Call(context, mapfn, thisArg, kValue, kNum);
+ } else {
+ // 12d. Else, let mappedValue be kValue.
+ mappedValue = kValue;
+ }
+
+ // 12e. Perform ? Set(targetObj, Pk, mappedValue, true).
+ // Buffer may be detached during executing ToNumber/ToBigInt.
+ accessor.StoreJSAny(context, targetObj, k, mappedValue)
+ otherwise IfDetached;
+
+ // 12f. Set k to k + 1. (done by the loop).
}
+ return targetObj;
+ } label NotConstructor deferred {
+ ThrowTypeError(MessageTemplate::kNotConstructor, receiver);
+ } label IfDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFrom);
}
}
+}
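As the long comment in the builtin explains, TypedArrayFrom takes the fast TypedArrayCopyElements path whenever there is no mapping function, and only falls back to an element-by-element loop when mapfn is present. The JavaScript-observable surface (illustrative only, not part of this patch):

    console.log(Uint8Array.from([1, 2, 3]));                     // Uint8Array [1, 2, 3]
    console.log(Int32Array.from({length: 3}, (_, i) => i * i));  // Int32Array [0, 1, 4]
    console.log(Float64Array.from(new Set([1, 1, 2])));          // Float64Array [1, 2]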
diff --git a/deps/v8/src/builtins/typed-array-of.tq b/deps/v8/src/builtins/typed-array-of.tq
index c65deeeb15..b5d42ef9a2 100644
--- a/deps/v8/src/builtins/typed-array-of.tq
+++ b/deps/v8/src/builtins/typed-array-of.tq
@@ -5,52 +5,50 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameOf: constexpr string = '%TypedArray%.of';
-
- // %TypedArray%.of ( ...items )
- // https://tc39.github.io/ecma262/#sec-%typedarray%.of
- transitioning javascript builtin
- TypedArrayOf(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSTypedArray {
- try {
- // 1. Let len be the actual number of arguments passed to this function.
- const len: uintptr = Unsigned(arguments.length);
-
- // 2. Let items be the List of arguments passed to this function.
-
- // 3. Let C be the this value.
- // 4. If IsConstructor(C) is false, throw a TypeError exception.
- const constructor = Cast<Constructor>(receiver) otherwise NotConstructor;
-
- // 5. Let newObj be ? TypedArrayCreate(C, len).
- const newObj = TypedArrayCreateByLength(
- constructor, Convert<Number>(len), kBuiltinNameOf);
-
- const accessor: TypedArrayAccessor =
- GetTypedArrayAccessor(newObj.elements_kind);
-
- // 6. Let k be 0.
- // 7. Repeat, while k < len
- for (let k: uintptr = 0; k < len; k++) {
- // 7a. Let kValue be items[k].
- const kValue: JSAny = arguments[Signed(k)];
-
- // 7b. Let Pk be ! ToString(k).
- // 7c. Perform ? Set(newObj, Pk, kValue, true).
- // Buffer may be detached during executing ToNumber/ToBigInt.
- accessor.StoreJSAny(context, newObj, k, kValue) otherwise IfDetached;
-
- // 7d. Increase k by 1. (done by the loop).
- }
-
- // 8. Return newObj.
- return newObj;
- }
- label NotConstructor deferred {
- ThrowTypeError(MessageTemplate::kNotConstructor, receiver);
- }
- label IfDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameOf);
+const kBuiltinNameOf: constexpr string = '%TypedArray%.of';
+
+// %TypedArray%.of ( ...items )
+// https://tc39.github.io/ecma262/#sec-%typedarray%.of
+transitioning javascript builtin
+TypedArrayOf(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
+ JSTypedArray {
+ try {
+ // 1. Let len be the actual number of arguments passed to this function.
+ const len: uintptr = Unsigned(arguments.length);
+
+ // 2. Let items be the List of arguments passed to this function.
+
+ // 3. Let C be the this value.
+ // 4. If IsConstructor(C) is false, throw a TypeError exception.
+ const constructor = Cast<Constructor>(receiver) otherwise NotConstructor;
+
+ // 5. Let newObj be ? TypedArrayCreate(C, len).
+ const newObj = TypedArrayCreateByLength(
+ constructor, Convert<Number>(len), kBuiltinNameOf);
+
+ const accessor: TypedArrayAccessor =
+ GetTypedArrayAccessor(newObj.elements_kind);
+
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ for (let k: uintptr = 0; k < len; k++) {
+ // 7a. Let kValue be items[k].
+ const kValue: JSAny = arguments[Signed(k)];
+
+ // 7b. Let Pk be ! ToString(k).
+ // 7c. Perform ? Set(newObj, Pk, kValue, true).
+ // Buffer may be detached during executing ToNumber/ToBigInt.
+ accessor.StoreJSAny(context, newObj, k, kValue) otherwise IfDetached;
+
+ // 7d. Increase k by 1. (done by the loop).
}
+
+ // 8. Return newObj.
+ return newObj;
+ } label NotConstructor deferred {
+ ThrowTypeError(MessageTemplate::kNotConstructor, receiver);
+ } label IfDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameOf);
}
}
+}
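TypedArrayOf allocates exactly arguments.length elements via TypedArrayCreateByLength and stores each argument after the usual number/BigInt conversion. Usage sketch (illustrative only, not part of this patch):

    console.log(Uint8Array.of(1, 2, 255, 256));  // Uint8Array [1, 2, 255, 0] (256 wraps)
    console.log(BigInt64Array.of(1n, 2n));       // BigInt64Array [1n, 2n]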
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index a2ff53f65c..a54ed1040e 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -5,65 +5,61 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameReduce: constexpr string = '%TypedArray%.prototype.reduce';
+const kBuiltinNameReduce: constexpr string = '%TypedArray%.prototype.reduce';
- transitioning macro ReduceAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- initialValue: JSAny|TheHole): JSAny {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- let accumulator = initialValue;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- typeswitch (accumulator) {
- case (TheHole): {
- accumulator = value;
- }
- case (accumulatorNotHole: JSAny): {
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value,
- Convert<Number>(k), witness.GetStable());
- }
- }
- }
+transitioning macro ReduceAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ initialValue: JSAny|TheHole): JSAny {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ let accumulator = initialValue;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
typeswitch (accumulator) {
case (TheHole): {
- ThrowTypeError(MessageTemplate::kReduceNoInitial, kBuiltinNameReduce);
+ accumulator = value;
}
- case (accumulator: JSAny): {
- return accumulator;
+ case (accumulatorNotHole: JSAny): {
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulatorNotHole, value,
+ Convert<Number>(k), witness.GetStable());
}
}
}
-
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
- transitioning javascript builtin
- TypedArrayPrototypeReduce(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = initialValue.
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
- return ReduceAllElements(uarray, callbackfn, initialValue);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduce);
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(MessageTemplate::kReduceNoInitial, kBuiltinNameReduce);
}
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameReduce);
+ case (accumulator: JSAny): {
+ return accumulator;
}
}
}
+
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
+transitioning javascript builtin
+TypedArrayPrototypeReduce(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = initialValue.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
+ return ReduceAllElements(uarray, callbackfn, initialValue);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduce);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameReduce);
+ }
+}
+}
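ReduceAllElements uses TheHole as its "no accumulator yet" sentinel: when no initial value is passed, the first visited element seeds the accumulator, and an empty array with no initial value throws. Usage sketch (illustrative only, not part of this patch):

    const a = new Int8Array([1, 2, 3]);
    console.log(a.reduce((acc, v) => acc + v));      // 6
    console.log(a.reduce((acc, v) => acc + v, 10));  // 16
    try {
      new Int8Array(0).reduce((acc, v) => acc + v);
    } catch (e) {
      console.log(e instanceof TypeError);           // true (kReduceNoInitial)
    }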
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index ab334a1b86..9ba2f70de4 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -5,69 +5,65 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameReduceRight: constexpr string =
- '%TypedArray%.prototype.reduceRight';
+const kBuiltinNameReduceRight: constexpr string =
+ '%TypedArray%.prototype.reduceRight';
- transitioning macro ReduceRightAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- initialValue: JSAny|TheHole): JSAny {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- let accumulator = initialValue;
- for (let k: uintptr = length; k-- > 0;) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- typeswitch (accumulator) {
- case (TheHole): {
- accumulator = value;
- }
- case (accumulatorNotHole: JSAny): {
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- accumulator = Call(
- context, callbackfn, Undefined, accumulatorNotHole, value,
- Convert<Number>(k), witness.GetStable());
- }
- }
- }
+transitioning macro ReduceRightAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ initialValue: JSAny|TheHole): JSAny {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ let accumulator = initialValue;
+ for (let k: uintptr = length; k-- > 0;) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
typeswitch (accumulator) {
case (TheHole): {
- ThrowTypeError(
- MessageTemplate::kReduceNoInitial, kBuiltinNameReduceRight);
+ accumulator = value;
}
- case (accumulator: JSAny): {
- return accumulator;
+ case (accumulatorNotHole: JSAny): {
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ accumulator = Call(
+ context, callbackfn, Undefined, accumulatorNotHole, value,
+ Convert<Number>(k), witness.GetStable());
}
}
}
+ typeswitch (accumulator) {
+ case (TheHole): {
+ ThrowTypeError(
+ MessageTemplate::kReduceNoInitial, kBuiltinNameReduceRight);
+ }
+ case (accumulator: JSAny): {
+ return accumulator;
+ }
+ }
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
- transitioning javascript builtin
- TypedArrayPrototypeReduceRight(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = initialValue.
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
+transitioning javascript builtin
+TypedArrayPrototypeReduceRight(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = initialValue.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
- return ReduceRightAllElements(uarray, callbackfn, initialValue);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduceRight);
- }
- label IsDetached deferred {
- ThrowTypeError(
- MessageTemplate::kDetachedOperation, kBuiltinNameReduceRight);
- }
+ return ReduceRightAllElements(uarray, callbackfn, initialValue);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduceRight);
+ } label IsDetached deferred {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation, kBuiltinNameReduceRight);
}
}
+}
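ReduceRightAllElements is the mirror image of ReduceAllElements, walking from the last index down to 0 with the same TheHole sentinel. Usage sketch (illustrative only, not part of this patch):

    const a = new Uint8Array([1, 2, 3]);
    console.log(a.reduceRight((acc, v) => acc + '-' + v, 'start'));
    // "start-3-2-1"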
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index bb70692e76..b5c9dcb261 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -5,315 +5,306 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameSet: constexpr string = '%TypedArray%.prototype.set';
-
- extern runtime TypedArraySet(Context, JSTypedArray, Object, Number, Number):
- void;
-
- extern macro
- TypedArrayBuiltinsAssembler::CallCCopyFastNumberJSArrayElementsToTypedArray(
- Context,
- FastJSArray, // source
- AttachedJSTypedArray, // dest
- uintptr, // sourceLength
- uintptr // destOffset
- ): void;
-
- extern macro
- TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
- AttachedJSTypedArray, // source
- AttachedJSTypedArray, // dest
- uintptr, // sourceLength
- uintptr // destOffset
- ): void;
-
- // %TypedArray%.prototype.set ( overloaded [ , offset ] )
- // https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-overloaded-offset
- transitioning javascript builtin
- TypedArrayPrototypeSet(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // Steps 2-8 are the same for
- // %TypedArray%.prototype.set ( array [ , offset ] ) and
- // %TypedArray%.prototype.set ( typedArray [ , offset ] ) overloads.
-
- let target: JSTypedArray;
- try {
- // 2. Let target be the this value.
- // 3. Perform ? RequireInternalSlot(target, [[TypedArrayName]]).
- // 4. Assert: target has a [[ViewedArrayBuffer]] internal slot.
- target = Cast<JSTypedArray>(receiver) otherwise NotTypedArray;
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSet);
- }
-
- try {
- // 5. Let targetOffset be ? ToInteger(offset).
- // 6. If targetOffset < 0, throw a RangeError exception.
- let targetOffsetOverflowed: bool = false;
- let targetOffset: uintptr = 0;
- if (arguments.length > 1) {
- const offsetArg = arguments[1];
- try {
- targetOffset = ToUintPtr(offsetArg)
- // On values less than zero throw RangeError immediately.
- otherwise OffsetOutOfBounds,
- // On UintPtr or SafeInteger range overflow throw RangeError after
- // performing observable steps to follow the spec.
- OffsetOverflow, OffsetOverflow;
- }
- label OffsetOverflow {
- targetOffsetOverflowed = true;
- }
- } else {
- // If the offset argument is not provided then the targetOffset is 0.
- }
-
- // 7. Let targetBuffer be target.[[ViewedArrayBuffer]].
- // 8. If IsDetachedBuffer(targetBuffer) is true, throw a TypeError
- // exception.
- const utarget = typed_array::EnsureAttached(target) otherwise IsDetached;
+const kBuiltinNameSet: constexpr string = '%TypedArray%.prototype.set';
+
+extern runtime TypedArraySet(
+ Context, JSTypedArray, Object, Number, Number): void;
+
+extern macro
+TypedArrayBuiltinsAssembler::CallCCopyFastNumberJSArrayElementsToTypedArray(
+ Context,
+ FastJSArray, // source
+ AttachedJSTypedArray, // dest
+ uintptr, // sourceLength
+ uintptr // destOffset
+ ): void;
+
+extern macro
+TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
+ AttachedJSTypedArray, // source
+ AttachedJSTypedArray, // dest
+ uintptr, // sourceLength
+ uintptr // destOffset
+ ): void;
+
+// %TypedArray%.prototype.set ( overloaded [ , offset ] )
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-overloaded-offset
+transitioning javascript builtin
+TypedArrayPrototypeSet(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // Steps 2-8 are the same for
+ // %TypedArray%.prototype.set ( array [ , offset ] ) and
+ // %TypedArray%.prototype.set ( typedArray [ , offset ] ) overloads.
+
+ let target: JSTypedArray;
+ try {
+ // 2. Let target be the this value.
+ // 3. Perform ? RequireInternalSlot(target, [[TypedArrayName]]).
+ // 4. Assert: target has a [[ViewedArrayBuffer]] internal slot.
+ target = Cast<JSTypedArray>(receiver) otherwise NotTypedArray;
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSet);
+ }
- const overloadedArg = arguments[0];
+ try {
+ // 5. Let targetOffset be ? ToInteger(offset).
+ // 6. If targetOffset < 0, throw a RangeError exception.
+ let targetOffsetOverflowed: bool = false;
+ let targetOffset: uintptr = 0;
+ if (arguments.length > 1) {
+ const offsetArg = arguments[1];
try {
- // 1. Choose 22.2.3.23.2 or 22.2.3.23.1 depending on whether the
- // overloadedArg has a [[TypedArrayName]] internal slot.
- // If it does, the definition in 22.2.3.23.2 applies.
- // If it does not, the definition in 22.2.3.23.1 applies.
- const typedArray =
- Cast<JSTypedArray>(overloadedArg) otherwise NotTypedArray;
-
- // Step 9 is not observable, do it later.
-
- // 10. Let srcBuffer be typedArray.[[ViewedArrayBuffer]].
- // 11. If IsDetachedBuffer(srcBuffer) is true, throw a TypeError
- // exception.
- const utypedArray =
- typed_array::EnsureAttached(typedArray) otherwise IsDetached;
-
- TypedArrayPrototypeSetTypedArray(
- utarget, utypedArray, targetOffset, targetOffsetOverflowed)
- otherwise OffsetOutOfBounds;
- return Undefined;
- }
- label NotTypedArray deferred {
- TypedArrayPrototypeSetArray(
- utarget, overloadedArg, targetOffset, targetOffsetOverflowed)
- otherwise OffsetOutOfBounds, IsDetached;
- return Undefined;
+ targetOffset = ToUintPtr(offsetArg)
+ // On values less than zero throw RangeError immediately.
+ otherwise OffsetOutOfBounds,
+ // On UintPtr or SafeInteger range overflow throw RangeError after
+ // performing observable steps to follow the spec.
+ OffsetOverflow, OffsetOverflow;
+ } label OffsetOverflow {
+ targetOffsetOverflowed = true;
}
+ } else {
+ // If the offset argument is not provided then the targetOffset is 0.
}
- label OffsetOutOfBounds deferred {
- ThrowRangeError(MessageTemplate::kTypedArraySetOffsetOutOfBounds);
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSet);
- }
- }
- // %TypedArray%.prototype.set ( array [ , offset ] )
- // https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-array-offset
- transitioning macro
- TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
- target: JSTypedArray, arrayArg: JSAny, targetOffset: uintptr,
- targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds,
- IfDetached {
- // Steps 9-13 are not observable, do them later.
-
- // TODO(v8:8906): This ported behaviour is an observable spec violation and
- // the comment below seems to be outdated. Consider removing this code.
+ // 7. Let targetBuffer be target.[[ViewedArrayBuffer]].
+ // 8. If IsDetachedBuffer(targetBuffer) is true, throw a TypeError
+ // exception.
+ const utarget = typed_array::EnsureAttached(target) otherwise IsDetached;
+
+ const overloadedArg = arguments[0];
try {
- const _arrayArgNum = Cast<Number>(arrayArg) otherwise NotNumber;
- // For number as a first argument, throw TypeError instead of silently
- // ignoring the call, so that users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- ThrowTypeError(MessageTemplate::kInvalidArgument);
- }
- label NotNumber {
- // Proceed to step 14.
- }
+ // 1. Choose 22.2.3.23.2 or 22.2.3.23.1 depending on whether the
+ // overloadedArg has a [[TypedArrayName]] internal slot.
+ // If it does, the definition in 22.2.3.23.2 applies.
+ // If it does not, the definition in 22.2.3.23.1 applies.
+ const typedArray =
+ Cast<JSTypedArray>(overloadedArg) otherwise NotTypedArray;
- // 14. Let src be ? ToObject(array).
- const src: JSReceiver = ToObject_Inline(context, arrayArg);
+ // Step 9 is not observable, do it later.
- // 15. Let srcLength be ? LengthOfArrayLike(src).
- const srcLengthNum: Number = GetLengthProperty(src);
+ // 10. Let srcBuffer be typedArray.[[ViewedArrayBuffer]].
+ // 11. If IsDetachedBuffer(srcBuffer) is true, throw a TypeError
+ // exception.
+ const utypedArray =
+ typed_array::EnsureAttached(typedArray) otherwise IsDetached;
+
+ TypedArrayPrototypeSetTypedArray(
+ utarget, utypedArray, targetOffset, targetOffsetOverflowed)
+ otherwise OffsetOutOfBounds;
+ return Undefined;
+ } label NotTypedArray deferred {
+ TypedArrayPrototypeSetArray(
+ utarget, overloadedArg, targetOffset, targetOffsetOverflowed)
+ otherwise OffsetOutOfBounds, IsDetached;
+ return Undefined;
+ }
+ } label OffsetOutOfBounds deferred {
+ ThrowRangeError(MessageTemplate::kTypedArraySetOffsetOutOfBounds);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSet);
+ }
+}
- if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
+// %TypedArray%.prototype.set ( array [ , offset ] )
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-array-offset
+transitioning macro
+TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
+ target: JSTypedArray, arrayArg: JSAny, targetOffset: uintptr,
+ targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds,
+ IfDetached {
+ // Steps 9-13 are not observable, do them later.
+
+ // TODO(v8:8906): This ported behaviour is an observable spec violation and
+ // the comment below seems to be outdated. Consider removing this code.
+ try {
+ const _arrayArgNum = Cast<Number>(arrayArg) otherwise NotNumber;
+ // For number as a first argument, throw TypeError instead of silently
+ // ignoring the call, so that users know they did something wrong.
+ // (Consistent with Firefox and Blink/WebKit)
+ ThrowTypeError(MessageTemplate::kInvalidArgument);
+ } label NotNumber {
+ // Proceed to step 14.
+ }
- // 9. Let targetLength be target.[[ArrayLength]].
- const targetLength = target.length;
+ // 14. Let src be ? ToObject(array).
+ const src: JSReceiver = ToObject_Inline(context, arrayArg);
+
+ // 15. Let srcLength be ? LengthOfArrayLike(src).
+ const srcLengthNum: Number = GetLengthProperty(src);
+
+ if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
+
+ // 9. Let targetLength be target.[[ArrayLength]].
+ const targetLength = target.length;
+
+ // 16. If srcLength + targetOffset > targetLength, throw a RangeError
+ // exception.
+ const srcLength = ChangeSafeIntegerNumberToUintPtr(srcLengthNum)
+ otherwise IfOffsetOutOfBounds;
+ CheckIntegerIndexAdditionOverflow(srcLength, targetOffset, targetLength)
+ otherwise IfOffsetOutOfBounds;
+
+  // All the observable side effects are executed, so there's nothing else
+ // to do with the empty source array.
+ if (srcLength == 0) return;
+
+ // 10. Let targetName be the String value of target.[[TypedArrayName]].
+ // 11. Let targetElementSize be the Element Size value specified in
+ // Table 62 for targetName.
+ // 12. Let targetType be the Element Type value in Table 62 for
+ // targetName.
+
+ try {
+ // BigInt typed arrays are not handled by
+ // CopyFastNumberJSArrayElementsToTypedArray.
+ if (IsBigInt64ElementsKind(target.elements_kind)) goto IfSlow;
+
+ const fastSrc: FastJSArray = Cast<FastJSArray>(src) otherwise goto IfSlow;
+ const srcKind: ElementsKind = fastSrc.map.elements_kind;
+
+ // CopyFastNumberJSArrayElementsToTypedArray() can be used only with the
+ // following elements kinds:
+ // PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS,
+ // HOLEY_DOUBLE_ELEMENTS.
+ if (IsElementsKindInRange(
+ srcKind, ElementsKind::PACKED_SMI_ELEMENTS,
+ ElementsKind::HOLEY_SMI_ELEMENTS) ||
+ IsElementsKindInRange(
+ srcKind, ElementsKind::PACKED_DOUBLE_ELEMENTS,
+ ElementsKind::HOLEY_DOUBLE_ELEMENTS)) {
+ const utarget = typed_array::EnsureAttached(target) otherwise IfDetached;
+ CallCCopyFastNumberJSArrayElementsToTypedArray(
+ context, fastSrc, utarget, srcLength, targetOffset);
+
+ } else {
+ goto IfSlow;
+ }
+ } label IfSlow deferred {
+ TypedArraySet(
+ context, target, src, srcLengthNum, Convert<Number>(targetOffset));
+ }
+}
- // 16. If srcLength + targetOffset > targetLength, throw a RangeError
- // exception.
- const srcLength = ChangeSafeIntegerNumberToUintPtr(srcLengthNum)
- otherwise IfOffsetOutOfBounds;
- CheckIntegerIndexAdditionOverflow(srcLength, targetOffset, targetLength)
- otherwise IfOffsetOutOfBounds;
+// %TypedArray%.prototype.set ( typedArray [ , offset ] )
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-typedarray-offset
+transitioning macro
+TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
+ target: AttachedJSTypedArray, typedArray: AttachedJSTypedArray,
+ targetOffset: uintptr,
+ targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds {
+ // Steps 12-20 are not observable, so we can handle offset overflow
+ // at step 21 here.
+ if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
+
+ // 9. Let targetLength be target.[[ArrayLength]].
+ const targetLength = target.length;
+
+ // 19. Let srcLength be typedArray.[[ArrayLength]].
+ const srcLength: uintptr = typedArray.length;
+
+ // Steps 12-20 are not observable, so we can do step 21 here.
+
+ // 21. If srcLength + targetOffset > targetLength, throw a RangeError
+ // exception.
+ CheckIntegerIndexAdditionOverflow(srcLength, targetOffset, targetLength)
+ otherwise IfOffsetOutOfBounds;
+
+ // 12. Let targetName be the String value of target.[[TypedArrayName]].
+ // 13. Let targetType be the Element Type value in Table 62 for
+ // targetName.
+ // 14. Let targetElementSize be the Element Size value specified in
+ // Table 62 for targetName.
+ const targetElementsInfo = GetTypedArrayElementsInfo(target);
+
+ // 16. Let srcName be the String value of typedArray.[[TypedArrayName]].
+ // 17. Let srcType be the Element Type value in Table 62 for srcName.
+ // 18. Let srcElementSize be the Element Size value specified in
+ // Table 62 for srcName.
+ const srcKind: ElementsKind = typedArray.elements_kind;
+ // const srcElementsInfo = GetTypedArrayElementsInfo(typedArray);
+
+ // We skip steps 23-25 because both memmove and
+ // CopyTypedArrayElementsToTypedArray() properly handle overlapping
+ // regions.
+
+ // 23. If both IsSharedArrayBuffer(srcBuffer) and
+ // IsSharedArrayBuffer(targetBuffer) are true, then
+ // 23a. If srcBuffer.[[ArrayBufferData]] and
+ // targetBuffer.[[ArrayBufferData]] are the same Shared Data Block
+ // values, let same be true; else let same be false.
+ // 24. Else, let same be SameValue(srcBuffer, targetBuffer).
+ // 25. If same is true, then
+ // a. Let srcByteLength be typedArray.[[ByteLength]].
+ // b. Set srcBuffer to ? CloneArrayBuffer(srcBuffer, srcByteOffset,
+ // srcByteLength, %ArrayBuffer%).
+  //     c. NOTE: %ArrayBuffer% is used to clone srcBuffer because it is known
+ // to not have any observable side-effects.
+ // d. Let srcByteIndex be 0.
+
+ try {
+ // Use memmove if possible.
+ if (srcKind != targetElementsInfo.kind) {
+ // Uint8/Uint8Clamped elements could still be copied with memmove.
+ if (!IsUint8ElementsKind(srcKind) ||
+ !IsUint8ElementsKind(targetElementsInfo.kind)) {
+ goto IfSlow;
+ }
+ }
    // All the observable side effects are executed, so there's nothing else
// to do with the empty source array.
if (srcLength == 0) return;
- // 10. Let targetName be the String value of target.[[TypedArrayName]].
- // 11. Let targetElementSize be the Element Size value specified in
- // Table 62 for targetName.
- // 12. Let targetType be the Element Type value in Table 62 for
- // targetName.
-
- try {
- // BigInt typed arrays are not handled by
- // CopyFastNumberJSArrayElementsToTypedArray.
- if (IsBigInt64ElementsKind(target.elements_kind)) goto IfSlow;
-
- const fastSrc: FastJSArray = Cast<FastJSArray>(src) otherwise goto IfSlow;
- const srcKind: ElementsKind = fastSrc.map.elements_kind;
-
- // CopyFastNumberJSArrayElementsToTypedArray() can be used only with the
- // following elements kinds:
- // PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS,
- // HOLEY_DOUBLE_ELEMENTS.
- if (IsElementsKindInRange(
- srcKind, ElementsKind::PACKED_SMI_ELEMENTS,
- ElementsKind::HOLEY_SMI_ELEMENTS) ||
- IsElementsKindInRange(
- srcKind, ElementsKind::PACKED_DOUBLE_ELEMENTS,
- ElementsKind::HOLEY_DOUBLE_ELEMENTS)) {
- const utarget =
- typed_array::EnsureAttached(target) otherwise IfDetached;
- CallCCopyFastNumberJSArrayElementsToTypedArray(
- context, fastSrc, utarget, srcLength, targetOffset);
-
- } else {
- goto IfSlow;
+ // Source and destination typed arrays have same elements kinds (modulo
+ // Uint8-Uint8Clamped difference) so we can use targetElementsInfo for
+ // calculations.
+ const countBytes: uintptr =
+ targetElementsInfo.CalculateByteLength(srcLength)
+ otherwise unreachable;
+ const startOffset: uintptr =
+ targetElementsInfo.CalculateByteLength(targetOffset)
+ otherwise unreachable;
+ const dstPtr: RawPtr = target.data_ptr + Convert<intptr>(startOffset);
+
+ assert(countBytes <= target.byte_length - startOffset);
+ assert(countBytes <= typedArray.byte_length);
+
+ // 29. If srcType is the same as targetType, then
+ // a. NOTE: If srcType and targetType are the same, the transfer must
+ // be performed in a manner that preserves the bit-level encoding of
+ // the source data.
+ // b. Repeat, while targetByteIndex < limit
+ // i. Let value be GetValueFromBuffer(srcBuffer, srcByteIndex, Uint8,
+ // true, Unordered).
+ // ii. Perform SetValueInBuffer(targetBuffer, targetByteIndex, Uint8,
+ // value, true, Unordered).
+ // iii. Set srcByteIndex to srcByteIndex + 1.
+ // iv. Set targetByteIndex to targetByteIndex + 1.
+ CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
+ } label IfSlow deferred {
+ // 22. If target.[[ContentType]] is not equal to
+ // typedArray.[[ContentType]], throw a TypeError exception.
+ if (IsBigInt64ElementsKind(srcKind) !=
+ IsBigInt64ElementsKind(targetElementsInfo.kind))
+ deferred {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
}
- }
- label IfSlow deferred {
- TypedArraySet(
- context, target, src, srcLengthNum, Convert<Number>(targetOffset));
- }
- }
-
- // %TypedArray%.prototype.set ( typedArray [ , offset ] )
- // https://tc39.es/ecma262/#sec-%typedarray%.prototype.set-typedarray-offset
- transitioning macro
- TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
- target: AttachedJSTypedArray, typedArray: AttachedJSTypedArray,
- targetOffset: uintptr,
- targetOffsetOverflowed: bool): void labels IfOffsetOutOfBounds {
- // Steps 12-20 are not observable, so we can handle offset overflow
- // at step 21 here.
- if (targetOffsetOverflowed) goto IfOffsetOutOfBounds;
-
- // 9. Let targetLength be target.[[ArrayLength]].
- const targetLength = target.length;
- // 19. Let srcLength be typedArray.[[ArrayLength]].
- const srcLength: uintptr = typedArray.length;
-
- // Steps 12-20 are not observable, so we can do step 21 here.
-
- // 21. If srcLength + targetOffset > targetLength, throw a RangeError
- // exception.
- CheckIntegerIndexAdditionOverflow(srcLength, targetOffset, targetLength)
- otherwise IfOffsetOutOfBounds;
-
- // 12. Let targetName be the String value of target.[[TypedArrayName]].
- // 13. Let targetType be the Element Type value in Table 62 for
- // targetName.
- // 14. Let targetElementSize be the Element Size value specified in
- // Table 62 for targetName.
- const targetElementsInfo = GetTypedArrayElementsInfo(target);
-
- // 16. Let srcName be the String value of typedArray.[[TypedArrayName]].
- // 17. Let srcType be the Element Type value in Table 62 for srcName.
- // 18. Let srcElementSize be the Element Size value specified in
- // Table 62 for srcName.
- const srcKind: ElementsKind = typedArray.elements_kind;
- // const srcElementsInfo = GetTypedArrayElementsInfo(typedArray);
-
- // We skip steps 23-25 because both memmove and
- // CopyTypedArrayElementsToTypedArray() properly handle overlapping
- // regions.
-
- // 23. If both IsSharedArrayBuffer(srcBuffer) and
- // IsSharedArrayBuffer(targetBuffer) are true, then
- // 23a. If srcBuffer.[[ArrayBufferData]] and
- // targetBuffer.[[ArrayBufferData]] are the same Shared Data Block
- // values, let same be true; else let same be false.
- // 24. Else, let same be SameValue(srcBuffer, targetBuffer).
- // 25. If same is true, then
- // a. Let srcByteLength be typedArray.[[ByteLength]].
- // b. Set srcBuffer to ? CloneArrayBuffer(srcBuffer, srcByteOffset,
- // srcByteLength, %ArrayBuffer%).
-  //     c. NOTE: %ArrayBuffer% is used to clone srcBuffer because it is known
- // to not have any observable side-effects.
- // d. Let srcByteIndex be 0.
-
- try {
- // Use memmove if possible.
- if (srcKind != targetElementsInfo.kind) {
- // Uint8/Uint8Clamped elements could still be copied with memmove.
- if (!IsUint8ElementsKind(srcKind) ||
- !IsUint8ElementsKind(targetElementsInfo.kind)) {
- goto IfSlow;
- }
- }
+      // All the observable side effects are executed, so there's nothing else
+ // to do with the empty source array.
+ if (srcLength == 0) return;
-      // All the observable side effects are executed, so there's nothing else
- // to do with the empty source array.
- if (srcLength == 0) return;
-
- // Source and destination typed arrays have same elements kinds (modulo
- // Uint8-Uint8Clamped difference) so we can use targetElementsInfo for
- // calculations.
- const countBytes: uintptr =
- targetElementsInfo.CalculateByteLength(srcLength)
- otherwise unreachable;
- const startOffset: uintptr =
- targetElementsInfo.CalculateByteLength(targetOffset)
- otherwise unreachable;
- const dstPtr: RawPtr = target.data_ptr + Convert<intptr>(startOffset);
-
- assert(countBytes <= target.byte_length - startOffset);
- assert(countBytes <= typedArray.byte_length);
-
- // 29. If srcType is the same as targetType, then
- // a. NOTE: If srcType and targetType are the same, the transfer must
- // be performed in a manner that preserves the bit-level encoding of
- // the source data.
- // b. Repeat, while targetByteIndex < limit
- // i. Let value be GetValueFromBuffer(srcBuffer, srcByteIndex, Uint8,
- // true, Unordered).
- // ii. Perform SetValueInBuffer(targetBuffer, targetByteIndex, Uint8,
- // value, true, Unordered).
- // iii. Set srcByteIndex to srcByteIndex + 1.
- // iv. Set targetByteIndex to targetByteIndex + 1.
- CallCMemmove(dstPtr, typedArray.data_ptr, countBytes);
- }
- label IfSlow deferred {
- // 22. If target.[[ContentType]] is not equal to
- // typedArray.[[ContentType]], throw a TypeError exception.
- if (IsBigInt64ElementsKind(srcKind) !=
- IsBigInt64ElementsKind(targetElementsInfo.kind))
- deferred {
- ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
- }
-
-    // All the observable side effects are executed, so there's nothing else
- // to do with the empty source array.
- if (srcLength == 0) return;
-
- // 30. Else,
- // a. Repeat, while targetByteIndex < limit
- // i. Let value be GetValueFromBuffer(srcBuffer, srcByteIndex,
- // srcType, true, Unordered).
- // ii. Perform SetValueInBuffer(targetBuffer, targetByteIndex,
- // targetType, value, true, Unordered).
- // iii. Set srcByteIndex to srcByteIndex + srcElementSize.
- // iv. Set targetByteIndex to targetByteIndex + targetElementSize.
- CallCCopyTypedArrayElementsToTypedArray(
- typedArray, target, srcLength, targetOffset);
- }
+ // 30. Else,
+ // a. Repeat, while targetByteIndex < limit
+ // i. Let value be GetValueFromBuffer(srcBuffer, srcByteIndex,
+ // srcType, true, Unordered).
+ // ii. Perform SetValueInBuffer(targetBuffer, targetByteIndex,
+ // targetType, value, true, Unordered).
+ // iii. Set srcByteIndex to srcByteIndex + srcElementSize.
+ // iv. Set targetByteIndex to targetByteIndex + targetElementSize.
+ CallCCopyTypedArrayElementsToTypedArray(
+ typedArray, target, srcLength, targetOffset);
}
}
+}
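As with the other files in this commit, the set() changes above are indentation-only; the split between the array overload (TypedArrayPrototypeSetArray, with a runtime fallback via TypedArraySet) and the typed-array overload (TypedArrayPrototypeSetTypedArray, which can use memmove when the element kinds agree) is preserved. A small TypeScript sketch of the behaviour these macros implement, assuming an ordinary Uint8Array target; it exercises the observable contract only, not the internal fast/slow path choice:

const target = new Uint8Array(8);

// Array overload: elements are converted and written starting at the offset.
target.set([1, 2, 3], 2);
console.log(Array.from(target)); // [0, 0, 1, 2, 3, 0, 0, 0]

// TypedArray overload over the same buffer: overlapping ranges must behave as
// if the source were read before any write, which is why memmove (not memcpy)
// is safe on the fast path.
const view = new Uint8Array(target.buffer, 1, 4); // bytes 1..4 = [0, 1, 2, 3]
target.set(view, 0);
console.log(Array.from(target)); // [0, 1, 2, 3, 3, 0, 0, 0]

// srcLength + targetOffset > targetLength throws a RangeError
// (kTypedArraySetOffsetOutOfBounds).
try {
  target.set([1, 2], 7);
} catch (e) {
  console.log(e instanceof RangeError); // true
}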
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index 578c2d017a..60604c548f 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -5,102 +5,99 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameSlice: constexpr string = '%TypedArray%.prototype.slice';
-
- extern macro TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
- JSTypedArray, JSTypedArray, uintptr, uintptr): void;
-
- macro FastCopy(
- src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: uintptr,
- count: uintptr) labels IfSlow {
- if (IsForceSlowPath()) goto IfSlow;
-
- const srcKind: ElementsKind = src.elements_kind;
- const destInfo = typed_array::GetTypedArrayElementsInfo(dest);
-
- // dest could be a different type from src or share the same buffer
- // with the src because of custom species constructor. If the types
- // of src and result array are the same and they are not sharing the
- // same buffer, use memmove.
- if (srcKind != destInfo.kind) goto IfSlow;
- if (dest.buffer == src.buffer) {
- goto IfSlow;
- }
+const kBuiltinNameSlice: constexpr string = '%TypedArray%.prototype.slice';
+
+extern macro TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
+ JSTypedArray, JSTypedArray, uintptr, uintptr): void;
+
+macro FastCopy(
+ src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: uintptr,
+ count: uintptr) labels IfSlow {
+ if (IsForceSlowPath()) goto IfSlow;
+
+ const srcKind: ElementsKind = src.elements_kind;
+ const destInfo = typed_array::GetTypedArrayElementsInfo(dest);
+
+ // dest could be a different type from src or share the same buffer
+ // with the src because of custom species constructor. If the types
+ // of src and result array are the same and they are not sharing the
+ // same buffer, use memmove.
+ if (srcKind != destInfo.kind) goto IfSlow;
+ if (dest.buffer == src.buffer) {
+ goto IfSlow;
+ }
- const countBytes: uintptr = destInfo.CalculateByteLength(count)
- otherwise unreachable;
- const startOffset: uintptr = destInfo.CalculateByteLength(k)
- otherwise unreachable;
- const srcPtr: RawPtr = src.data_ptr + Convert<intptr>(startOffset);
+ const countBytes: uintptr = destInfo.CalculateByteLength(count)
+ otherwise unreachable;
+ const startOffset: uintptr = destInfo.CalculateByteLength(k)
+ otherwise unreachable;
+ const srcPtr: RawPtr = src.data_ptr + Convert<intptr>(startOffset);
- assert(countBytes <= dest.byte_length);
- assert(countBytes <= src.byte_length - startOffset);
+ assert(countBytes <= dest.byte_length);
+ assert(countBytes <= src.byte_length - startOffset);
- typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
- }
+ typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes);
+}
- macro SlowCopy(implicit context: Context)(
- src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr) {
- if (typed_array::IsBigInt64ElementsKind(src.elements_kind) !=
- typed_array::IsBigInt64ElementsKind(dest.elements_kind))
- deferred {
- ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
- }
+macro SlowCopy(implicit context: Context)(
+ src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr) {
+ if (typed_array::IsBigInt64ElementsKind(src.elements_kind) !=
+ typed_array::IsBigInt64ElementsKind(dest.elements_kind))
+ deferred {
+ ThrowTypeError(MessageTemplate::kBigIntMixedTypes);
+ }
- CallCCopyTypedArrayElementsSlice(src, dest, k, final);
- }
+ CallCCopyTypedArrayElementsSlice(src, dest, k, final);
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
- transitioning javascript builtin TypedArrayPrototypeSlice(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSAny {
- // arguments[0] = start
- // arguments[1] = end
-
- // 1. Let O be the this value.
- // 2. Perform ? ValidateTypedArray(O).
- const src: JSTypedArray =
- ValidateTypedArray(context, receiver, kBuiltinNameSlice);
-
- // 3. Let len be O.[[ArrayLength]].
- const len: uintptr = src.length;
-
- // 4. Let relativeStart be ? ToInteger(start).
- // 5. If relativeStart < 0, let k be max((len + relativeStart), 0);
- // else let k be min(relativeStart, len).
- const start = arguments[0];
- const k: uintptr =
- start != Undefined ? ConvertToRelativeIndex(start, len) : 0;
-
- // 6. If end is undefined, let relativeEnd be len;
- // else let relativeEnd be ? ToInteger(end).
- // 7. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
- // else let final be min(relativeEnd, len).
- const end = arguments[1];
- const final: uintptr =
- end != Undefined ? ConvertToRelativeIndex(end, len) : len;
-
- // 8. Let count be max(final - k, 0).
- const count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
-
- // 9. Let A be ? TypedArraySpeciesCreate(O, Ā« count Ā»).
- const dest: JSTypedArray =
- TypedArraySpeciesCreateByLength(kBuiltinNameSlice, src, count);
-
- if (count > 0) {
- try {
- const srcAttached = typed_array::EnsureAttached(src)
- otherwise IfDetached;
- FastCopy(srcAttached, dest, k, count) otherwise IfSlow;
- }
- label IfDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSlice);
- }
- label IfSlow deferred {
- SlowCopy(src, dest, k, final);
- }
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
+transitioning javascript builtin TypedArrayPrototypeSlice(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = start
+ // arguments[1] = end
+
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ const src: JSTypedArray =
+ ValidateTypedArray(context, receiver, kBuiltinNameSlice);
+
+ // 3. Let len be O.[[ArrayLength]].
+ const len: uintptr = src.length;
+
+ // 4. Let relativeStart be ? ToInteger(start).
+ // 5. If relativeStart < 0, let k be max((len + relativeStart), 0);
+ // else let k be min(relativeStart, len).
+ const start = arguments[0];
+ const k: uintptr =
+ start != Undefined ? ConvertToRelativeIndex(start, len) : 0;
+
+ // 6. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ // 7. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const end = arguments[1];
+ const final: uintptr =
+ end != Undefined ? ConvertToRelativeIndex(end, len) : len;
+
+ // 8. Let count be max(final - k, 0).
+ const count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
+
+ // 9. Let A be ? TypedArraySpeciesCreate(O, Ā« count Ā»).
+ const dest: JSTypedArray =
+ TypedArraySpeciesCreateByLength(kBuiltinNameSlice, src, count);
+
+ if (count > 0) {
+ try {
+ const srcAttached = typed_array::EnsureAttached(src)
+ otherwise IfDetached;
+ FastCopy(srcAttached, dest, k, count) otherwise IfSlow;
+ } label IfDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSlice);
+ } label IfSlow deferred {
+ SlowCopy(src, dest, k, final);
}
-
- return dest;
}
+
+ return dest;
+}
}
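The slice() hunk keeps the same structure: FastCopy is taken only when the element kinds match and source and destination do not share a buffer, otherwise SlowCopy calls into C++. The index arithmetic (k, final, count) is ordinary relative-index clamping, shown here as a plain TypeScript sketch rather than Torque:

const src = new Float64Array([10, 20, 30, 40]);

// k = 1, final = 3, count = max(final - k, 0) = 2.
console.log(Array.from(src.slice(1, 3))); // [20, 30]

// Negative indices are taken relative to the length: k = len - 2 = 2.
console.log(Array.from(src.slice(-2)));   // [30, 40]

// When final < k the count is clamped to zero and an empty array is created.
console.log(src.slice(3, 1).length);      // 0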
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index a09ce964d5..ecdfae1e8a 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -5,52 +5,49 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
+const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
- transitioning macro SomeAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
- let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
- for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
- witness.GetStable());
- if (ToBoolean(result)) {
- return True;
- }
+transitioning macro SomeAllElements(implicit context: Context)(
+ array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ thisArg: JSAny): Boolean {
+ let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+ const length: uintptr = witness.Get().length;
+ for (let k: uintptr = 0; k < length; k++) {
+ // BUG(4895): We should throw on detached buffers rather than simply exit.
+ witness.Recheck() otherwise break;
+ const value: JSAny = witness.Load(k);
+ // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
+ // indices to optimize Convert<Number>(k) for the most common case.
+ const result = Call(
+ context, callbackfn, thisArg, value, Convert<Number>(k),
+ witness.GetStable());
+ if (ToBoolean(result)) {
+ return True;
}
- return False;
}
+ return False;
+}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some
- transitioning javascript builtin
- TypedArrayPrototypeSome(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // arguments[0] = callback
- // arguments[1] = thisArg.
- try {
- const array: JSTypedArray = Cast<JSTypedArray>(receiver)
- otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some
+transitioning javascript builtin
+TypedArrayPrototypeSome(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // arguments[0] = callback
+ // arguments[1] = thisArg.
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+ const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const thisArg = arguments[1];
- return SomeAllElements(uarray, callbackfn, thisArg);
- }
- label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
- }
- label NotTypedArray deferred {
- ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSome);
- }
- label IsDetached deferred {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSome);
- }
+ const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const thisArg = arguments[1];
+ return SomeAllElements(uarray, callbackfn, thisArg);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSome);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSome);
}
}
+}
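Again an indentation-only change for some(); SomeAllElements keeps its early-exit loop. Sketched in plain TypeScript (the witness/Recheck detachment handling in the macro has no equivalent here):

const values = new Uint16Array([0, 0, 7, 0]);
let calls = 0;

// The loop stops at the first callback result that coerces to true
// (ToBoolean(result) in the macro above).
const hit = values.some((v) => {
  calls++;
  return v > 5;
});
console.log(hit, calls); // true 3 -- indices 0, 1 and 2 are visited, 3 is not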
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index 171068761d..c32808038d 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -5,141 +5,140 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- const kBuiltinNameSort: constexpr string = '%TypedArray%.prototype.sort';
+const kBuiltinNameSort: constexpr string = '%TypedArray%.prototype.sort';
- extern runtime TypedArraySortFast(Context, JSAny): JSTypedArray;
+extern runtime TypedArraySortFast(Context, JSAny): JSTypedArray;
- transitioning macro CallCompare(
- implicit context: Context, array: JSTypedArray,
- comparefn: Callable)(a: JSAny, b: JSAny): Number {
- // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
- const v: Number =
- ToNumber_Inline(Call(context, comparefn, Undefined, a, b));
+transitioning macro CallCompare(
+ implicit context: Context, array: JSTypedArray, comparefn: Callable)(
+ a: JSAny, b: JSAny): Number {
+ // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
+ const v: Number = ToNumber_Inline(Call(context, comparefn, Undefined, a, b));
- // b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(array.buffer)) {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSort);
- }
+ // b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (IsDetachedBuffer(array.buffer)) {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSort);
+ }
- // c. If v is NaN, return +0.
- if (NumberIsNaN(v)) return 0;
+ // c. If v is NaN, return +0.
+ if (NumberIsNaN(v)) return 0;
- // d. return v.
- return v;
- }
+ // d. return v.
+ return v;
+}
- // Merges two sorted runs [from, middle) and [middle, to)
- // from "source" into "target".
- transitioning macro
- TypedArrayMerge(
- implicit context: Context, array: JSTypedArray, comparefn: Callable)(
- source: FixedArray, from: uintptr, middle: uintptr, to: uintptr,
- target: FixedArray) {
- let left: uintptr = from;
- let right: uintptr = middle;
-
- for (let targetIndex: uintptr = from; targetIndex < to; ++targetIndex) {
- if (left < middle && right >= to) {
- // If the left run has elements, but the right does not, we take
- // from the left.
- target.objects[targetIndex] = source.objects[left++];
- } else if (left < middle) {
- // If both have elements, we need to compare.
- const leftElement = UnsafeCast<JSAny>(source.objects[left]);
- const rightElement = UnsafeCast<JSAny>(source.objects[right]);
- if (CallCompare(leftElement, rightElement) <= 0) {
- target.objects[targetIndex] = leftElement;
- left++;
- } else {
- target.objects[targetIndex] = rightElement;
- right++;
- }
+// Merges two sorted runs [from, middle) and [middle, to)
+// from "source" into "target".
+transitioning macro
+TypedArrayMerge(
+ implicit context: Context, array: JSTypedArray, comparefn: Callable)(
+ source: FixedArray, from: uintptr, middle: uintptr, to: uintptr,
+ target: FixedArray) {
+ let left: uintptr = from;
+ let right: uintptr = middle;
+
+ for (let targetIndex: uintptr = from; targetIndex < to; ++targetIndex) {
+ if (left < middle && right >= to) {
+ // If the left run has elements, but the right does not, we take
+ // from the left.
+ target.objects[targetIndex] = source.objects[left++];
+ } else if (left < middle) {
+ // If both have elements, we need to compare.
+ const leftElement = UnsafeCast<JSAny>(source.objects[left]);
+ const rightElement = UnsafeCast<JSAny>(source.objects[right]);
+ if (CallCompare(leftElement, rightElement) <= 0) {
+ target.objects[targetIndex] = leftElement;
+ left++;
} else {
- // No elements on the left, but the right does, so we take
- // from the right.
- assert(left == middle);
- target.objects[targetIndex] = source.objects[right++];
+ target.objects[targetIndex] = rightElement;
+ right++;
}
+ } else {
+ // No elements on the left, but the right does, so we take
+ // from the right.
+ assert(left == middle);
+ target.objects[targetIndex] = source.objects[right++];
}
}
+}
- transitioning builtin
- TypedArrayMergeSort(implicit context: Context)(
- source: FixedArray, from: uintptr, to: uintptr, target: FixedArray,
- array: JSTypedArray, comparefn: Callable): JSAny {
- assert(to - from > 1);
- const middle: uintptr = from + ((to - from) >>> 1);
-
- // On the next recursion step source becomes target and vice versa.
- // This saves the copy of the relevant range from the original
- // array into a work array on each recursion step.
- if (middle - from > 1) {
- TypedArrayMergeSort(target, from, middle, source, array, comparefn);
- }
- if (to - middle > 1) {
- TypedArrayMergeSort(target, middle, to, source, array, comparefn);
- }
-
- TypedArrayMerge(source, from, middle, to, target);
-
- return Undefined;
+transitioning builtin
+TypedArrayMergeSort(implicit context: Context)(
+ source: FixedArray, from: uintptr, to: uintptr, target: FixedArray,
+ array: JSTypedArray, comparefn: Callable): JSAny {
+ assert(to - from > 1);
+ const middle: uintptr = from + ((to - from) >>> 1);
+
+ // On the next recursion step source becomes target and vice versa.
+ // This saves the copy of the relevant range from the original
+ // array into a work array on each recursion step.
+ if (middle - from > 1) {
+ TypedArrayMergeSort(target, from, middle, source, array, comparefn);
+ }
+ if (to - middle > 1) {
+ TypedArrayMergeSort(target, middle, to, source, array, comparefn);
}
- // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
- transitioning javascript builtin TypedArrayPrototypeSort(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSTypedArray {
- // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
- // throw a TypeError exception.
- const comparefnObj: JSAny = arguments.length > 0 ? arguments[0] : Undefined;
- if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
- ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj);
- }
+ TypedArrayMerge(source, from, middle, to, target);
- // 2. Let obj be the this value.
- const obj: JSAny = receiver;
+ return Undefined;
+}
- // 3. Let buffer be ? ValidateTypedArray(obj).
- // ValidateTypedArray currently returns the array, not the ViewBuffer.
- const array: JSTypedArray =
- ValidateTypedArray(context, obj, kBuiltinNameSort);
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
+transitioning javascript builtin TypedArrayPrototypeSort(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): JSTypedArray {
+ // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
+ // throw a TypeError exception.
+ const comparefnObj: JSAny = arguments[0];
+ if (comparefnObj != Undefined && !Is<Callable>(comparefnObj)) {
+ ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj);
+ }
- // 4. Let len be obj.[[ArrayLength]].
- const len: uintptr = array.length;
+ // 2. Let obj be the this value.
+ const obj: JSAny = receiver;
- // Arrays of length 1 or less are considered sorted.
- if (len < 2) return array;
+ // 3. Let buffer be ? ValidateTypedArray(obj).
+ // ValidateTypedArray currently returns the array, not the ViewBuffer.
+ const array: JSTypedArray =
+ ValidateTypedArray(context, obj, kBuiltinNameSort);
- // Default sorting is done in C++ using std::sort
- if (comparefnObj == Undefined) {
- return TypedArraySortFast(context, obj);
- }
+ // 4. Let len be obj.[[ArrayLength]].
+ const len: uintptr = array.length;
- const comparefn: Callable =
- Cast<Callable>(comparefnObj) otherwise unreachable;
- const accessor: TypedArrayAccessor =
- GetTypedArrayAccessor(array.elements_kind);
-
- // Prepare the two work arrays. All numbers are converted to tagged
- // objects first, and merge sorted between the two FixedArrays.
- // The result is then written back into the JSTypedArray.
- const work1: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
- const work2: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
-
- for (let i: uintptr = 0; i < len; ++i) {
- const element: Numeric = accessor.LoadNumeric(context, array, i);
- work1.objects[i] = element;
- work2.objects[i] = element;
- }
+ // Arrays of length 1 or less are considered sorted.
+ if (len < 2) return array;
+
+ // Default sorting is done in C++ using std::sort
+ if (comparefnObj == Undefined) {
+ return TypedArraySortFast(context, obj);
+ }
- TypedArrayMergeSort(work2, 0, len, work1, array, comparefn);
+ const comparefn: Callable =
+ Cast<Callable>(comparefnObj) otherwise unreachable;
+ const accessor: TypedArrayAccessor =
+ GetTypedArrayAccessor(array.elements_kind);
+
+ // Prepare the two work arrays. All numbers are converted to tagged
+ // objects first, and merge sorted between the two FixedArrays.
+ // The result is then written back into the JSTypedArray.
+ const work1: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
+ const work2: FixedArray = AllocateZeroedFixedArray(Convert<intptr>(len));
+
+ for (let i: uintptr = 0; i < len; ++i) {
+ const element: Numeric = accessor.LoadNumeric(array, i);
+ work1.objects[i] = element;
+ work2.objects[i] = element;
+ }
- // work1 contains the sorted numbers. Write them back.
- for (let i: uintptr = 0; i < len; ++i) {
- accessor.StoreNumeric(
- context, array, i, UnsafeCast<Numeric>(work1.objects[i]));
- }
+ TypedArrayMergeSort(work2, 0, len, work1, array, comparefn);
- return array;
+ // work1 contains the sorted numbers. Write them back.
+ for (let i: uintptr = 0; i < len; ++i) {
+ accessor.StoreNumeric(
+ context, array, i, UnsafeCast<Numeric>(work1.objects[i]));
}
+
+ return array;
+}
}
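Besides the re-indentation, the sort() hunk carries two small edits: the comparator check now uses Is<Callable> instead of TaggedIsCallable, and LoadNumeric no longer takes a Context (matching the LoadNumericFn signature change in typed-array.tq below). The merge sort itself is unchanged: two work arrays are filled with tagged copies of the elements, and the recursion swaps source and target at each level so every merge writes into the other buffer. A minimal TypeScript sketch of that ping-pong shape, assuming a plain numeric comparator; the mergeSort name and the number[] buffers are illustrative only, and the real builtin compares through CallCompare, which also rechecks buffer detachment after every user callback:

function mergeSort(
  source: number[], from: number, to: number, target: number[],
  cmp: (a: number, b: number) => number,
): void {
  // Mirrors the Torque assert: callers only pass ranges with more than one element.
  const middle = from + ((to - from) >>> 1);

  // Swap source and target on recursion so each level merges into the other
  // buffer instead of copying the range back on every step.
  if (middle - from > 1) mergeSort(target, from, middle, source, cmp);
  if (to - middle > 1) mergeSort(target, middle, to, source, cmp);

  // Merge the sorted runs [from, middle) and [middle, to) from source into target.
  let left = from;
  let right = middle;
  for (let i = from; i < to; ++i) {
    if (left < middle && (right >= to || cmp(source[left], source[right]) <= 0)) {
      target[i] = source[left++];
    } else {
      target[i] = source[right++];
    }
  }
}

// Both work arrays start as copies of the data; the sorted result lands in
// work1, which the builtin then writes back into the typed array.
const data = [3, 1, 2];
const work1 = data.slice();
const work2 = data.slice();
if (data.length > 1) mergeSort(work2, 0, data.length, work1, (a, b) => a - b);
console.log(work1); // [1, 2, 3]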
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
index c0adc5f19e..73d9e80c61 100644
--- a/deps/v8/src/builtins/typed-array-subarray.tq
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -3,60 +3,60 @@
// found in the LICENSE file.
namespace typed_array {
- // ES %TypedArray%.prototype.subarray
- transitioning javascript builtin TypedArrayPrototypeSubArray(
- js-implicit context: NativeContext,
- receiver: JSAny)(...arguments): JSTypedArray {
- const methodName: constexpr string = '%TypedArray%.prototype.subarray';
-
- // 1. Let O be the this value.
- // 3. If O does not have a [[TypedArrayName]] internal slot, throw a
- // TypeError exception.
- const source = Cast<JSTypedArray>(receiver)
- otherwise ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver, methodName);
-
- // 5. Let buffer be O.[[ViewedArrayBuffer]].
- const buffer = typed_array::GetBuffer(source);
-
- // 6. Let srcLength be O.[[ArrayLength]].
- const srcLength: uintptr = source.length;
-
- // 7. Let relativeBegin be ? ToInteger(begin).
- // 8. If relativeBegin < 0, let beginIndex be max((srcLength +
- // relativeBegin), 0); else let beginIndex be min(relativeBegin,
- // srcLength).
- const arg0 = arguments[0];
- const begin: uintptr =
- arg0 != Undefined ? ConvertToRelativeIndex(arg0, srcLength) : 0;
-
- // 9. If end is undefined, let relativeEnd be srcLength;
- // else, let relativeEnd be ? ToInteger(end).
- // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd),
- // 0); else let endIndex be min(relativeEnd, srcLength).
- const arg1 = arguments[1];
- const end: uintptr =
- arg1 != Undefined ? ConvertToRelativeIndex(arg1, srcLength) : srcLength;
-
- // 11. Let newLength be max(endIndex - beginIndex, 0).
- const newLength: uintptr = Unsigned(IntPtrMax(Signed(end - begin), 0));
-
- // 12. Let constructorName be the String value of O.[[TypedArrayName]].
- // 13. Let elementSize be the Number value of the Element Size value
- // specified in Table 52 for constructorName.
- const elementsInfo = typed_array::GetTypedArrayElementsInfo(source);
-
- // 14. Let srcByteOffset be O.[[ByteOffset]].
- const srcByteOffset: uintptr = source.byte_offset;
-
- // 15. Let beginByteOffset be srcByteOffset + beginIndex Ɨ elementSize.
- const beginByteOffset =
- srcByteOffset + elementsInfo.CalculateByteLength(begin)
- otherwise ThrowRangeError(MessageTemplate::kInvalidArrayBufferLength);
-
- // 16. Let argumentsList be Ā« buffer, beginByteOffset, newLength Ā».
- // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
- return TypedArraySpeciesCreateByBuffer(
- methodName, source, buffer, beginByteOffset, newLength);
- }
+// ES %TypedArray%.prototype.subarray
+transitioning javascript builtin TypedArrayPrototypeSubArray(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(...arguments): JSTypedArray {
+ const methodName: constexpr string = '%TypedArray%.prototype.subarray';
+
+ // 1. Let O be the this value.
+ // 3. If O does not have a [[TypedArrayName]] internal slot, throw a
+ // TypeError exception.
+ const source = Cast<JSTypedArray>(receiver)
+ otherwise ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, methodName);
+
+ // 5. Let buffer be O.[[ViewedArrayBuffer]].
+ const buffer = typed_array::GetBuffer(source);
+
+ // 6. Let srcLength be O.[[ArrayLength]].
+ const srcLength: uintptr = source.length;
+
+ // 7. Let relativeBegin be ? ToInteger(begin).
+ // 8. If relativeBegin < 0, let beginIndex be max((srcLength +
+ // relativeBegin), 0); else let beginIndex be min(relativeBegin,
+ // srcLength).
+ const arg0 = arguments[0];
+ const begin: uintptr =
+ arg0 != Undefined ? ConvertToRelativeIndex(arg0, srcLength) : 0;
+
+ // 9. If end is undefined, let relativeEnd be srcLength;
+ // else, let relativeEnd be ? ToInteger(end).
+ // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd),
+ // 0); else let endIndex be min(relativeEnd, srcLength).
+ const arg1 = arguments[1];
+ const end: uintptr =
+ arg1 != Undefined ? ConvertToRelativeIndex(arg1, srcLength) : srcLength;
+
+ // 11. Let newLength be max(endIndex - beginIndex, 0).
+ const newLength: uintptr = Unsigned(IntPtrMax(Signed(end - begin), 0));
+
+ // 12. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 13. Let elementSize be the Number value of the Element Size value
+ // specified in Table 52 for constructorName.
+ const elementsInfo = typed_array::GetTypedArrayElementsInfo(source);
+
+ // 14. Let srcByteOffset be O.[[ByteOffset]].
+ const srcByteOffset: uintptr = source.byte_offset;
+
+ // 15. Let beginByteOffset be srcByteOffset + beginIndex Ɨ elementSize.
+ const beginByteOffset =
+ srcByteOffset + elementsInfo.CalculateByteLength(begin)
+ otherwise ThrowRangeError(MessageTemplate::kInvalidArrayBufferLength);
+
+ // 16. Let argumentsList be Ā« buffer, beginByteOffset, newLength Ā».
+ // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ return TypedArraySpeciesCreateByBuffer(
+ methodName, source, buffer, beginByteOffset, newLength);
+}
}
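subarray() is also only re-indented. Unlike slice(), it creates a view over the same buffer via TypedArraySpeciesCreateByBuffer, so no elements are copied. A short TypeScript illustration of that observable difference (plain JS semantics, not the Torque internals):

const base = new Int16Array([1, 2, 3, 4, 5]);

// beginByteOffset = srcByteOffset + beginIndex * elementSize = 0 + 1 * 2.
const sub = base.subarray(1, 4);
console.log(sub.byteOffset, sub.length); // 2 3

// The two views alias the same ArrayBuffer, so writes are visible in both.
sub[0] = 42;
console.log(base[1]); // 42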
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 1b23d3f572..033de32a1d 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -5,271 +5,267 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
- // Naming convention from elements.cc. We have a similar intent but implement
- // fastpaths using generics instead of using a class hierarchy for elements
- // kinds specific implementations.
- type Uint8Elements extends ElementsKind;
- type Int8Elements extends ElementsKind;
- type Uint16Elements extends ElementsKind;
- type Int16Elements extends ElementsKind;
- type Uint32Elements extends ElementsKind;
- type Int32Elements extends ElementsKind;
- type Float32Elements extends ElementsKind;
- type Float64Elements extends ElementsKind;
- type Uint8ClampedElements extends ElementsKind;
- type BigUint64Elements extends ElementsKind;
- type BigInt64Elements extends ElementsKind;
+// Naming convention from elements.cc. We have a similar intent but implement
+// fastpaths using generics instead of using a class hierarchy for elements
+// kinds specific implementations.
+type Uint8Elements extends ElementsKind;
+type Int8Elements extends ElementsKind;
+type Uint16Elements extends ElementsKind;
+type Int16Elements extends ElementsKind;
+type Uint32Elements extends ElementsKind;
+type Int32Elements extends ElementsKind;
+type Float32Elements extends ElementsKind;
+type Float64Elements extends ElementsKind;
+type Uint8ClampedElements extends ElementsKind;
+type BigUint64Elements extends ElementsKind;
+type BigInt64Elements extends ElementsKind;
- @export
- struct TypedArrayElementsInfo {
- // Calculates the number of bytes required for specified number of elements.
- macro CalculateByteLength(length: uintptr): uintptr labels IfInvalid {
- if (length > kTypedArrayMaxLength) goto IfInvalid;
- const maxArrayLength = kArrayBufferMaxByteLength >>> this.sizeLog2;
- if (length > maxArrayLength) goto IfInvalid;
- const byteLength = length << this.sizeLog2;
- return byteLength;
- }
-
- // Calculates the maximum number of elements supported by a specified number
- // of bytes.
- macro CalculateLength(byteLength: uintptr): uintptr labels IfInvalid {
- const length = byteLength >>> this.sizeLog2;
- if (length > kTypedArrayMaxLength) goto IfInvalid;
- return length;
- }
-
- // Determines if `bytes` (byte offset or length) cannot be evenly divided by
- // element size.
- macro IsUnaligned(bytes: uintptr): bool {
- // Exploits the fact the element size is a power of 2. Determining whether
- // there is remainder (not aligned) can be achieved efficiently with bit
- // masking. Shift is safe as sizeLog2 can be 3 at most (see
- // ElementsKindToShiftSize).
- return (bytes & ((1 << this.sizeLog2) - 1)) != 0;
- }
-
- sizeLog2: uintptr;
- kind: ElementsKind;
+@export
+struct TypedArrayElementsInfo {
+ // Calculates the number of bytes required for specified number of elements.
+ macro CalculateByteLength(length: uintptr): uintptr labels IfInvalid {
+ if (length > kTypedArrayMaxLength) goto IfInvalid;
+ const maxArrayLength = kArrayBufferMaxByteLength >>> this.sizeLog2;
+ if (length > maxArrayLength) goto IfInvalid;
+ const byteLength = length << this.sizeLog2;
+ return byteLength;
}
- extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
- void;
- extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
- Context, JSAny, constexpr string): JSTypedArray;
- extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
- RawPtr, RawPtr, uintptr): void;
- extern macro TypedArrayBuiltinsAssembler::CallCMemmove(
- RawPtr, RawPtr, uintptr): void;
- extern macro TypedArrayBuiltinsAssembler::CallCMemset(
- RawPtr, intptr, uintptr): void;
- extern macro TypedArrayBuiltinsAssembler::GetBuffer(
- implicit context: Context)(JSTypedArray): JSArrayBuffer;
- extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
- JSTypedArray): TypedArrayElementsInfo;
- extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map):
- TypedArrayElementsInfo;
- extern macro TypedArrayBuiltinsAssembler::IsUint8ElementsKind(ElementsKind):
- bool;
- extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
- ElementsKind): bool;
- extern macro LoadFixedTypedArrayElementAsTagged(
- RawPtr, uintptr, constexpr ElementsKind): Numeric;
- extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
- Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind);
- extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
- Context, JSTypedArray, uintptr, JSAny,
- constexpr ElementsKind) labels IfDetached;
+ // Calculates the maximum number of elements supported by a specified number
+ // of bytes.
+ macro CalculateLength(byteLength: uintptr): uintptr labels IfInvalid {
+ const length = byteLength >>> this.sizeLog2;
+ if (length > kTypedArrayMaxLength) goto IfInvalid;
+ return length;
+ }
- type LoadNumericFn = builtin(Context, JSTypedArray, uintptr) => Numeric;
- type StoreNumericFn = builtin(Context, JSTypedArray, uintptr, Numeric) => Smi;
- type StoreJSAnyFn = builtin(Context, JSTypedArray, uintptr, JSAny) => Smi;
+ // Determines if `bytes` (byte offset or length) cannot be evenly divided by
+ // element size.
+ macro IsUnaligned(bytes: uintptr): bool {
+ // Exploits the fact the element size is a power of 2. Determining whether
+ // there is remainder (not aligned) can be achieved efficiently with bit
+ // masking. Shift is safe as sizeLog2 can be 3 at most (see
+ // ElementsKindToShiftSize).
+ return (bytes & ((1 << this.sizeLog2) - 1)) != 0;
+ }
- // The result codes returned by StoreNumericFn and StoreJSAnyFn builtins.
- const kStoreSucceded: Smi = 0;
- const kStoreFailureArrayDetached: Smi = 1;
+ sizeLog2: uintptr;
+ kind: ElementsKind;
+}
+extern runtime TypedArrayCopyElements(
+ Context, JSTypedArray, Object, Number): void;
+extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
+ Context, JSAny, constexpr string): JSTypedArray;
- struct TypedArrayAccessor {
- macro LoadNumeric(
- context: Context, array: JSTypedArray, index: uintptr): Numeric {
- const loadfn: LoadNumericFn = this.loadNumericFn;
- return loadfn(context, array, index);
- }
+extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
+ RawPtr, RawPtr, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::CallCMemmove(
+ RawPtr, RawPtr, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::CallCMemset(
+ RawPtr, intptr, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::GetBuffer(implicit context: Context)(
+ JSTypedArray): JSArrayBuffer;
+extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
+ JSTypedArray): TypedArrayElementsInfo;
+extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map):
+ TypedArrayElementsInfo;
+extern macro TypedArrayBuiltinsAssembler::IsUint8ElementsKind(ElementsKind):
+ bool;
+extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(ElementsKind):
+ bool;
+extern macro LoadFixedTypedArrayElementAsTagged(
+ RawPtr, uintptr, constexpr ElementsKind): Numeric;
+extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
+ Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind);
+extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
+ Context, JSTypedArray, uintptr, JSAny, constexpr ElementsKind)
+ labels IfDetached;
- macro StoreNumeric(
- context: Context, array: JSTypedArray, index: uintptr, value: Numeric) {
- const storefn: StoreNumericFn = this.storeNumericFn;
- const result = storefn(context, array, index, value);
- assert(result == kStoreSucceded);
- }
+type LoadNumericFn = builtin(JSTypedArray, uintptr) => Numeric;
+type StoreNumericFn = builtin(Context, JSTypedArray, uintptr, Numeric) => Smi;
+type StoreJSAnyFn = builtin(Context, JSTypedArray, uintptr, JSAny) => Smi;
- macro StoreJSAny(
- context: Context, array: JSTypedArray, index: uintptr, value: JSAny)
- labels IfDetached {
- const storefn: StoreJSAnyFn = this.storeJSAnyFn;
- const result = storefn(context, array, index, value);
- if (result == kStoreFailureArrayDetached) {
- goto IfDetached;
- }
- assert(result == kStoreSucceded);
- }
+// The result codes returned by StoreNumericFn and StoreJSAnyFn builtins.
+const kStoreSucceded: Smi = 0;
+const kStoreFailureArrayDetached: Smi = 1;
- loadNumericFn: LoadNumericFn;
- storeNumericFn: StoreNumericFn;
- storeJSAnyFn: StoreJSAnyFn;
+struct TypedArrayAccessor {
+ macro LoadNumeric(array: JSTypedArray, index: uintptr): Numeric {
+ const loadfn: LoadNumericFn = this.loadNumericFn;
+ return loadfn(array, index);
}
- macro GetTypedArrayAccessor<T : type extends ElementsKind>():
- TypedArrayAccessor {
- const loadNumericFn = LoadTypedElement<T>;
- const storeNumericFn = StoreTypedElementNumeric<T>;
- const storeJSAnyFn = StoreTypedElementJSAny<T>;
- return TypedArrayAccessor{loadNumericFn, storeNumericFn, storeJSAnyFn};
+ macro StoreNumeric(
+ context: Context, array: JSTypedArray, index: uintptr, value: Numeric) {
+ const storefn: StoreNumericFn = this.storeNumericFn;
+ const result = storefn(context, array, index, value);
+ assert(result == kStoreSucceded);
}
- macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
- if (IsElementsKindGreaterThan(
- elementsKind, ElementsKind::UINT32_ELEMENTS)) {
- if (elementsKind == ElementsKind::INT32_ELEMENTS) {
- return GetTypedArrayAccessor<Int32Elements>();
- } else if (elementsKind == ElementsKind::FLOAT32_ELEMENTS) {
- return GetTypedArrayAccessor<Float32Elements>();
- } else if (elementsKind == ElementsKind::FLOAT64_ELEMENTS) {
- return GetTypedArrayAccessor<Float64Elements>();
- } else if (elementsKind == ElementsKind::UINT8_CLAMPED_ELEMENTS) {
- return GetTypedArrayAccessor<Uint8ClampedElements>();
- } else if (elementsKind == ElementsKind::BIGUINT64_ELEMENTS) {
- return GetTypedArrayAccessor<BigUint64Elements>();
- } else if (elementsKind == ElementsKind::BIGINT64_ELEMENTS) {
- return GetTypedArrayAccessor<BigInt64Elements>();
- }
- } else {
- if (elementsKind == ElementsKind::UINT8_ELEMENTS) {
- return GetTypedArrayAccessor<Uint8Elements>();
- } else if (elementsKind == ElementsKind::INT8_ELEMENTS) {
- return GetTypedArrayAccessor<Int8Elements>();
- } else if (elementsKind == ElementsKind::UINT16_ELEMENTS) {
- return GetTypedArrayAccessor<Uint16Elements>();
- } else if (elementsKind == ElementsKind::INT16_ELEMENTS) {
- return GetTypedArrayAccessor<Int16Elements>();
- } else if (elementsKind == ElementsKind::UINT32_ELEMENTS) {
- return GetTypedArrayAccessor<Uint32Elements>();
- }
+ macro StoreJSAny(
+ context: Context, array: JSTypedArray, index: uintptr, value: JSAny)
+ labels IfDetached {
+ const storefn: StoreJSAnyFn = this.storeJSAnyFn;
+ const result = storefn(context, array, index, value);
+ if (result == kStoreFailureArrayDetached) {
+ goto IfDetached;
}
- unreachable;
+ assert(result == kStoreSucceded);
}
- extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
- JSTypedArray, ByteArray, uintptr): void;
- extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
- JSTypedArray, RawPtr, uintptr): void;
-
- // AttachedJSTypedArray guards that the array's buffer is not detached.
- transient type AttachedJSTypedArray extends JSTypedArray;
+ loadNumericFn: LoadNumericFn;
+ storeNumericFn: StoreNumericFn;
+ storeJSAnyFn: StoreJSAnyFn;
+}
- macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
- labels Detached {
- if (IsDetachedBuffer(array.buffer)) goto Detached;
- return %RawDownCast<AttachedJSTypedArray>(array);
- }
+macro GetTypedArrayAccessor<T : type extends ElementsKind>():
+ TypedArrayAccessor {
+ const loadNumericFn = LoadTypedElement<T>;
+ const storeNumericFn = StoreTypedElementNumeric<T>;
+ const storeJSAnyFn = StoreTypedElementJSAny<T>;
+ return TypedArrayAccessor{loadNumericFn, storeNumericFn, storeJSAnyFn};
+}
- struct AttachedJSTypedArrayWitness {
- macro Get(): AttachedJSTypedArray {
- return this.unstable;
+macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
+ if (IsElementsKindGreaterThan(elementsKind, ElementsKind::UINT32_ELEMENTS)) {
+ if (elementsKind == ElementsKind::INT32_ELEMENTS) {
+ return GetTypedArrayAccessor<Int32Elements>();
+ } else if (elementsKind == ElementsKind::FLOAT32_ELEMENTS) {
+ return GetTypedArrayAccessor<Float32Elements>();
+ } else if (elementsKind == ElementsKind::FLOAT64_ELEMENTS) {
+ return GetTypedArrayAccessor<Float64Elements>();
+ } else if (elementsKind == ElementsKind::UINT8_CLAMPED_ELEMENTS) {
+ return GetTypedArrayAccessor<Uint8ClampedElements>();
+ } else if (elementsKind == ElementsKind::BIGUINT64_ELEMENTS) {
+ return GetTypedArrayAccessor<BigUint64Elements>();
+ } else if (elementsKind == ElementsKind::BIGINT64_ELEMENTS) {
+ return GetTypedArrayAccessor<BigInt64Elements>();
}
-
- macro GetStable(): JSTypedArray {
- return this.stable;
+ } else {
+ if (elementsKind == ElementsKind::UINT8_ELEMENTS) {
+ return GetTypedArrayAccessor<Uint8Elements>();
+ } else if (elementsKind == ElementsKind::INT8_ELEMENTS) {
+ return GetTypedArrayAccessor<Int8Elements>();
+ } else if (elementsKind == ElementsKind::UINT16_ELEMENTS) {
+ return GetTypedArrayAccessor<Uint16Elements>();
+ } else if (elementsKind == ElementsKind::INT16_ELEMENTS) {
+ return GetTypedArrayAccessor<Int16Elements>();
+ } else if (elementsKind == ElementsKind::UINT32_ELEMENTS) {
+ return GetTypedArrayAccessor<Uint32Elements>();
}
+ }
+ unreachable;
+}
- macro Recheck() labels Detached {
- if (IsDetachedBuffer(this.stable.buffer)) goto Detached;
- this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
- }
+extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
+ JSTypedArray, ByteArray, uintptr): void;
+extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
+ JSTypedArray, RawPtr, uintptr): void;
- macro Load(implicit context: Context)(k: uintptr): JSAny {
- const lf: LoadNumericFn = this.loadfn;
- return lf(context, this.unstable, k);
- }
+// AttachedJSTypedArray guards that the array's buffer is not detached.
+transient type AttachedJSTypedArray extends JSTypedArray;
- stable: JSTypedArray;
- unstable: AttachedJSTypedArray;
- loadfn: LoadNumericFn;
- }
+macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
+ labels Detached {
+ if (IsDetachedBuffer(array.buffer)) goto Detached;
+ return %RawDownCast<AttachedJSTypedArray>(array);
+}
- macro NewAttachedJSTypedArrayWitness(array: AttachedJSTypedArray):
- AttachedJSTypedArrayWitness {
- const kind = array.elements_kind;
- const accessor: TypedArrayAccessor = GetTypedArrayAccessor(kind);
- return AttachedJSTypedArrayWitness{
- stable: array,
- unstable: array,
- loadfn: accessor.loadNumericFn
- };
+struct AttachedJSTypedArrayWitness {
+ macro Get(): AttachedJSTypedArray {
+ return this.unstable;
}
- macro KindForArrayType<T : type extends ElementsKind>():
- constexpr ElementsKind;
- KindForArrayType<Uint8Elements>(): constexpr ElementsKind {
- return ElementsKind::UINT8_ELEMENTS;
- }
- KindForArrayType<Int8Elements>(): constexpr ElementsKind {
- return ElementsKind::INT8_ELEMENTS;
- }
- KindForArrayType<Uint16Elements>(): constexpr ElementsKind {
- return ElementsKind::UINT16_ELEMENTS;
- }
- KindForArrayType<Int16Elements>(): constexpr ElementsKind {
- return ElementsKind::INT16_ELEMENTS;
- }
- KindForArrayType<Uint32Elements>(): constexpr ElementsKind {
- return ElementsKind::UINT32_ELEMENTS;
- }
- KindForArrayType<Int32Elements>(): constexpr ElementsKind {
- return ElementsKind::INT32_ELEMENTS;
- }
- KindForArrayType<Float32Elements>(): constexpr ElementsKind {
- return ElementsKind::FLOAT32_ELEMENTS;
- }
- KindForArrayType<Float64Elements>(): constexpr ElementsKind {
- return ElementsKind::FLOAT64_ELEMENTS;
- }
- KindForArrayType<Uint8ClampedElements>(): constexpr ElementsKind {
- return ElementsKind::UINT8_CLAMPED_ELEMENTS;
- }
- KindForArrayType<BigUint64Elements>(): constexpr ElementsKind {
- return ElementsKind::BIGUINT64_ELEMENTS;
- }
- KindForArrayType<BigInt64Elements>(): constexpr ElementsKind {
- return ElementsKind::BIGINT64_ELEMENTS;
+ macro GetStable(): JSTypedArray {
+ return this.stable;
}
- builtin LoadTypedElement<T : type extends ElementsKind>(
- _context: Context, array: JSTypedArray, index: uintptr): Numeric {
- return LoadFixedTypedArrayElementAsTagged(
- array.data_ptr, index, KindForArrayType<T>());
+ macro Recheck() labels Detached {
+ if (IsDetachedBuffer(this.stable.buffer)) goto Detached;
+ this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
}
- builtin StoreTypedElementNumeric<T : type extends ElementsKind>(
- context: Context, typedArray: JSTypedArray, index: uintptr,
- value: Numeric): Smi {
- StoreJSTypedArrayElementFromNumeric(
- context, typedArray, index, value, KindForArrayType<T>());
- return kStoreSucceded;
+ macro Load(implicit context: Context)(k: uintptr): JSAny {
+ const lf: LoadNumericFn = this.loadfn;
+ return lf(this.unstable, k);
}
- // Returns True on sucess or False if the typedArrays was detached.
- builtin StoreTypedElementJSAny<T : type extends ElementsKind>(
- context: Context, typedArray: JSTypedArray, index: uintptr,
- value: JSAny): Smi {
- try {
- StoreJSTypedArrayElementFromTagged(
- context, typedArray, index, value, KindForArrayType<T>())
- otherwise IfDetached;
- }
- label IfDetached {
- return kStoreFailureArrayDetached;
- }
- return kStoreSucceded;
+ stable: JSTypedArray;
+ unstable: AttachedJSTypedArray;
+ loadfn: LoadNumericFn;
+}
+
+macro NewAttachedJSTypedArrayWitness(array: AttachedJSTypedArray):
+ AttachedJSTypedArrayWitness {
+ const kind = array.elements_kind;
+ const accessor: TypedArrayAccessor = GetTypedArrayAccessor(kind);
+ return AttachedJSTypedArrayWitness{
+ stable: array,
+ unstable: array,
+ loadfn: accessor.loadNumericFn
+ };
+}
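// Hedged C++ sketch of the "validate once, recheck after side effects"
// pattern that AttachedJSTypedArray and AttachedJSTypedArrayWitness express
// above; the types below are invented for illustration and are not part of
// the upstream change.
#include <optional>

struct TypedArray {
  bool buffer_detached = false;
};

// Rough analogue of EnsureAttached: succeeds only while the buffer is
// attached, otherwise signals the "Detached" path to the caller.
inline std::optional<TypedArray*> EnsureAttached(TypedArray* array) {
  if (array->buffer_detached) return std::nullopt;
  return array;
}

// Rough analogue of AttachedJSTypedArrayWitness: `unstable` is only valid
// until user code runs; Recheck() re-validates against the stable reference.
struct Witness {
  TypedArray* stable;
  TypedArray* unstable;

  bool Recheck() {
    if (stable->buffer_detached) return false;
    unstable = stable;
    return true;
  }
};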
+
+macro KindForArrayType<T : type extends ElementsKind>(): constexpr ElementsKind;
+KindForArrayType<Uint8Elements>(): constexpr ElementsKind {
+ return ElementsKind::UINT8_ELEMENTS;
+}
+KindForArrayType<Int8Elements>(): constexpr ElementsKind {
+ return ElementsKind::INT8_ELEMENTS;
+}
+KindForArrayType<Uint16Elements>(): constexpr ElementsKind {
+ return ElementsKind::UINT16_ELEMENTS;
+}
+KindForArrayType<Int16Elements>(): constexpr ElementsKind {
+ return ElementsKind::INT16_ELEMENTS;
+}
+KindForArrayType<Uint32Elements>(): constexpr ElementsKind {
+ return ElementsKind::UINT32_ELEMENTS;
+}
+KindForArrayType<Int32Elements>(): constexpr ElementsKind {
+ return ElementsKind::INT32_ELEMENTS;
+}
+KindForArrayType<Float32Elements>(): constexpr ElementsKind {
+ return ElementsKind::FLOAT32_ELEMENTS;
+}
+KindForArrayType<Float64Elements>(): constexpr ElementsKind {
+ return ElementsKind::FLOAT64_ELEMENTS;
+}
+KindForArrayType<Uint8ClampedElements>(): constexpr ElementsKind {
+ return ElementsKind::UINT8_CLAMPED_ELEMENTS;
+}
+KindForArrayType<BigUint64Elements>(): constexpr ElementsKind {
+ return ElementsKind::BIGUINT64_ELEMENTS;
+}
+KindForArrayType<BigInt64Elements>(): constexpr ElementsKind {
+ return ElementsKind::BIGINT64_ELEMENTS;
+}
+
+builtin LoadTypedElement<T : type extends ElementsKind>(
+ array: JSTypedArray, index: uintptr): Numeric {
+ return LoadFixedTypedArrayElementAsTagged(
+ array.data_ptr, index, KindForArrayType<T>());
+}
+
+builtin StoreTypedElementNumeric<T : type extends ElementsKind>(
+ context: Context, typedArray: JSTypedArray, index: uintptr,
+ value: Numeric): Smi {
+ StoreJSTypedArrayElementFromNumeric(
+ context, typedArray, index, value, KindForArrayType<T>());
+ return kStoreSucceded;
+}
+
+// Returns kStoreSucceded on success, or kStoreFailureArrayDetached if the
+// typedArray was detached.
+builtin StoreTypedElementJSAny<T : type extends ElementsKind>(
+ context: Context, typedArray: JSTypedArray, index: uintptr,
+ value: JSAny): Smi {
+ try {
+ StoreJSTypedArrayElementFromTagged(
+ context, typedArray, index, value, KindForArrayType<T>())
+ otherwise IfDetached;
+ } label IfDetached {
+ return kStoreFailureArrayDetached;
}
+ return kStoreSucceded;
+}
}
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
new file mode 100644
index 0000000000..097e39d430
--- /dev/null
+++ b/deps/v8/src/builtins/wasm.tq
@@ -0,0 +1,273 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-wasm-gen.h'
+
+namespace runtime {
+extern runtime WasmMemoryGrow(Context, WasmInstanceObject, Smi): Smi;
+extern runtime WasmRefFunc(Context, WasmInstanceObject, Smi): JSAny;
+extern runtime WasmFunctionTableGet(
+ Context, WasmInstanceObject, Smi, Smi): JSAny;
+extern runtime WasmFunctionTableSet(
+ Context, WasmInstanceObject, Smi, Smi, Object): JSAny;
+extern runtime ThrowWasmError(Context, Smi): JSAny;
+extern runtime Throw(Context, Object): JSAny;
+extern runtime ReThrow(Context, Object): JSAny;
+extern runtime WasmStackGuard(Context): JSAny;
+extern runtime ThrowWasmStackOverflow(Context): JSAny;
+extern runtime WasmTraceMemory(Context, Smi): JSAny;
+}
+
+namespace wasm {
+const kFuncTableType:
+ constexpr int31 generates 'wasm::ValueType::Kind::kFuncRef';
+
+extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject;
+
+// WasmInstanceObject has a field layout that Torque can't handle yet.
+// TODO(bbudge) Eliminate these functions when Torque is ready.
+extern macro WasmBuiltinsAssembler::LoadContextFromInstance(WasmInstanceObject):
+ NativeContext;
+extern macro WasmBuiltinsAssembler::LoadTablesFromInstance(WasmInstanceObject):
+ FixedArray;
+extern macro WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
+ WasmInstanceObject): FixedArray;
+
+macro LoadContextFromFrame(): NativeContext {
+ return LoadContextFromInstance(LoadInstanceFromFrame());
+}
+
+builtin WasmInt32ToHeapNumber(val: int32): HeapNumber {
+ return AllocateHeapNumberWithValue(Convert<float64>(val));
+}
+
+builtin WasmTaggedNonSmiToInt32(implicit context: Context)(val: JSAnyNotSmi):
+ int32 {
+ return ChangeTaggedNonSmiToInt32(val);
+}
+
+builtin WasmTaggedToFloat64(implicit context: Context)(val: JSAny): float64 {
+ return ChangeTaggedToFloat64(val);
+}
+
+builtin WasmMemoryGrow(numPages: int32): int32 {
+ if (!IsValidPositiveSmi(ChangeInt32ToIntPtr(numPages)))
+ return Int32Constant(-1);
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const context: NativeContext = LoadContextFromInstance(instance);
+ const result: Smi =
+ runtime::WasmMemoryGrow(context, instance, SmiFromInt32(numPages));
+ return SmiToInt32(result);
+}
+
+builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const entryIndex: intptr = ChangeInt32ToIntPtr(index);
+ try {
+ assert(IsValidPositiveSmi(tableIndex));
+ if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
+
+ const tables: FixedArray = LoadTablesFromInstance(instance);
+ const table: WasmTableObject = %RawDownCast<WasmTableObject>(
+ LoadFixedArrayElement(tables, tableIndex));
+ const entriesCount: intptr = Convert<intptr, Smi>(table.current_length);
+ if (entryIndex >= entriesCount) goto IndexOutOfRange;
+
+ const entries: FixedArray = table.entries;
+ const entry: Object = LoadFixedArrayElement(entries, entryIndex);
+
+ try {
+ const entryObject: HeapObject =
+ TaggedToHeapObject<HeapObject>(entry) otherwise ReturnEntry;
+ if (IsTuple2Map(entryObject.map)) goto CallRuntime;
+ goto ReturnEntry;
+ } label ReturnEntry {
+ return entry;
+ }
+ } label CallRuntime deferred {
+ tail runtime::WasmFunctionTableGet(
+ LoadContextFromInstance(instance), instance, SmiFromIntPtr(tableIndex),
+ SmiFromIntPtr(entryIndex));
+ } label IndexOutOfRange deferred {
+ tail ThrowWasmTrapTableOutOfBounds();
+ }
+}
+
+builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const entryIndex: intptr = ChangeInt32ToIntPtr(index);
+ try {
+ assert(IsValidPositiveSmi(tableIndex));
+ if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
+
+ const tables: FixedArray = LoadTablesFromInstance(instance);
+ const table: WasmTableObject = %RawDownCast<WasmTableObject>(
+ LoadFixedArrayElement(tables, tableIndex));
+
+ // Fall back to the runtime to set funcrefs, since we have to update
+ // function dispatch tables.
+ const tableType: Smi = table.raw_type;
+ if (tableType == SmiConstant(kFuncTableType)) goto CallRuntime;
+
+ const entriesCount: intptr = Convert<intptr, Smi>(table.current_length);
+ if (entryIndex >= entriesCount) goto IndexOutOfRange;
+
+ const entries: FixedArray = table.entries;
+ StoreFixedArrayElement(entries, entryIndex, value);
+ return Undefined;
+ } label CallRuntime deferred {
+ tail runtime::WasmFunctionTableSet(
+ LoadContextFromInstance(instance), instance, SmiFromIntPtr(tableIndex),
+ SmiFromIntPtr(entryIndex), value);
+ } label IndexOutOfRange deferred {
+ tail ThrowWasmTrapTableOutOfBounds();
+ }
+}
+
+builtin WasmRefFunc(index: uint32): Object {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ try {
+ const table: FixedArray = LoadExternalFunctionsFromInstance(instance);
+ if (table == Undefined) goto CallRuntime;
+ const functionIndex: intptr = Signed(ChangeUint32ToWord(index));
+ const result: Object = LoadFixedArrayElement(table, functionIndex);
+ if (result == Undefined) goto CallRuntime;
+ return result;
+ } label CallRuntime deferred {
+ tail runtime::WasmRefFunc(
+ LoadContextFromInstance(instance), instance, SmiFromUint32(index));
+ }
+}
+
+builtin WasmThrow(exception: Object): JSAny {
+ tail runtime::Throw(LoadContextFromFrame(), exception);
+}
+
+builtin WasmRethrow(exception: Object): JSAny {
+ if (exception == Null) tail ThrowWasmTrapRethrowNullRef();
+ tail runtime::ReThrow(LoadContextFromFrame(), exception);
+}
+
+builtin WasmStackGuard(): JSAny {
+ tail runtime::WasmStackGuard(LoadContextFromFrame());
+}
+
+builtin WasmStackOverflow(): JSAny {
+ tail runtime::ThrowWasmStackOverflow(LoadContextFromFrame());
+}
+
+builtin WasmTraceMemory(info: Smi): JSAny {
+ tail runtime::WasmTraceMemory(LoadContextFromFrame(), info);
+}
+
+builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
+ const map: Map = GetFastPackedElementsJSArrayMap();
+ return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
+}
+
+extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never
+ labels Found, NotFound, Bailout;
+type OnNonExistent constexpr 'OnNonExistent';
+const kReturnUndefined: constexpr OnNonExistent
+ generates 'OnNonExistent::kReturnUndefined';
+extern macro SmiConstant(constexpr OnNonExistent): Smi;
+extern transitioning builtin GetPropertyWithReceiver(implicit context: Context)(
+ JSAny, Name, JSAny, Smi): JSAny;
+
+transitioning builtin WasmGetOwnProperty(implicit context: Context)(
+ object: Object, uniqueName: Name): JSAny {
+ try {
+ const heapObject: HeapObject =
+ TaggedToHeapObject(object) otherwise NotFound;
+ const receiver: JSReceiver =
+ Cast<JSReceiver>(heapObject) otherwise NotFound;
+ try {
+ TryHasOwnProperty(
+ receiver, receiver.map, receiver.instanceType, uniqueName)
+ otherwise Found, NotFound, Bailout;
+ } label Found {
+ tail GetPropertyWithReceiver(
+ receiver, uniqueName, receiver, SmiConstant(kReturnUndefined));
+ }
+ } label NotFound deferred {
+ return Undefined;
+ } label Bailout deferred {
+ unreachable;
+ }
+}
+
+// Trap builtins.
+
+builtin WasmTrap(error: Smi): JSAny {
+ tail runtime::ThrowWasmError(LoadContextFromFrame(), error);
+}
+
+builtin ThrowWasmTrapUnreachable(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapUnreachable));
+}
+
+builtin ThrowWasmTrapMemOutOfBounds(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapMemOutOfBounds));
+}
+
+builtin ThrowWasmTrapUnalignedAccess(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapUnalignedAccess));
+}
+
+builtin ThrowWasmTrapDivByZero(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDivByZero));
+}
+
+builtin ThrowWasmTrapDivUnrepresentable(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDivUnrepresentable));
+}
+
+builtin ThrowWasmTrapRemByZero(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRemByZero));
+}
+
+builtin ThrowWasmTrapFloatUnrepresentable(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFloatUnrepresentable));
+}
+
+builtin ThrowWasmTrapFuncInvalid(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncInvalid));
+}
+
+builtin ThrowWasmTrapFuncSigMismatch(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncSigMismatch));
+}
+
+builtin ThrowWasmTrapDataSegmentDropped(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapDataSegmentDropped));
+}
+
+builtin ThrowWasmTrapElemSegmentDropped(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapElemSegmentDropped));
+}
+
+builtin ThrowWasmTrapTableOutOfBounds(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapTableOutOfBounds));
+}
+
+builtin ThrowWasmTrapBrOnExnNullRef(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapBrOnExnNullRef));
+}
+
+builtin ThrowWasmTrapRethrowNullRef(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRethrowNullRef));
+}
+
+builtin ThrowWasmTrapNullDereference(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapNullDereference));
+}
+
+builtin ThrowWasmTrapIllegalCast(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapIllegalCast));
+}
+
+builtin ThrowWasmTrapArrayOutOfBounds(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayOutOfBounds));
+}
+}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 8d028c88f0..bfabe26292 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -1769,7 +1769,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// static
-// TODO(victor): merge steps 1, 2 and 3 when V8_REVERSE_JSARGS is set.
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -1781,19 +1780,40 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// NOTE: The order of args are reversed if V8_REVERSE_JSARGS
// rax contains the number of arguments, n, not counting the receiver.
- // 1. Make sure we have at least one argument.
+#ifdef V8_REVERSE_JSARGS
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ {
+ StackArgumentsAccessor args(rax);
+ __ movq(rdi, args.GetReceiverOperand());
+ }
+
+ // 2. Save the return address and drop the callable.
+ __ PopReturnAddressTo(rbx);
+ __ Pop(kScratchRegister);
+
+ // 3. Make sure we have at least one argument.
{
Label done;
__ testq(rax, rax);
__ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(rbx);
-#ifdef V8_REVERSE_JSARGS
- __ Pop(kScratchRegister); // Pop the receiver.
__ PushRoot(RootIndex::kUndefinedValue);
- __ Push(kScratchRegister);
+ __ incq(rax);
+ __ bind(&done);
+ }
+
+ // 4. Push back the return address one slot down on the stack (overwriting the
+ // original callable), making the original first argument the new receiver.
+ __ PushReturnAddressFrom(rbx);
+ __ decq(rax); // One fewer argument (first argument is new receiver).
+
#else
+ // 1. Make sure we have at least one argument.
+ {
+ Label done;
+ __ testq(rax, rax);
+ __ j(not_zero, &done, Label::kNear);
+ __ PopReturnAddressTo(rbx);
__ PushRoot(RootIndex::kUndefinedValue);
-#endif
__ PushReturnAddressFrom(rbx);
__ incq(rax);
__ bind(&done);
@@ -1805,14 +1825,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ movq(rdi, args.GetReceiverOperand());
}
-#ifdef V8_REVERSE_JSARGS
- // 3. Shift return address one slot down on the stack (overwriting the
- // original receiver), making the original first argument the new receiver.
- {
- __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-#else
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
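// A hedged, stand-alone C++ model (not part of the upstream change) of the
// stack shuffle performed above when V8_REVERSE_JSARGS is set: the callable
// that was passed as the receiver is removed, undefined is inserted when no
// argument was supplied, and the original first argument becomes the new
// receiver.
#include <deque>
#include <iostream>
#include <string>

int main() {
  // Front of the deque models the top of the stack:
  // return address, receiver (the callable), then the arguments in order.
  std::deque<std::string> stack = {"ret-addr", "callable", "arg0", "arg1"};
  int argc = 2;  // rax: argument count, not counting the receiver.

  std::string ret = stack.front(); stack.pop_front();       // PopReturnAddressTo
  std::string callable = stack.front(); stack.pop_front();  // Pop the callable.
  if (argc == 0) {                                          // Ensure one argument.
    stack.push_front("undefined");
    ++argc;
  }
  stack.push_front(ret);  // PushReturnAddressFrom
  --argc;                 // The first argument is the new receiver.

  std::cout << "call " << callable << " with argc=" << argc << ", stack:";
  for (const std::string& slot : stack) std::cout << ' ' << slot;
  std::cout << '\n';  // call callable with argc=1, stack: ret-addr arg0 arg1
  return 0;
}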
@@ -2205,10 +2217,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label copy, check;
Register src = r8, dest = rsp, num = r9, current = r11;
__ movq(src, rsp);
- __ movq(kScratchRegister, rcx);
- __ negq(kScratchRegister);
- __ leaq(rsp, Operand(rsp, kScratchRegister, times_system_pointer_size,
- 0)); // Update stack pointer.
+ __ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
+ __ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
__ Set(current, 0);
@@ -3500,8 +3510,9 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
DCHECK(api_function_address != name_arg);
__ LoadTaggedPointerField(
scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
- __ movq(api_function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
+ __ LoadExternalPointerField(
+ api_function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
diff --git a/deps/v8/src/codegen/arm/constants-arm.h b/deps/v8/src/codegen/arm/constants-arm.h
index 171de9e1d7..a7726a8f25 100644
--- a/deps/v8/src/codegen/arm/constants-arm.h
+++ b/deps/v8/src/codegen/arm/constants-arm.h
@@ -459,23 +459,6 @@ class Instruction {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
- // Static support.
-
- // Extract a single bit from the instruction bits and return it as bit 0 in
- // the result.
- static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
-
- // Extract a bit field <hi:lo> from the instruction bits and return it in the
- // least-significant bits of the result.
- static inline int Bits(Instr instr, int hi, int lo) {
- return (instr >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Read a bit field <hi:lo>, leaving its position unchanged in the result.
- static inline int BitField(Instr instr, int hi, int lo) {
- return instr & (((2 << (hi - lo)) - 1) << lo);
- }
-
// Accessors for the different named fields used in the ARM encoding.
// The naming of these accessor corresponds to figure A3-1.
//
diff --git a/deps/v8/src/codegen/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc
index 9113de705d..47fe4bdb74 100644
--- a/deps/v8/src/codegen/arm/cpu-arm.cc
+++ b/deps/v8/src/codegen/arm/cpu-arm.cc
@@ -37,18 +37,6 @@ V8_NOINLINE void CpuFeatures::FlushICache(void* start, size_t size) {
register uint32_t end asm("r1") = beg + size;
register uint32_t flg asm("r2") = 0;
-#ifdef __clang__
- // This variant of the asm avoids a constant pool entry, which can be
- // problematic when LTO'ing. It is also slightly shorter.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
-
- asm volatile("svc 0\n"
- :
- : "r"(beg), "r"(end), "r"(flg), "r"(scno)
- : "memory");
-#else
- // Use a different variant of the asm with GCC because some versions doesn't
- // support r7 as an asm input.
asm volatile(
// This assembly works for both ARM and Thumb targets.
@@ -66,7 +54,6 @@ V8_NOINLINE void CpuFeatures::FlushICache(void* start, size_t size) {
: "r"(beg), "r"(end), "r"(flg), [scno] "i"(__ARM_NR_cacheflush)
: "memory");
#endif
-#endif
#endif // !USE_SIMULATOR
}
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 575fd27805..5a4e08dc77 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -283,6 +283,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 349c8dc29e..7e5fa8cef1 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -17,7 +17,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/numbers/double.h"
@@ -427,6 +427,35 @@ void TurboAssembler::Push(Smi smi) {
push(scratch);
}
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order) {
+ UseScratchRegisterScope temps(this);
+ Register counter = scratch;
+ Register tmp = temps.Acquire();
+ DCHECK(!AreAliased(array, size, counter, tmp));
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mov(counter, Operand(0));
+ b(&entry);
+ bind(&loop);
+ ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
+ push(tmp);
+ add(counter, counter, Operand(1));
+ bind(&entry);
+ cmp(counter, size);
+ b(lt, &loop);
+ } else {
+ mov(counter, size);
+ b(&entry);
+ bind(&loop);
+ ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
+ push(tmp);
+ bind(&entry);
+ sub(counter, counter, Operand(1), SetCC);
+ b(ge, &loop);
+ }
+}
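// Hedged C++ sketch (not part of the upstream change) of the two push orders
// implemented by PushArray above: kNormal pushes from the last element down
// to the first, so element 0 ends up on top of the stack; kReverse pushes
// from the first element up to the last.
#include <cstddef>
#include <vector>

enum class PushArrayOrder { kNormal, kReverse };

void PushArray(const std::vector<int>& array, PushArrayOrder order,
               std::vector<int>* stack) {
  if (order == PushArrayOrder::kReverse) {
    for (size_t i = 0; i < array.size(); ++i) stack->push_back(array[i]);
  } else {
    for (size_t i = array.size(); i > 0; --i) stack->push_back(array[i - 1]);
  }
}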
+
void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
@@ -1556,7 +1585,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
// Load receiver to pass it later to DebugOnFunctionCall hook.
- ldr(r4, MemOperand(sp, actual_parameter_count, LSL, kPointerSizeLog2));
+ ldr(r4, ReceiverOperand(actual_parameter_count));
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 9ec1bafb58..a7dc5498b8 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -156,6 +156,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
}
+ enum class PushArrayOrder { kNormal, kReverse };
+ // `array` points to the first element (the lowest address).
+ // `array` and `size` are not modified.
+ void PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order = PushArrayOrder::kNormal);
+
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
@@ -720,6 +726,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
+ // It assumes that the arguments are located below the stack pointer.
+ // argc is the number of arguments not including the receiver.
+ // TODO(victorgomes): Remove this function once we stick with the reversed
+ // arguments order.
+ MemOperand ReceiverOperand(Register argc) {
+#ifdef V8_REVERSE_JSARGS
+ return MemOperand(sp, 0);
+#else
+ return MemOperand(sp, argc, LSL, kSystemPointerSizeLog2);
+#endif
+ }
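// Illustrative C++ only (not part of the upstream change): with
// V8_REVERSE_JSARGS the receiver sits directly at the stack pointer, whereas
// with the original argument order it sits above the argc arguments.
#include <cstdint>

uintptr_t ReceiverAddress(uintptr_t sp, unsigned argc, bool reverse_jsargs,
                          unsigned pointer_size) {
  return reverse_jsargs ? sp : sp + uintptr_t{argc} * pointer_size;
}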
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 0d453ec03e..77ae14f98c 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -351,6 +351,8 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r4;
constexpr Register cp = r7; // JavaScript context pointer.
constexpr Register kRootRegister = r10; // Roots array pointer.
+constexpr DoubleRegister kFPReturnRegister0 = d0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index f3c3e55975..c47f8f1aa5 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -660,6 +660,7 @@ Address RelocInfo::constant_pool_entry_address() {
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
+ CHECK(!host_.is_null());
return HeapObject::cast(Object(DecompressTaggedAny(
host_.address(),
Assembler::target_compressed_address_at(pc_, constant_pool_))));
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index d5a0295934..97a57d6f3c 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -140,7 +140,9 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
- return instr->IsLdrLiteralX();
+ DCHECK_IMPLIES(instr->IsLdrLiteralW(), COMPRESS_POINTERS_BOOL);
+ return instr->IsLdrLiteralX() ||
+ (COMPRESS_POINTERS_BOOL && instr->IsLdrLiteralW());
}
uint32_t RelocInfo::wasm_call_tag() const {
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index c2224ffe34..7d986f286d 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -343,6 +343,10 @@ void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
formats_[0] = format0;
formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
+  // Support the four-parameter form (e.g. ld4r) to avoid using positional
+  // arguments in DisassemblingDecoder.
+  // See: https://crbug.com/v8/10365
+ formats_[3] = formats_[2];
}
void NEONFormatDecoder::SetFormatMap(unsigned index,
@@ -353,15 +357,18 @@ void NEONFormatDecoder::SetFormatMap(unsigned index,
}
const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
- return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+ return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder,
+ kPlaceholder);
}
const char* NEONFormatDecoder::Substitute(const char* string,
SubstitutionMode mode0,
SubstitutionMode mode1,
- SubstitutionMode mode2) {
+ SubstitutionMode mode2,
+ SubstitutionMode mode3) {
snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
- GetSubstitute(1, mode1), GetSubstitute(2, mode2));
+ GetSubstitute(1, mode1), GetSubstitute(2, mode2),
+ GetSubstitute(3, mode3));
return form_buffer_;
}
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index d2341b972f..c115fb6924 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -619,7 +619,8 @@ class NEONFormatDecoder {
// substitution mode.
const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
- SubstitutionMode mode2 = kFormat);
+ SubstitutionMode mode2 = kFormat,
+ SubstitutionMode mode3 = kFormat);
// Append a "2" to a mnemonic string based of the state of the Q bit.
const char* Mnemonic(const char* mnemonic);
@@ -738,7 +739,7 @@ class NEONFormatDecoder {
uint8_t PickBits(const uint8_t bits[]);
Instr instrbits_;
- const NEONFormatMap* formats_[3];
+ const NEONFormatMap* formats_[4];
char form_buffer_[64];
char mne_buffer_[16];
};
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 2d86ace4bc..9f05922444 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -287,6 +287,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 809838bcf9..93b8136d9a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -713,7 +713,7 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
- Mov(tmp, bit_cast<uint32_t>(imm));
+ Mov(tmp, bits);
Fmov(vd, tmp);
}
} else {
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 1273904c9c..c157df2996 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -16,7 +16,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
@@ -1306,7 +1306,14 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
static_assert(kSystemPointerSize == kDRegSize,
"pointers must be the same size as doubles");
- int direction = (mode == kDstLessThanSrc) ? 1 : -1;
+ if (mode == kDstLessThanSrcAndReverse) {
+ Add(src, src, Operand(count, LSL, kSystemPointerSizeLog2));
+ Sub(src, src, kSystemPointerSize);
+ }
+
+ int src_direction = (mode == kDstLessThanSrc) ? 1 : -1;
+ int dst_direction = (mode == kSrcLessThanDst) ? -1 : 1;
+
UseScratchRegisterScope scope(this);
VRegister temp0 = scope.AcquireD();
VRegister temp1 = scope.AcquireD();
@@ -1314,23 +1321,30 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Label pairs, loop, done;
Tbz(count, 0, &pairs);
- Ldr(temp0, MemOperand(src, direction * kSystemPointerSize, PostIndex));
+ Ldr(temp0, MemOperand(src, src_direction * kSystemPointerSize, PostIndex));
Sub(count, count, 1);
- Str(temp0, MemOperand(dst, direction * kSystemPointerSize, PostIndex));
+ Str(temp0, MemOperand(dst, dst_direction * kSystemPointerSize, PostIndex));
Bind(&pairs);
if (mode == kSrcLessThanDst) {
// Adjust pointers for post-index ldp/stp with negative offset:
Sub(dst, dst, kSystemPointerSize);
Sub(src, src, kSystemPointerSize);
+ } else if (mode == kDstLessThanSrcAndReverse) {
+ Sub(src, src, kSystemPointerSize);
}
Bind(&loop);
Cbz(count, &done);
Ldp(temp0, temp1,
- MemOperand(src, 2 * direction * kSystemPointerSize, PostIndex));
+ MemOperand(src, 2 * src_direction * kSystemPointerSize, PostIndex));
Sub(count, count, 2);
- Stp(temp0, temp1,
- MemOperand(dst, 2 * direction * kSystemPointerSize, PostIndex));
+ if (mode == kDstLessThanSrcAndReverse) {
+ Stp(temp1, temp0,
+ MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
+ } else {
+ Stp(temp0, temp1,
+ MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
+ }
B(&loop);
// TODO(all): large copies may benefit from using temporary Q registers
@@ -2093,7 +2107,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
// Load receiver to pass it later to DebugOnFunctionCall hook.
- Ldr(x4, MemOperand(sp, actual_parameter_count, LSL, kSystemPointerSizeLog2));
+ Peek(x4, ReceiverOperand(actual_parameter_count));
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (!new_target.is_valid()) new_target = padreg;
@@ -2165,6 +2179,14 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Bind(&done);
}
+Operand MacroAssembler::ReceiverOperand(Register arg_count) {
+#ifdef V8_REVERSE_JSARGS
+ return Operand(0);
+#else
+ return Operand(arg_count, LSL, kXRegSizeLog2);
+#endif
+}
+
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeFlag flag) {
@@ -2297,7 +2319,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
- } else if (type == StackFrame::WASM_COMPILED ||
+ } else if (type == StackFrame::WASM ||
type == StackFrame::WASM_COMPILE_LAZY ||
type == StackFrame::WASM_EXIT) {
Register type_reg = temps.AcquireX();
@@ -2966,7 +2988,9 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
// Copies of the printf vararg registers that we can pop from.
CPURegList pcs_varargs = kPCSVarargs;
+#ifndef V8_OS_WIN
CPURegList pcs_varargs_fp = kPCSVarargsFP;
+#endif
// Place the arguments. There are lots of clever tricks and optimizations we
// could use here, but Printf is a debug tool so instead we just try to keep
@@ -2981,7 +3005,14 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
} else if (args[i].IsVRegister()) {
// In C, floats are always cast to doubles for varargs calls.
+#ifdef V8_OS_WIN
+      // For variadic functions, SIMD and floating-point registers aren't
+      // used; the general-purpose registers x0-x7 are used instead.
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
+ pcs[i] = pcs_varargs.PopLowestIndex().X();
+#else
pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
+#endif
} else {
DCHECK(args[i].IsNone());
arg_count = i;
@@ -3012,6 +3043,22 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
// Do a second pass to move values into their final positions and perform any
// conversions that may be required.
for (int i = 0; i < arg_count; i++) {
+#ifdef V8_OS_WIN
+ if (args[i].IsVRegister()) {
+ if (pcs[i].SizeInBytes() != args[i].SizeInBytes()) {
+        // If the argument is half- or single-precision, convert it to
+        // double-precision before moving it into one of the X scratch
+        // registers.
+ VRegister temp0 = temps.AcquireD();
+ Fcvt(temp0.VReg(), args[i].VReg());
+ Fmov(pcs[i].Reg(), temp0);
+ } else {
+ Fmov(pcs[i].Reg(), args[i].VReg());
+ }
+ } else {
+ Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
+ }
+#else
DCHECK(pcs[i].type() == args[i].type());
if (pcs[i].IsRegister()) {
Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
@@ -3023,6 +3070,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
Fcvt(pcs[i].VReg(), args[i].VReg());
}
}
+#endif
}
// Load the format string into x0, as per the procedure-call standard.
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 7b1fb69e95..109e73c3c2 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -703,7 +703,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CopySlots(Register dst, Register src, Register slot_count);
// Copy count double words from the address in register src to the address
- // in register dst. There are two modes for this function:
+ // in register dst. There are three modes for this function:
// 1) Address dst must be less than src, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. This is the default mode.
@@ -711,10 +711,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// greater than or equal to count double words, otherwise the result is
// undpredictable. In this mode, src and dst specify the last (highest)
// address of the regions to copy from and to.
+ // 3) The same as mode 1, but the words are copied in the reversed order.
// The case where src == dst is not supported.
// The function may corrupt its register arguments. The registers must not
// alias each other.
- enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
+ enum CopyDoubleWordsMode {
+ kDstLessThanSrc,
+ kSrcLessThanDst,
+ kDstLessThanSrcAndReverse
+ };
void CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode = kDstLessThanSrc);
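// A hedged C++ sketch (not part of the upstream change) of the three
// CopyDoubleWords modes documented above, written as plain loops instead of
// post-indexed ldp/stp pairs.
#include <cstddef>
#include <cstdint>

enum CopyDoubleWordsMode {
  kDstLessThanSrc,           // Forward copy; dst/src point at the first word.
  kSrcLessThanDst,           // Backward copy; dst/src point at the last word.
  kDstLessThanSrcAndReverse  // Forward copy of dst, words taken in reverse.
};

void CopyDoubleWords(uint64_t* dst, const uint64_t* src, size_t count,
                     CopyDoubleWordsMode mode) {
  for (size_t i = 0; i < count; ++i) {
    switch (mode) {
      case kDstLessThanSrc:           dst[i] = src[i]; break;
      case kSrcLessThanDst:           *(dst - i) = *(src - i); break;
      case kDstLessThanSrcAndReverse: dst[i] = src[count - 1 - i]; break;
    }
  }
}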
@@ -1762,6 +1767,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
+ // TODO(victorgomes): inline this function once we remove V8_REVERSE_JSARGS
+ // flag.
+ Operand ReceiverOperand(const Register arg_count);
+
// ---- SMI and Number Utilities ----
inline void SmiTag(Register dst, Register src);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 9571aa5ab5..c98b0f6162 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -470,8 +470,9 @@ ALIAS_REGISTER(Register, padreg, x31);
// Keeps the 0 double value.
ALIAS_REGISTER(VRegister, fp_zero, d15);
// MacroAssembler fixed V Registers.
-ALIAS_REGISTER(VRegister, fp_fixed1, d28);
-ALIAS_REGISTER(VRegister, fp_fixed2, d29);
+// d29 is not part of ALLOCATABLE_DOUBLE_REGISTERS, so use 27 and 28.
+ALIAS_REGISTER(VRegister, fp_fixed1, d27);
+ALIAS_REGISTER(VRegister, fp_fixed2, d28);
// MacroAssembler scratch V registers.
ALIAS_REGISTER(VRegister, fp_scratch, d30);
@@ -522,8 +523,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -696,6 +695,8 @@ constexpr Register kRuntimeCallArgvRegister = x11;
constexpr Register kWasmInstanceRegister = x7;
constexpr Register kWasmCompileLazyFuncIndexRegister = x8;
+constexpr DoubleRegister kFPReturnRegister0 = d0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 4bda1260a9..3b27bf5db9 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -41,7 +41,6 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // For MemoryAllocator. TODO(jkummerow): Drop.
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"
#include "src/utils/vector.h"
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 0464faea3b..901ce0c7b4 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -12,13 +12,15 @@
#include "src/execution/frames-inl.h"
#include "src/execution/frames.h"
#include "src/execution/protectors.h"
-#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
+#include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop.
+#include "src/heap/memory-chunk.h"
#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -144,164 +146,6 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
return value;
}
-void CodeStubAssembler::CollectCallableFeedback(
- TNode<Object> maybe_target, TNode<Context> context,
- TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id,
- CallableFeedbackMode mode) {
- Label extra_checks(this, Label::kDeferred), done(this);
-
- // Check if we have monomorphic {target} feedback already.
- TNode<MaybeObject> feedback =
- LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Comment("check if monomorphic");
- TNode<BoolT> is_monomorphic = IsWeakReferenceToObject(feedback, maybe_target);
- GotoIf(is_monomorphic, &done);
-
- // Check if it is a megamorphic {target}.
- Comment("check if megamorphic");
- TNode<BoolT> is_megamorphic = TaggedEqual(
- feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
- Branch(is_megamorphic, &done, &extra_checks);
-
- BIND(&extra_checks);
- {
- Label initialize(this), mark_megamorphic(this);
-
- Comment("check if weak reference");
- TNode<BoolT> is_uninitialized = TaggedEqual(
- feedback,
- HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
- GotoIf(is_uninitialized, &initialize);
- CSA_ASSERT(this, IsWeakOrCleared(feedback));
-
- // If the weak reference is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak reference is cleared");
- GotoIf(IsCleared(feedback), &initialize);
- GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic);
-
- if (mode == CallableFeedbackMode::kDontCollectFeedbackCell) {
- Goto(&mark_megamorphic);
- } else {
- Label try_transition_to_feedback_cell(this);
-
- // Check if {target} is a JSFunction.
- Comment("check if target is a JSFunction");
- TNode<HeapObject> target = CAST(maybe_target);
- GotoIfNot(IsJSFunction(target), &mark_megamorphic);
-
- // Check if {target}s feedback vector cell matches the {feedback_value}.
- TNode<HeapObject> feedback_value = GetHeapObjectAssumeWeak(feedback);
- TNode<Object> target_feedback_cell =
- LoadObjectField(target, JSFunction::kFeedbackCellOffset);
- Branch(TaggedEqual(feedback_value, target_feedback_cell), &done,
- &try_transition_to_feedback_cell);
-
- BIND(&try_transition_to_feedback_cell);
- {
- // Check if {target} and {feedback_value} are both JSFunctions with
- // the same feedback vector cell, and that those functions were
- // actually compiled already.
- GotoIfNot(IsJSFunction(feedback_value), &mark_megamorphic);
- TNode<HeapObject> feedback_cell = CAST(
- LoadObjectField(feedback_value, JSFunction::kFeedbackCellOffset));
- GotoIfNot(TaggedEqual(feedback_cell, target_feedback_cell),
- &mark_megamorphic);
- GotoIfNot(IsFeedbackCell(feedback_cell), &mark_megamorphic);
-
- // Record the feedback vector cell.
- Comment("transition to polymorphic");
- StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
- feedback_cell);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Call:FeedbackVectorCell");
- Goto(&done);
- }
- }
-
- BIND(&initialize);
- {
- Comment("check if function in same native context");
- GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic);
- TNode<HeapObject> target = CAST(maybe_target);
- // Check if the {target} is a JSFunction or JSBoundFunction
- // in the current native context.
- TVARIABLE(HeapObject, var_current, target);
- Label loop(this, &var_current), done_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- Label if_boundfunction(this), if_function(this);
- TNode<HeapObject> current = var_current.value();
- TNode<Uint16T> current_instance_type = LoadInstanceType(current);
- GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
- &if_boundfunction);
- Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
- &if_function, &mark_megamorphic);
-
- BIND(&if_function);
- {
- // Check that the JSFunction {current} is in the current native
- // context.
- TNode<Context> current_context =
- CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<NativeContext> current_native_context =
- LoadNativeContext(current_context);
- Branch(
- TaggedEqual(LoadNativeContext(context), current_native_context),
- &done_loop, &mark_megamorphic);
- }
- BIND(&if_boundfunction);
- {
- // Continue with the [[BoundTargetFunction]] of {target}.
- var_current = LoadObjectField<HeapObject>(
- current, JSBoundFunction::kBoundTargetFunctionOffset);
- Goto(&loop);
- }
- }
- BIND(&done_loop);
- StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, target);
- ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
- Goto(&done);
- }
-
- BIND(&mark_megamorphic);
- {
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
- StoreFeedbackVectorSlot(
- feedback_vector, slot_id,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Call:TransitionMegamorphic");
- Goto(&done);
- }
- }
-
- BIND(&done);
-}
-
-void CodeStubAssembler::CollectCallFeedback(
- TNode<Object> maybe_target, TNode<Context> context,
- TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot_id) {
- Label feedback_done(this);
- // If feedback_vector is not valid, then nothing to do.
- GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
-
- // Increment the call count.
- TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
- IncrementCallCount(feedback_vector, slot_id);
-
- // Collect the callable {target} feedback.
- CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id,
- CallableFeedbackMode::kCollectFeedbackCell);
- Goto(&feedback_done);
-
- BIND(&feedback_done);
-}
void CodeStubAssembler::IncrementCallCount(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
@@ -373,7 +217,7 @@ TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
SmiConstant(false_value));
}
-TNode<Object> CodeStubAssembler::NoContextConstant() {
+TNode<Smi> CodeStubAssembler::NoContextConstant() {
return SmiConstant(Context::kNoContext);
}
@@ -762,8 +606,8 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
TNode<TaggedIndex> value) {
- return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
- IntPtrConstant(kSmiTagSize)));
+ return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
+ IntPtrConstant(kSmiTagSize)));
}
TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
@@ -858,16 +702,17 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, &constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
+ TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value);
if (COMPRESS_POINTERS_BOOL) {
- return ChangeInt32ToIntPtr(SmiToInt32(value));
+ // Clear the upper half using sign-extension.
+ raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits));
}
- return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
- SmiShiftBitsConstant()));
+ return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
if (COMPRESS_POINTERS_BOOL) {
- return Signed(Word32Sar(
+ return Signed(Word32SarShiftOutZeros(
TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)),
SmiShiftBitsConstant32()));
}
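// Hedged C++ sketch (not part of the upstream change) of Smi untagging under
// pointer compression as done in SmiUntag/SmiToInt32 above: only the lower
// 32 bits of the tagged word carry the payload, so they are sign-extended
// first and the one-bit Smi tag is then shifted out arithmetically.
#include <cstdint>

intptr_t SmiUntag(uintptr_t raw_tagged_word) {
  // Sign-extend the lower 32 bits (the compressed payload).
  intptr_t payload =
      static_cast<int32_t>(static_cast<uint32_t>(raw_tagged_word));
  // Shift out the Smi tag (kSmiTagSize == 1, kSmiShiftSize == 0 when
  // pointers are compressed).
  return payload >> 1;
}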
@@ -2226,9 +2071,10 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
TNode<JSTypedArray> typed_array) {
- // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
- TNode<RawPtrT> external_pointer = LoadObjectField<RawPtrT>(
- typed_array, JSTypedArray::kExternalPointerOffset);
+ // Data pointer = DecodeExternalPointer(external_pointer) +
+ // static_cast<Tagged_t>(base_pointer).
+ TNode<RawPtrT> external_pointer =
+ DecodeExternalPointer(LoadJSTypedArrayExternalPointer(typed_array));
TNode<IntPtrT> base_pointer;
if (COMPRESS_POINTERS_BOOL) {
@@ -3687,6 +3533,17 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
kTaggedSize, IndexAdvanceMode::kPost);
}
+void CodeStubAssembler::MakeFixedArrayCOW(TNode<FixedArray> array) {
+ CSA_ASSERT(this, IsFixedArrayMap(LoadMap(array)));
+ Label done(this);
+ // The empty fixed array is not modifiable anyway. And we shouldn't change its
+ // Map.
+ GotoIf(TaggedEqual(array, EmptyFixedArrayConstant()), &done);
+ StoreMap(array, FixedCOWArrayMapConstant());
+ Goto(&done);
+ BIND(&done);
+}
+
TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<IntPtrT> capacity) {
return UintPtrLessThanOrEqual(capacity,
@@ -4900,6 +4757,12 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
+TNode<JSAggregateError> CodeStubAssembler::HeapObjectToJSAggregateError(
+ TNode<HeapObject> heap_object, Label* fail) {
+ GotoIfNot(IsJSAggregateError(heap_object), fail);
+ return UncheckedCast<JSAggregateError>(heap_object);
+}
+
TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
@@ -5235,6 +5098,22 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
}
}
+TNode<Int32T> CodeStubAssembler::TruncateNumberToWord32(TNode<Number> number) {
+ TVARIABLE(Int32T, var_result);
+ Label done(this), if_heapnumber(this);
+ GotoIfNot(TaggedIsSmi(number), &if_heapnumber);
+ var_result = SmiToInt32(CAST(number));
+ Goto(&done);
+
+ BIND(&if_heapnumber);
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(number));
+ var_result = Signed(TruncateFloat64ToWord32(value));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
TNode<HeapNumber> object) {
TNode<Float64T> value = LoadHeapNumberValue(object);
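TruncateNumberToWord32 added in this hunk dispatches on the tag: a Smi already carries its int32 value, while a HeapNumber is loaded as a double and passed through TruncateFloat64ToWord32, the JS-style ToInt32 truncation (NaN and infinities become 0, out-of-range values wrap modulo 2^32). A rough scalar sketch of that truncation under those assumptions, not the V8 implementation:

#include <cmath>
#include <cstdint>

// Illustrative ToInt32-style truncation (ECMAScript semantics), not the V8 code.
int32_t TruncateDoubleToWord32(double value) {
  if (!std::isfinite(value)) return 0;           // NaN, +/-Infinity -> 0
  double truncated = std::trunc(value);          // drop the fractional part
  double wrapped = std::fmod(truncated, 4294967296.0);  // wrap modulo 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(wrapped);
  return static_cast<int32_t>(bits);             // reinterpret the low 32 bits as signed
}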
@@ -5248,6 +5127,38 @@ void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
TryFloat64ToSmi(value, var_result_smi, if_smi);
}
+void CodeStubAssembler::TryFloat32ToSmi(TNode<Float32T> value,
+ TVariable<Smi>* var_result_smi,
+ Label* if_smi) {
+ TNode<Int32T> ivalue = TruncateFloat32ToInt32(value);
+ TNode<Float32T> fvalue = RoundInt32ToFloat32(ivalue);
+
+ Label if_int32(this), if_heap_number(this);
+
+ GotoIfNot(Float32Equal(value, fvalue), &if_heap_number);
+ GotoIfNot(Word32Equal(ivalue, Int32Constant(0)), &if_int32);
+ Branch(Int32LessThan(UncheckedCast<Int32T>(BitcastFloat32ToInt32(value)),
+ Int32Constant(0)),
+ &if_heap_number, &if_int32);
+
+ TVARIABLE(Number, var_result);
+ BIND(&if_int32);
+ {
+ if (SmiValuesAre32Bits()) {
+ *var_result_smi = SmiTag(ChangeInt32ToIntPtr(ivalue));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(ivalue, ivalue);
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, &if_heap_number);
+ *var_result_smi =
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
+ }
+ Goto(if_smi);
+ }
+ BIND(&if_heap_number);
+}
+
void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
TVariable<Smi>* var_result_smi,
Label* if_smi) {
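TryFloat32ToSmi above admits a float32 only when it round-trips exactly through int32, rejects -0.0 by checking the sign bit of the original float, and on 31-bit-Smi builds additionally requires that doubling the value (the Smi tag shift) does not overflow. A self-contained sketch of that fit check, assuming 31-bit Smis; the helper name is made up:

#include <cstdint>
#include <cstring>
#include <optional>

// Hypothetical helper: does this float have an exact 31-bit Smi representation?
std::optional<int32_t> Float32ToSmiValue(float value) {
  if (!(value >= -2147483648.0f && value < 2147483648.0f)) return {};  // NaN / out of int32 range
  int32_t ivalue = static_cast<int32_t>(value);          // TruncateFloat32ToInt32
  if (static_cast<float>(ivalue) != value) return {};    // not an exact int32
  if (ivalue == 0) {
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (bits >> 31) return {};                            // reject -0.0
  }
  int64_t doubled = int64_t{ivalue} * 2;                  // the Smi tag shift
  if (doubled != static_cast<int32_t>(doubled)) return {};  // 31-bit overflow
  return ivalue;
}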
@@ -5280,6 +5191,24 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
BIND(&if_heap_number);
}
+TNode<Number> CodeStubAssembler::ChangeFloat32ToTagged(TNode<Float32T> value) {
+ Label if_smi(this), done(this);
+ TVARIABLE(Smi, var_smi_result);
+ TVARIABLE(Number, var_result);
+ TryFloat32ToSmi(value, &var_smi_result, &if_smi);
+
+ var_result = AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value));
+ Goto(&done);
+
+ BIND(&if_smi);
+ {
+ var_result = var_smi_result.value();
+ Goto(&done);
+ }
+ BIND(&done);
+ return var_result.value();
+}
+
TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
SloppyTNode<Float64T> value) {
Label if_smi(this), done(this);
@@ -5464,6 +5393,42 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(TNode<Number> value) {
return result.value();
}
+TNode<Int32T> CodeStubAssembler::ChangeTaggedNonSmiToInt32(
+ TNode<Context> context, TNode<HeapObject> input) {
+ return Select<Int32T>(
+ IsHeapNumber(input),
+ [=] {
+ return Signed(TruncateFloat64ToWord32(LoadHeapNumberValue(input)));
+ },
+ [=] {
+ return TruncateNumberToWord32(
+ CAST(CallBuiltin(Builtins::kNonNumberToNumber, context, input)));
+ });
+}
+
+TNode<Float64T> CodeStubAssembler::ChangeTaggedToFloat64(TNode<Context> context,
+ TNode<Object> input) {
+ TVARIABLE(Float64T, var_result);
+ Label end(this), not_smi(this);
+
+ GotoIfNot(TaggedIsSmi(input), &not_smi);
+ var_result = SmiToFloat64(CAST(input));
+ Goto(&end);
+
+ BIND(&not_smi);
+ var_result = Select<Float64T>(
+ IsHeapNumber(CAST(input)),
+ [=] { return LoadHeapNumberValue(CAST(input)); },
+ [=] {
+ return ChangeNumberToFloat64(
+ CAST(CallBuiltin(Builtins::kNonNumberToNumber, context, input)));
+ });
+ Goto(&end);
+
+ BIND(&end);
+ return var_result.value();
+}
+
TNode<WordT> CodeStubAssembler::TimesSystemPointerSize(
SloppyTNode<WordT> value) {
return WordShl(value, kSystemPointerSizeLog2);
@@ -5759,12 +5724,10 @@ TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
return TaggedEqual(cell_value, invalid);
}
-TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
- TNode<NativeContext> native_context) {
- TNode<PropertyCell> cell = CAST(LoadContextElement(
- native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
- TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() {
TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = RegExpSpeciesProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return TaggedEqual(cell_value, invalid);
}
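IsRegExpSpeciesProtectorCellInvalid now fetches the cell through the new RegExpSpeciesProtector root constant instead of the native context, but the test itself is unchanged: read the PropertyCell's value and compare it with the "protector invalid" Smi sentinel. A toy sketch of that pattern; the struct and enum here are stand-ins, not V8's declarations:

// Toy protector-cell pattern: the cell starts out "valid" and is flipped when
// user code breaks the guarded invariant; fast paths only do the comparison.
enum ProtectorState : int { kProtectorInvalid = 0, kProtectorValid = 1 };

struct PropertyCellSketch { int value = kProtectorValid; };

bool IsProtectorCellInvalid(const PropertyCellSketch& cell) {
  return cell.value == kProtectorInvalid;
}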
@@ -5988,6 +5951,16 @@ TNode<BoolT> CodeStubAssembler::IsJSObject(SloppyTNode<HeapObject> object) {
return IsJSObjectMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistryMap(TNode<Map> map) {
+ return InstanceTypeEqual(LoadMapInstanceType(map),
+ JS_FINALIZATION_REGISTRY_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistry(
+ TNode<HeapObject> object) {
+ return IsJSFinalizationRegistryMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSPromiseMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return InstanceTypeEqual(LoadMapInstanceType(map), JS_PROMISE_TYPE);
@@ -6029,6 +6002,10 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::IsJSAggregateError(TNode<HeapObject> object) {
+ return HasInstanceType(object, JS_AGGREGATE_ERROR_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
@@ -6746,12 +6723,13 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
{
STATIC_ASSERT(SeqOneByteString::kHeaderSize ==
SeqTwoByteString::kHeaderSize);
- TNode<IntPtrT> result = BitcastTaggedToWord(var_string_.value());
+ TNode<RawPtrT> result =
+ ReinterpretCast<RawPtrT>(BitcastTaggedToWord(var_string_.value()));
if (ptr_kind == PTR_TO_DATA) {
- result = IntPtrAdd(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
+ result = RawPtrAdd(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
}
- var_result = ReinterpretCast<RawPtrT>(result);
+ var_result = result;
Goto(&out);
}
@@ -6761,13 +6739,13 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
if_bailout);
TNode<String> string = var_string_.value();
- TNode<IntPtrT> result =
- LoadObjectField<IntPtrT>(string, ExternalString::kResourceDataOffset);
+ TNode<RawPtrT> result =
+ DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
if (ptr_kind == PTR_TO_STRING) {
- result = IntPtrSub(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
+ result = RawPtrSub(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
}
- var_result = ReinterpretCast<RawPtrT>(result);
+ var_result = result;
Goto(&out);
}
@@ -6889,8 +6867,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
BIND(&runtime);
{
// No cache entry, go to the runtime.
- result =
- CAST(CallRuntime(Runtime::kNumberToString, NoContextConstant(), input));
+ result = CAST(
+ CallRuntime(Runtime::kNumberToStringSlow, NoContextConstant(), input));
Goto(&done);
}
BIND(&done);
@@ -7327,7 +7305,7 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
}
TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
- uint32_t shift, uint32_t mask) {
+ uint32_t shift, uintptr_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
IntPtrConstant(mask >> shift)));
@@ -7335,25 +7313,39 @@ TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
TNode<Uint32T> value,
- uint32_t shift, uint32_t mask) {
+ uint32_t shift, uint32_t mask,
+ bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
CSA_ASSERT(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
TNode<Word32T> encoded_value = Word32Shl(value, Int32Constant(shift));
- TNode<Word32T> inverted_mask = Int32Constant(~mask);
- return Word32Or(Word32And(word, inverted_mask), encoded_value);
+ TNode<Word32T> masked_word;
+ if (starts_as_zero) {
+ CSA_ASSERT(this, Word32Equal(Word32And(word, Int32Constant(~mask)), word));
+ masked_word = word;
+ } else {
+ masked_word = Word32And(word, Int32Constant(~mask));
+ }
+ return Word32Or(masked_word, encoded_value);
}
TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
TNode<UintPtrT> value,
- uint32_t shift, uint32_t mask) {
+ uint32_t shift, uintptr_t mask,
+ bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
CSA_ASSERT(this,
UintPtrLessThanOrEqual(value, UintPtrConstant(mask >> shift)));
TNode<WordT> encoded_value = WordShl(value, static_cast<int>(shift));
- TNode<IntPtrT> inverted_mask = IntPtrConstant(~static_cast<intptr_t>(mask));
- return WordOr(WordAnd(word, inverted_mask), encoded_value);
+ TNode<WordT> masked_word;
+ if (starts_as_zero) {
+ CSA_ASSERT(this, WordEqual(WordAnd(word, UintPtrConstant(~mask)), word));
+ masked_word = word;
+ } else {
+ masked_word = WordAnd(word, UintPtrConstant(~mask));
+ }
+ return WordOr(masked_word, encoded_value);
}
void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
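The new starts_as_zero flag threaded through UpdateWord32/UpdateWord lets callers that know the field's bits are still zero skip the clear step, turning mask-and-or into a single or (with an assert guarding that assumption). A standalone sketch of both paths over an arbitrary shift/mask field layout, not the CSA version:

#include <cassert>
#include <cstdint>

// Illustrative bit-field update; shift/mask describe a caller-chosen field.
uintptr_t UpdateField(uintptr_t word, uintptr_t value, unsigned shift,
                      uintptr_t mask, bool starts_as_zero) {
  assert(((mask >> shift) << shift) == mask);  // mask has no bits below shift
  assert(value <= (mask >> shift));            // value fits inside the field
  uintptr_t encoded = value << shift;
  if (starts_as_zero) {
    assert((word & ~mask) == word);            // field bits really are zero
    return word | encoded;                     // no clearing needed
  }
  return (word & ~mask) | encoded;             // clear the field, then insert
}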
@@ -12487,15 +12479,9 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
ThrowIfArrayBufferIsDetached(context, buffer, method_name);
}
-TNode<Uint32T> CodeStubAssembler::LoadJSArrayBufferBitField(
+TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadObjectField<Uint32T>(array_buffer, JSArrayBuffer::kBitFieldOffset);
-}
-
-TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStore(
- TNode<JSArrayBuffer> array_buffer) {
- return LoadObjectField<RawPtrT>(array_buffer,
- JSArrayBuffer::kBackingStoreOffset);
+ return DecodeExternalPointer(LoadJSArrayBufferBackingStore(array_buffer));
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -12672,6 +12658,18 @@ TNode<BoolT> CodeStubAssembler::IsFastElementsKind(
Int32Constant(LAST_FAST_ELEMENTS_KIND));
}
+TNode<BoolT> CodeStubAssembler::IsFastOrNonExtensibleOrSealedElementsKind(
+ TNode<Int32T> elements_kind) {
+ STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
+ STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND + 1 == PACKED_NONEXTENSIBLE_ELEMENTS);
+ STATIC_ASSERT(PACKED_NONEXTENSIBLE_ELEMENTS + 1 ==
+ HOLEY_NONEXTENSIBLE_ELEMENTS);
+ STATIC_ASSERT(HOLEY_NONEXTENSIBLE_ELEMENTS + 1 == PACKED_SEALED_ELEMENTS);
+ STATIC_ASSERT(PACKED_SEALED_ELEMENTS + 1 == HOLEY_SEALED_ELEMENTS);
+ return Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(HOLEY_SEALED_ELEMENTS));
+}
+
TNode<BoolT> CodeStubAssembler::IsDoubleElementsKind(
TNode<Int32T> elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
@@ -13240,6 +13238,21 @@ TNode<String> CodeStubAssembler::TaggedToDirectString(TNode<Object> value,
return CAST(value);
}
+void CodeStubAssembler::RemoveFinalizationRegistryCellFromUnregisterTokenMap(
+ TNode<JSFinalizationRegistry> finalization_registry,
+ TNode<WeakCell> weak_cell) {
+ const TNode<ExternalReference> remove_cell = ExternalConstant(
+ ExternalReference::
+ js_finalization_registry_remove_cell_from_unregister_token_map());
+ const TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ CallCFunction(remove_cell, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
+ std::make_pair(MachineType::AnyTagged(), finalization_registry),
+ std::make_pair(MachineType::AnyTagged(), weak_cell));
+}
+
PrototypeCheckAssembler::PrototypeCheckAssembler(
compiler::CodeAssemblerState* state, Flags flags,
TNode<NativeContext> native_context, TNode<Map> initial_prototype_map,
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 618481ff47..b01729c73d 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
@@ -20,7 +21,6 @@
#include "src/objects/smi.h"
#include "src/objects/tagged-index.h"
#include "src/roots/roots.h"
-
#include "torque-generated/exported-macros-assembler-tq.h"
namespace v8 {
@@ -34,28 +34,79 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
- V(ArrayIteratorProtector, array_iterator_protector, ArrayIteratorProtector) \
- V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
- V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \
- V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
- V(NumberStringCache, number_string_cache, NumberStringCache) \
- V(PromiseResolveProtector, promise_resolve_protector, \
- PromiseResolveProtector) \
- V(PromiseSpeciesProtector, promise_species_protector, \
- PromiseSpeciesProtector) \
- V(PromiseThenProtector, promise_then_protector, PromiseThenProtector) \
- V(SetIteratorProtector, set_iterator_protector, SetIteratorProtector) \
- V(SingleCharacterStringCache, single_character_string_cache, \
- SingleCharacterStringCache) \
- V(StringIteratorProtector, string_iterator_protector, \
- StringIteratorProtector) \
- V(TypedArraySpeciesProtector, typed_array_species_protector, \
+#define HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
+ V(ArrayIteratorProtector, array_iterator_protector, ArrayIteratorProtector) \
+ V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
+ V(AsyncFunctionAwaitRejectSharedFun, async_function_await_reject_shared_fun, \
+ AsyncFunctionAwaitRejectSharedFun) \
+ V(AsyncFunctionAwaitResolveSharedFun, \
+ async_function_await_resolve_shared_fun, \
+ AsyncFunctionAwaitResolveSharedFun) \
+ V(AsyncGeneratorAwaitRejectSharedFun, \
+ async_generator_await_reject_shared_fun, \
+ AsyncGeneratorAwaitRejectSharedFun) \
+ V(AsyncGeneratorAwaitResolveSharedFun, \
+ async_generator_await_resolve_shared_fun, \
+ AsyncGeneratorAwaitResolveSharedFun) \
+ V(AsyncGeneratorReturnClosedRejectSharedFun, \
+ async_generator_return_closed_reject_shared_fun, \
+ AsyncGeneratorReturnClosedRejectSharedFun) \
+ V(AsyncGeneratorReturnClosedResolveSharedFun, \
+ async_generator_return_closed_resolve_shared_fun, \
+ AsyncGeneratorReturnClosedResolveSharedFun) \
+ V(AsyncGeneratorReturnResolveSharedFun, \
+ async_generator_return_resolve_shared_fun, \
+ AsyncGeneratorReturnResolveSharedFun) \
+ V(AsyncGeneratorYieldResolveSharedFun, \
+ async_generator_yield_resolve_shared_fun, \
+ AsyncGeneratorYieldResolveSharedFun) \
+ V(AsyncIteratorValueUnwrapSharedFun, async_iterator_value_unwrap_shared_fun, \
+ AsyncIteratorValueUnwrapSharedFun) \
+ V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \
+ V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
+ V(NumberStringCache, number_string_cache, NumberStringCache) \
+ V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \
+ PromiseAllResolveElementSharedFun) \
+ V(PromiseAllSettledRejectElementSharedFun, \
+ promise_all_settled_reject_element_shared_fun, \
+ PromiseAllSettledRejectElementSharedFun) \
+ V(PromiseAllSettledResolveElementSharedFun, \
+ promise_all_settled_resolve_element_shared_fun, \
+ PromiseAllSettledResolveElementSharedFun) \
+ V(PromiseAnyRejectElementSharedFun, promise_any_reject_element_shared_fun, \
+ PromiseAnyRejectElementSharedFun) \
+ V(PromiseCapabilityDefaultRejectSharedFun, \
+ promise_capability_default_reject_shared_fun, \
+ PromiseCapabilityDefaultRejectSharedFun) \
+ V(PromiseCapabilityDefaultResolveSharedFun, \
+ promise_capability_default_resolve_shared_fun, \
+ PromiseCapabilityDefaultResolveSharedFun) \
+ V(PromiseCatchFinallySharedFun, promise_catch_finally_shared_fun, \
+ PromiseCatchFinallySharedFun) \
+ V(PromiseGetCapabilitiesExecutorSharedFun, \
+ promise_get_capabilities_executor_shared_fun, \
+ PromiseGetCapabilitiesExecutorSharedFun) \
+ V(PromiseResolveProtector, promise_resolve_protector, \
+ PromiseResolveProtector) \
+ V(PromiseSpeciesProtector, promise_species_protector, \
+ PromiseSpeciesProtector) \
+ V(PromiseThenFinallySharedFun, promise_then_finally_shared_fun, \
+ PromiseThenFinallySharedFun) \
+ V(PromiseThenProtector, promise_then_protector, PromiseThenProtector) \
+ V(PromiseThrowerFinallySharedFun, promise_thrower_finally_shared_fun, \
+ PromiseThrowerFinallySharedFun) \
+ V(PromiseValueThunkFinallySharedFun, promise_value_thunk_finally_shared_fun, \
+ PromiseValueThunkFinallySharedFun) \
+ V(ProxyRevokeSharedFun, proxy_revoke_shared_fun, ProxyRevokeSharedFun) \
+ V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector) \
+ V(SetIteratorProtector, set_iterator_protector, SetIteratorProtector) \
+ V(SingleCharacterStringCache, single_character_string_cache, \
+ SingleCharacterStringCache) \
+ V(StringIteratorProtector, string_iterator_protector, \
+ StringIteratorProtector) \
+ V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector)
-#define TORQUE_INTERNAL_CLASS_LIST_CSA_ADAPTER(V, NAME, Name, name) \
- V(Name##Map, name##_map, Name##Map)
-
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
@@ -106,6 +157,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
V(match_symbol, match_symbol, MatchSymbol) \
V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(message_string, message_string, MessageString) \
V(MetaMap, meta_map, MetaMap) \
V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
@@ -178,8 +230,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \
V(zero_string, zero_string, ZeroString) \
- TORQUE_INTERNAL_CLASS_LIST_GENERATOR(TORQUE_INTERNAL_CLASS_LIST_CSA_ADAPTER, \
- V)
+ TORQUE_INTERNAL_MAP_CSA_LIST(V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
@@ -430,6 +481,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<HeapObject>(value);
}
+ TNode<JSAggregateError> HeapObjectToJSAggregateError(
+ TNode<HeapObject> heap_object, Label* fail);
+
TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsJSArray(heap_object), fail);
@@ -576,7 +630,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32BinaryNot(TaggedEqual(a, b));
}
- TNode<Object> NoContextConstant();
+ TNode<Smi> NoContextConstant();
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
TNode<std::remove_pointer<std::remove_reference<decltype( \
@@ -1065,6 +1119,31 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
+ // Convert external pointer from on-V8-heap representation to an actual
+ // external pointer value.
+ TNode<RawPtrT> DecodeExternalPointer(
+ TNode<ExternalPointerT> encoded_pointer) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ TNode<RawPtrT> value = ReinterpretCast<RawPtrT>(encoded_pointer);
+ if (V8_HEAP_SANDBOX_BOOL) {
+ value = UncheckedCast<RawPtrT>(
+ WordXor(value, UintPtrConstant(kExternalPointerSalt)));
+ }
+ return value;
+ }
+
+ // Convert external pointer value to on-V8-heap representation.
+ // This should eventually become a call to a non-allocating runtime function.
+ TNode<ExternalPointerT> EncodeExternalPointer(TNode<RawPtrT> pointer) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ TNode<RawPtrT> encoded_pointer = pointer;
+ if (V8_HEAP_SANDBOX_BOOL) {
+ encoded_pointer = UncheckedCast<RawPtrT>(
+ WordXor(encoded_pointer, UintPtrConstant(kExternalPointerSalt)));
+ }
+ return ReinterpretCast<ExternalPointerT>(encoded_pointer);
+ }
+
// Load value from current parent frame by given offset in bytes.
TNode<Object> LoadFromParentFrame(int offset);
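EncodeExternalPointer and DecodeExternalPointer above are inverses: with the heap sandbox enabled both XOR the raw pointer with the same kExternalPointerSalt, so decoding an encoded pointer returns the original value; without the sandbox both are plain reinterpreting casts. A minimal sketch of that round-trip property, with a made-up salt value:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSalt = 0xA5A55A5Au;  // placeholder, not V8's kExternalPointerSalt

uintptr_t Encode(uintptr_t raw_ptr) { return raw_ptr ^ kSalt; }
uintptr_t Decode(uintptr_t encoded) { return encoded ^ kSalt; }

int main() {
  uintptr_t p = 0x12345678u;
  assert(Decode(Encode(p)) == p);  // XOR with the same salt is self-inverse
}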
@@ -1781,6 +1860,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> end_address,
TNode<Object> value);
+ // Marks the FixedArray copy-on-write without moving it.
+ void MakeFixedArrayCOW(TNode<FixedArray> array);
+
TNode<Cell> AllocateCellWithValue(
TNode<Object> value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
TNode<Cell> AllocateSmiCell(int value = 0) {
@@ -2047,35 +2129,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class DestroySource { kNo, kYes };
- // Collect the callable |maybe_target| feedback for either a CALL_IC or
- // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|. There are
- // two modes for feedback collection:
- //
- // kCollectFeedbackCell - collect JSFunctions, but devolve to the
- // FeedbackCell as long as all JSFunctions
- // seen share the same one.
- // kDontCollectFeedbackCell - collect JSFunctions without devolving
- // to the FeedbackCell in case a
- // different JSFunction appears. Go directly
- // to the Megamorphic sentinel value in this
- // case.
- enum class CallableFeedbackMode {
- kCollectFeedbackCell,
- kDontCollectFeedbackCell
- };
- void CollectCallableFeedback(TNode<Object> maybe_target,
- TNode<Context> context,
- TNode<FeedbackVector> feedback_vector,
- TNode<UintPtrT> slot_id,
- CallableFeedbackMode mode);
-
- // Collect CALL_IC feedback for |maybe_target| function in the
- // |feedback_vector| at |slot_id|, and the call counts in
- // the |feedback_vector| at |slot_id+1|.
- void CollectCallFeedback(TNode<Object> maybe_target, TNode<Context> context,
- TNode<HeapObject> maybe_feedback_vector,
- TNode<UintPtrT> slot_id);
-
// Increment the call count for a CALL_IC or construct call.
// The call count is located at feedback_vector[slot_id + 1].
void IncrementCallCount(TNode<FeedbackVector> feedback_vector,
@@ -2415,14 +2468,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<BigInt>* var_maybe_bigint,
TVariable<Smi>* var_feedback);
+ TNode<Int32T> TruncateNumberToWord32(TNode<Number> value);
// Truncate the floating point value of a HeapNumber to an Int32.
TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>* output,
Label* if_smi);
+ void TryFloat32ToSmi(TNode<Float32T> number, TVariable<Smi>* output,
+ Label* if_smi);
void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>* output,
Label* if_smi);
+ TNode<Number> ChangeFloat32ToTagged(TNode<Float32T> value);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
@@ -2430,6 +2487,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(TNode<Number> value);
+ TNode<Int32T> ChangeTaggedNonSmiToInt32(TNode<Context> context,
+ TNode<HeapObject> input);
+ TNode<Float64T> ChangeTaggedToFloat64(TNode<Context> context,
+ TNode<Object> input);
+
void TaggedToNumeric(TNode<Context> context, TNode<Object> value,
TVariable<Numeric>* var_numeric);
void TaggedToNumericWithFeedback(TNode<Context> context, TNode<Object> value,
@@ -2546,6 +2608,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSAggregateError(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2565,6 +2628,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
+ TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
TNode<BoolT> IsJSPromiseMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
@@ -2643,8 +2708,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
- TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid(
- TNode<NativeContext> native_context);
+ TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
TNode<BoolT> IsMockArrayBufferAllocatorFlag() {
@@ -2698,6 +2762,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool IsFastElementsKind(ElementsKind kind) {
return v8::internal::IsFastElementsKind(kind);
}
+ TNode<BoolT> IsFastOrNonExtensibleOrSealedElementsKind(
+ TNode<Int32T> elements_kind);
+
TNode<BoolT> IsDictionaryElementsKind(TNode<Int32T> elements_kind) {
return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS));
}
@@ -2812,43 +2879,52 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Decodes an unsigned (!) value from |word| to a word-size node.
TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
- uint32_t mask);
+ uintptr_t mask);
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
- TNode<Word32T> UpdateWord32(TNode<Word32T> word, TNode<Uint32T> value) {
- return UpdateWord32(word, value, BitField::kShift, BitField::kMask);
+ TNode<Word32T> UpdateWord32(TNode<Word32T> word, TNode<Uint32T> value,
+ bool starts_as_zero = false) {
+ return UpdateWord32(word, value, BitField::kShift, BitField::kMask,
+ starts_as_zero);
}
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
- TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value) {
- return UpdateWord(word, value, BitField::kShift, BitField::kMask);
+ TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value,
+ bool starts_as_zero = false) {
+ return UpdateWord(word, value, BitField::kShift, BitField::kMask,
+ starts_as_zero);
}
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
- TNode<Word32T> UpdateWordInWord32(TNode<Word32T> word,
- TNode<UintPtrT> value) {
- return UncheckedCast<Uint32T>(TruncateIntPtrToInt32(
- Signed(UpdateWord<BitField>(ChangeUint32ToWord(word), value))));
+ TNode<Word32T> UpdateWordInWord32(TNode<Word32T> word, TNode<UintPtrT> value,
+ bool starts_as_zero = false) {
+ return UncheckedCast<Uint32T>(
+ TruncateIntPtrToInt32(Signed(UpdateWord<BitField>(
+ ChangeUint32ToWord(word), value, starts_as_zero))));
}
// Returns a node that contains the updated values of a |BitField|.
template <typename BitField>
- TNode<WordT> UpdateWord32InWord(TNode<WordT> word, TNode<Uint32T> value) {
- return UpdateWord<BitField>(word, ChangeUint32ToWord(value));
+ TNode<WordT> UpdateWord32InWord(TNode<WordT> word, TNode<Uint32T> value,
+ bool starts_as_zero = false) {
+ return UpdateWord<BitField>(word, ChangeUint32ToWord(value),
+ starts_as_zero);
}
// Returns a node that contains the updated {value} inside {word} starting
// at {shift} and fitting in {mask}.
TNode<Word32T> UpdateWord32(TNode<Word32T> word, TNode<Uint32T> value,
- uint32_t shift, uint32_t mask);
+ uint32_t shift, uint32_t mask,
+ bool starts_as_zero = false);
// Returns a node that contains the updated {value} inside {word} starting
// at {shift} and fitting in {mask}.
TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value,
- uint32_t shift, uint32_t mask);
+ uint32_t shift, uintptr_t mask,
+ bool starts_as_zero = false);
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
@@ -3593,8 +3669,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsDebugActive();
// JSArrayBuffer helpers
- TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
- TNode<RawPtrT> LoadJSArrayBufferBackingStore(
+ TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
@@ -3671,10 +3746,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<HeapObject> MakeTypeError(MessageTemplate message,
TNode<Context> context, TArgs... args) {
STATIC_ASSERT(sizeof...(TArgs) <= 3);
- const TNode<Object> make_type_error = LoadContextElement(
- LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
- return CAST(Call(context, make_type_error, UndefinedConstant(),
- SmiConstant(message), args...));
+ return CAST(CallRuntime(Runtime::kNewTypeError, context,
+ SmiConstant(message), args...));
}
void Abort(AbortReason reason) {
@@ -3844,6 +3917,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> RefillMathRandom(TNode<NativeContext> native_context);
+ void RemoveFinalizationRegistryCellFromUnregisterTokenMap(
+ TNode<JSFinalizationRegistry> finalization_registry,
+ TNode<WeakCell> weak_cell);
+
private:
friend class CodeStubArguments;
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 595e59f551..c436c57407 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -30,6 +30,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/maybe-handles.h"
@@ -42,10 +43,11 @@
#include "src/objects/map.h"
#include "src/objects/object-list-macros.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/rewriter.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/snapshot/code-serializer.h"
#include "src/utils/ostreams.h"
@@ -179,13 +181,15 @@ CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
return UpdateState(FinalizeJobImpl(shared_info, isolate), State::kSucceeded);
}
-void UnoptimizedCompilationJob::RecordCompilationStats(Isolate* isolate) const {
+namespace {
+
+void RecordUnoptimizedCompilationStats(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info) {
int code_size;
- if (compilation_info()->has_bytecode_array()) {
- code_size = compilation_info()->bytecode_array()->SizeIncludingMetadata();
+ if (shared_info->HasBytecodeArray()) {
+ code_size = shared_info->GetBytecodeArray().SizeIncludingMetadata();
} else {
- DCHECK(compilation_info()->has_asm_wasm_data());
- code_size = compilation_info()->asm_wasm_data()->Size();
+ code_size = shared_info->asm_wasm_data().Size();
}
Counters* counters = isolate->counters();
@@ -197,27 +201,30 @@ void UnoptimizedCompilationJob::RecordCompilationStats(Isolate* isolate) const {
// Also add total time (there's now already timer_ on the base class).
}
-void UnoptimizedCompilationJob::RecordFunctionCompilation(
- CodeEventListener::LogEventsAndTags tag, Handle<SharedFunctionInfo> shared,
- Isolate* isolate) const {
+void RecordUnoptimizedFunctionCompilation(
+ Isolate* isolate, CodeEventListener::LogEventsAndTags tag,
+ Handle<SharedFunctionInfo> shared, base::TimeDelta time_taken_to_execute,
+ base::TimeDelta time_taken_to_finalize) {
Handle<AbstractCode> abstract_code;
- if (compilation_info()->has_bytecode_array()) {
+ if (shared->HasBytecodeArray()) {
abstract_code =
- Handle<AbstractCode>::cast(compilation_info()->bytecode_array());
+ handle(AbstractCode::cast(shared->GetBytecodeArray()), isolate);
} else {
- DCHECK(compilation_info()->has_asm_wasm_data());
+ DCHECK(shared->HasAsmWasmData());
abstract_code =
Handle<AbstractCode>::cast(BUILTIN_CODE(isolate, InstantiateAsmJs));
}
- double time_taken_ms = time_taken_to_execute_.InMillisecondsF() +
- time_taken_to_finalize_.InMillisecondsF();
+ double time_taken_ms = time_taken_to_execute.InMillisecondsF() +
+ time_taken_to_finalize.InMillisecondsF();
Handle<Script> script(Script::cast(shared->script()), isolate);
LogFunctionCompilation(tag, shared, script, abstract_code, false,
time_taken_ms, isolate);
}
+} // namespace
+
// ----------------------------------------------------------------------------
// Implementation of OptimizedCompilationJob
@@ -383,13 +390,15 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
return literal->scope()->IsAsmModule();
}
-void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared_info,
- ParseInfo* parse_info, Isolate* isolate) {
- if (!FLAG_interpreted_frames_native_stack) {
- shared_info->set_bytecode_array(*bytecode_array);
+void InstallInterpreterTrampolineCopy(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info) {
+ DCHECK(FLAG_interpreted_frames_native_stack);
+ if (!shared_info->function_data().IsBytecodeArray()) {
+ DCHECK(!shared_info->HasBytecodeArray());
return;
}
+ Handle<BytecodeArray> bytecode_array(shared_info->GetBytecodeArray(),
+ isolate);
Handle<Code> code = isolate->factory()->CopyCode(Handle<Code>::cast(
isolate->factory()->interpreter_entry_trampoline_for_profiling()));
@@ -419,9 +428,23 @@ void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
script_name, line_num, column_num));
}
+void InstallCoverageInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ Handle<CoverageInfo> coverage_info) {
+ DCHECK(isolate->is_block_code_coverage());
+ isolate->debug()->InstallCoverageInfo(shared, coverage_info);
+}
+
+void InstallCoverageInfo(OffThreadIsolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Handle<CoverageInfo> coverage_info) {
+ // We should only have coverage info when finalizing on the main thread.
+ UNREACHABLE();
+}
+
+template <typename LocalIsolate>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
- ParseInfo* parse_info, Isolate* isolate) {
+ LocalIsolate* isolate) {
DCHECK_EQ(shared_info->language_mode(),
compilation_info->literal()->language_mode());
@@ -440,63 +463,52 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
shared_info->set_is_asm_wasm_broken(true);
}
- InstallBytecodeArray(compilation_info->bytecode_array(), shared_info,
- parse_info, isolate);
+ shared_info->set_bytecode_array(*compilation_info->bytecode_array());
Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
isolate, compilation_info->feedback_vector_spec());
shared_info->set_feedback_metadata(*feedback_metadata);
} else {
DCHECK(compilation_info->has_asm_wasm_data());
+ // We should only have asm/wasm data when finalizing on the main thread.
+ DCHECK((std::is_same<LocalIsolate, Isolate>::value));
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
}
- // Install coverage info on the shared function info.
if (compilation_info->has_coverage_info() &&
!shared_info->HasCoverageInfo()) {
- DCHECK(isolate->is_block_code_coverage());
- isolate->debug()->InstallCoverageInfo(shared_info,
- compilation_info->coverage_info());
+ InstallCoverageInfo(isolate, shared_info,
+ compilation_info->coverage_info());
}
}
-void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
- Handle<SharedFunctionInfo> shared_info,
- ParseInfo* parse_info, OffThreadIsolate* isolate) {
- DCHECK_EQ(shared_info->language_mode(),
- compilation_info->literal()->language_mode());
-
- // Update the shared function info with the scope info.
- Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
- shared_info->set_scope_info(*scope_info);
-
- DCHECK(compilation_info->has_bytecode_array());
- DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
- DCHECK(!compilation_info->has_asm_wasm_data());
- DCHECK(!shared_info->HasFeedbackMetadata());
-
- // If the function failed asm-wasm compilation, mark asm_wasm as broken
- // to ensure we don't try to compile as asm-wasm.
- if (compilation_info->literal()->scope()->IsAsmModule()) {
- shared_info->set_is_asm_wasm_broken(true);
+void LogUnoptimizedCompilation(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
+ UnoptimizedCompileFlags flags,
+ base::TimeDelta time_taken_to_execute,
+ base::TimeDelta time_taken_to_finalize) {
+ CodeEventListener::LogEventsAndTags log_tag;
+ if (flags.is_toplevel()) {
+ log_tag = flags.is_eval() ? CodeEventListener::EVAL_TAG
+ : CodeEventListener::SCRIPT_TAG;
+ } else {
+ log_tag = flags.is_lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
+ : CodeEventListener::FUNCTION_TAG;
}
- shared_info->set_bytecode_array(*compilation_info->bytecode_array());
-
- Handle<FeedbackMetadata> feedback_metadata =
- FeedbackMetadata::New(isolate, compilation_info->feedback_vector_spec());
- shared_info->set_feedback_metadata(*feedback_metadata);
-
- DCHECK(!compilation_info->has_coverage_info());
+ RecordUnoptimizedFunctionCompilation(isolate, log_tag, shared_info,
+ time_taken_to_execute,
+ time_taken_to_finalize);
+ RecordUnoptimizedCompilationStats(isolate, shared_info);
}
template <typename LocalIsolate>
void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
ParseInfo* parse_info,
LocalIsolate* isolate) {
- DCHECK(parse_info->is_toplevel());
+ DCHECK(parse_info->flags().is_toplevel());
if (script->shared_function_infos().length() > 0) {
DCHECK_EQ(script->shared_function_infos().length(),
parse_info->max_function_literal_id() + 1);
@@ -524,63 +536,32 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
literal->has_static_private_methods_or_accessors());
}
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(
- UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
- ParseInfo* parse_info = job->parse_info();
-
- SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), *shared_info);
-
- CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
- if (status == CompilationJob::SUCCEEDED) {
- InstallUnoptimizedCode(compilation_info, shared_info, parse_info, isolate);
-
- // It's possible that source position collection was enabled after the
- // background compile was started in which the compiled bytecode will not be
- // missing source positions (for instance by enabling the cpu profiler). So
- // force source position collection now in that case.
- if (!parse_info->collect_source_positions() &&
- isolate->NeedsDetailedOptimizedCodeLineInfo()) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
- }
-
- CodeEventListener::LogEventsAndTags log_tag;
- if (parse_info->is_toplevel()) {
- log_tag = compilation_info->is_eval() ? CodeEventListener::EVAL_TAG
- : CodeEventListener::SCRIPT_TAG;
- } else {
- log_tag = parse_info->lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
- : CodeEventListener::FUNCTION_TAG;
- }
- job->RecordFunctionCompilation(log_tag, shared_info, isolate);
- job->RecordCompilationStats(isolate);
- }
- return status;
-}
-
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(
+template <typename LocalIsolate>
+CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
- OffThreadIsolate* isolate) {
+ LocalIsolate* isolate,
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data_list) {
UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
- ParseInfo* parse_info = job->parse_info();
SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), *shared_info);
CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
- InstallUnoptimizedCode(compilation_info, shared_info, parse_info, isolate);
-
- // TODO(leszeks): Record the function compilation and compilation stats.
+ InstallUnoptimizedCode(compilation_info, shared_info, isolate);
+ finalize_unoptimized_compilation_data_list->emplace_back(
+ isolate, shared_info, job->time_taken_to_execute(),
+ job->time_taken_to_finalize());
}
return status;
}
-std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
+std::unique_ptr<UnoptimizedCompilationJob>
+ExecuteSingleUnoptimizedCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* inner_function_jobs) {
- if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
+ std::vector<FunctionLiteral*>* eager_inner_literals) {
+ if (UseAsmWasm(literal, parse_info->flags().is_asm_wasm_broken())) {
std::unique_ptr<UnoptimizedCompilationJob> asm_job(
AsmJs::NewCompilationJob(parse_info, literal, allocator));
if (asm_job->ExecuteJob() == CompilationJob::SUCCEEDED) {
@@ -592,21 +573,35 @@ std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
    // with a validation error or another error that could be solved by falling
// through to standard unoptimized compile.
}
- std::vector<FunctionLiteral*> eager_inner_literals;
std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
- parse_info, literal, allocator, &eager_inner_literals));
+ parse_info, literal, allocator, eager_inner_literals));
if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
// Compilation failed, return null.
return std::unique_ptr<UnoptimizedCompilationJob>();
}
+ return job;
+}
+
+std::unique_ptr<UnoptimizedCompilationJob>
+RecursivelyExecuteUnoptimizedCompilationJobs(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ UnoptimizedCompilationJobList* inner_function_jobs) {
+ std::vector<FunctionLiteral*> eager_inner_literals;
+ std::unique_ptr<UnoptimizedCompilationJob> job =
+ ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
+ &eager_inner_literals);
+
+ if (!job) return std::unique_ptr<UnoptimizedCompilationJob>();
+
// Recursively compile eager inner literals.
for (FunctionLiteral* inner_literal : eager_inner_literals) {
std::unique_ptr<UnoptimizedCompilationJob> inner_job(
- ExecuteUnoptimizedCompileJobs(parse_info, inner_literal, allocator,
- inner_function_jobs));
+ RecursivelyExecuteUnoptimizedCompilationJobs(
+ parse_info, inner_literal, allocator, inner_function_jobs));
// Compilation failed, return null.
if (!inner_job) return std::unique_ptr<UnoptimizedCompilationJob>();
inner_function_jobs->emplace_front(std::move(inner_job));
@@ -615,39 +610,14 @@ std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
return job;
}
-std::unique_ptr<UnoptimizedCompilationJob> GenerateUnoptimizedCode(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* inner_function_jobs) {
- DisallowHeapAccess no_heap_access;
- DCHECK(inner_function_jobs->empty());
-
- std::unique_ptr<UnoptimizedCompilationJob> job;
- if (Compiler::Analyze(parse_info)) {
- job = ExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
- allocator, inner_function_jobs);
- }
-
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
-
- return job;
-}
-
-MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
- Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
- AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope) {
- EnsureSharedFunctionInfosArrayOnScript(script, parse_info, isolate);
- parse_info->ast_value_factory()->Internalize(isolate);
-
- if (!Compiler::Analyze(parse_info)) return MaybeHandle<SharedFunctionInfo>();
+bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ Isolate* isolate, Handle<SharedFunctionInfo> outer_shared_info,
+ Handle<Script> script, ParseInfo* parse_info,
+ AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope,
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data_list) {
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
- // Prepare and execute compilation of the outer-most function.
- // Create the SharedFunctionInfo and add it to the script's list.
- Handle<SharedFunctionInfo> top_level =
- isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
- script, true);
-
std::vector<FunctionLiteral*> functions_to_compile;
functions_to_compile.push_back(parse_info->literal());
@@ -657,57 +627,42 @@ MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(literal, script, isolate);
if (shared_info->is_compiled()) continue;
- if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
- std::unique_ptr<UnoptimizedCompilationJob> asm_job(
- AsmJs::NewCompilationJob(parse_info, literal, allocator));
- if (asm_job->ExecuteJob() == CompilationJob::SUCCEEDED &&
- FinalizeUnoptimizedCompilationJob(asm_job.get(), shared_info,
- isolate) ==
- CompilationJob::SUCCEEDED) {
- continue;
- }
- // asm.js validation failed, fall through to standard unoptimized compile.
- // Note: we rely on the fact that AsmJs jobs have done all validation in
- // the PrepareJob and ExecuteJob phases and can't fail in FinalizeJob with
- // with a validation error or another error that could be solve by falling
- // through to standard unoptimized compile.
- }
-
- std::unique_ptr<UnoptimizedCompilationJob> job(
- interpreter::Interpreter::NewCompilationJob(
- parse_info, literal, allocator, &functions_to_compile));
- if (job->ExecuteJob() == CompilationJob::FAILED ||
- FinalizeUnoptimizedCompilationJob(job.get(), shared_info, isolate) ==
- CompilationJob::FAILED) {
- return MaybeHandle<SharedFunctionInfo>();
- }
+ std::unique_ptr<UnoptimizedCompilationJob> job =
+ ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
+ &functions_to_compile);
+ if (!job) return false;
- if (FLAG_stress_lazy_source_positions) {
- // Collect source positions immediately to try and flush out bytecode
- // mismatches.
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ if (FinalizeSingleUnoptimizedCompilationJob(
+ job.get(), shared_info, isolate,
+ finalize_unoptimized_compilation_data_list) !=
+ CompilationJob::SUCCEEDED) {
+ return false;
}
- if (shared_info.is_identical_to(top_level)) {
+ if (shared_info.is_identical_to(outer_shared_info)) {
// Ensure that the top level function is retained.
*is_compiled_scope = shared_info->is_compiled_scope();
DCHECK(is_compiled_scope->is_compiled());
}
}
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
+ // Report any warnings generated during compilation.
+ if (parse_info->pending_error_handler()->has_pending_warnings()) {
+ parse_info->pending_error_handler()->PrepareWarnings(isolate);
+ }
- return top_level;
+ return true;
}
template <typename LocalIsolate>
-bool FinalizeUnoptimizedCode(
+bool FinalizeAllUnoptimizedCompilationJobs(
ParseInfo* parse_info, LocalIsolate* isolate,
Handle<SharedFunctionInfo> shared_info,
UnoptimizedCompilationJob* outer_function_job,
- UnoptimizedCompilationJobList* inner_function_jobs) {
+ UnoptimizedCompilationJobList* inner_function_jobs,
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data_list) {
// TODO(leszeks): Re-enable.
// DCHECK(AllowCompilation::IsAllowed(isolate));
@@ -718,8 +673,10 @@ bool FinalizeUnoptimizedCode(
DeclarationScope::AllocateScopeInfos(parse_info, isolate);
// Finalize the outer-most function's compilation job.
- if (FinalizeUnoptimizedCompilationJob(outer_function_job, shared_info,
- isolate) != CompilationJob::SUCCEEDED) {
+ if (FinalizeSingleUnoptimizedCompilationJob(
+ outer_function_job, shared_info, isolate,
+ finalize_unoptimized_compilation_data_list) !=
+ CompilationJob::SUCCEEDED) {
return false;
}
@@ -733,8 +690,9 @@ bool FinalizeUnoptimizedCode(
inner_job->compilation_info()->literal(), script, isolate);
// The inner function might be compiled already if compiling for debug.
if (inner_shared_info->is_compiled()) continue;
- if (FinalizeUnoptimizedCompilationJob(inner_job.get(), inner_shared_info,
- isolate) !=
+ if (FinalizeSingleUnoptimizedCompilationJob(
+ inner_job.get(), inner_shared_info, isolate,
+ finalize_unoptimized_compilation_data_list) !=
CompilationJob::SUCCEEDED) {
return false;
}
@@ -742,7 +700,7 @@ bool FinalizeUnoptimizedCode(
// Report any warnings generated during compilation.
if (parse_info->pending_error_handler()->has_pending_warnings()) {
- parse_info->pending_error_handler()->ReportWarnings(isolate, script);
+ parse_info->pending_error_handler()->PrepareWarnings(isolate);
}
return true;
@@ -994,15 +952,26 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
-bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
- ParseInfo* parse_info,
- Compiler::ClearExceptionFlag flag) {
- if (flag == Compiler::CLEAR_EXCEPTION) {
- isolate->clear_pending_exception();
- } else if (!isolate->has_pending_exception()) {
- if (parse_info->pending_error_handler()->has_pending_error()) {
- parse_info->pending_error_handler()->ReportErrors(
- isolate, script, parse_info->ast_value_factory());
+bool FailAndClearPendingException(Isolate* isolate) {
+ isolate->clear_pending_exception();
+ return false;
+}
+
+template <typename LocalIsolate>
+bool PreparePendingException(LocalIsolate* isolate, ParseInfo* parse_info) {
+ if (parse_info->pending_error_handler()->has_pending_error()) {
+ parse_info->pending_error_handler()->PrepareErrors(
+ isolate, parse_info->ast_value_factory());
+ }
+ return false;
+}
+
+bool FailWithPreparedPendingException(
+ Isolate* isolate, Handle<Script> script,
+ const PendingCompilationErrorHandler* pending_error_handler) {
+ if (!isolate->has_pending_exception()) {
+ if (pending_error_handler->has_pending_error()) {
+ pending_error_handler->ReportErrors(isolate, script);
} else {
isolate->StackOverflow();
}
@@ -1010,25 +979,74 @@ bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
return false;
}
-bool FailWithPendingException(OffThreadIsolate* isolate, Handle<Script> script,
+bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
ParseInfo* parse_info,
Compiler::ClearExceptionFlag flag) {
- // TODO(leszeks): Implement.
- UNREACHABLE();
+ if (flag == Compiler::CLEAR_EXCEPTION) {
+ return FailAndClearPendingException(isolate);
+ }
+
+ PreparePendingException(isolate, parse_info);
+ return FailWithPreparedPendingException(isolate, script,
+ parse_info->pending_error_handler());
+}
+
+void FinalizeUnoptimizedCompilation(
+ Isolate* isolate, Handle<Script> script,
+ const UnoptimizedCompileFlags& flags,
+ const UnoptimizedCompileState* compile_state,
+ const FinalizeUnoptimizedCompilationDataList&
+ finalize_unoptimized_compilation_data_list) {
+ if (compile_state->pending_error_handler()->has_pending_warnings()) {
+ compile_state->pending_error_handler()->ReportWarnings(isolate, script);
+ }
+
+ bool need_source_positions = FLAG_stress_lazy_source_positions ||
+ (!flags.collect_source_positions() &&
+ isolate->NeedsSourcePositionsForProfiling());
+
+ for (const auto& finalize_data : finalize_unoptimized_compilation_data_list) {
+ Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
+ // It's unlikely, but possible, that the bytecode was flushed between being
+ // allocated and now, so guard against that case, and against it being
+ // flushed in the middle of this loop.
+ IsCompiledScope is_compiled_scope(*shared_info, isolate);
+ if (!is_compiled_scope.is_compiled()) continue;
+
+ if (need_source_positions) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ }
+ if (FLAG_interpreted_frames_native_stack) {
+ InstallInterpreterTrampolineCopy(isolate, shared_info);
+ }
+
+ LogUnoptimizedCompilation(isolate, shared_info, flags,
+ finalize_data.time_taken_to_execute(),
+ finalize_data.time_taken_to_finalize());
+ }
}
-void FinalizeScriptCompilation(Isolate* isolate, Handle<Script> script,
- ParseInfo* parse_info) {
+void FinalizeUnoptimizedScriptCompilation(
+ Isolate* isolate, Handle<Script> script,
+ const UnoptimizedCompileFlags& flags,
+ const UnoptimizedCompileState* compile_state,
+ const FinalizeUnoptimizedCompilationDataList&
+ finalize_unoptimized_compilation_data_list) {
+ FinalizeUnoptimizedCompilation(isolate, script, flags, compile_state,
+ finalize_unoptimized_compilation_data_list);
+
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- // Register any pending parallel tasks with the associated SFI.
- if (parse_info->parallel_tasks()) {
- CompilerDispatcher* dispatcher = parse_info->parallel_tasks()->dispatcher();
- for (auto& it : *parse_info->parallel_tasks()) {
+ UnoptimizedCompileState::ParallelTasks* parallel_tasks =
+ compile_state->parallel_tasks();
+ if (parallel_tasks) {
+ CompilerDispatcher* dispatcher = parallel_tasks->dispatcher();
+ for (auto& it : *parallel_tasks) {
FunctionLiteral* literal = it.first;
CompilerDispatcher::JobId job_id = it.second;
MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
- script->FindSharedFunctionInfo(isolate, literal);
+ script->FindSharedFunctionInfo(isolate,
+ literal->function_literal_id());
Handle<SharedFunctionInfo> shared_for_task;
if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
@@ -1037,42 +1055,22 @@ void FinalizeScriptCompilation(Isolate* isolate, Handle<Script> script,
}
}
}
-}
-void FinalizeScriptCompilation(OffThreadIsolate* isolate, Handle<Script> script,
- ParseInfo* parse_info) {
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- DCHECK(!parse_info->parallel_tasks());
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(isolate, script);
+ }
}
+// Create shared function info for top level and shared function infos array for
+// inner functions.
template <typename LocalIsolate>
-MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
- ParseInfo* parse_info, Handle<Script> script, LocalIsolate* isolate,
- UnoptimizedCompilationJob* outer_function_job,
- UnoptimizedCompilationJobList* inner_function_jobs) {
- // Internalize ast values onto the heap.
- parse_info->ast_value_factory()->Internalize(isolate);
-
- // Create shared function infos for top level and shared function infos array
- // for inner functions.
+Handle<SharedFunctionInfo> CreateTopLevelSharedFunctionInfo(
+ ParseInfo* parse_info, Handle<Script> script, LocalIsolate* isolate) {
EnsureSharedFunctionInfosArrayOnScript(script, parse_info, isolate);
DCHECK_EQ(kNoSourcePosition,
parse_info->literal()->function_token_position());
- Handle<SharedFunctionInfo> shared_info =
- isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
- script, true);
-
- // Finalize compilation of the unoptimized bytecode or asm-js data.
- if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
- outer_function_job, inner_function_jobs)) {
- FailWithPendingException(isolate, script, parse_info,
- Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
- return MaybeHandle<SharedFunctionInfo>();
- }
-
- FinalizeScriptCompilation(isolate, script, parse_info);
-
- return shared_info;
+ return isolate->factory()->NewSharedFunctionInfoForLiteral(
+ parse_info->literal(), script, true);
}
MaybeHandle<SharedFunctionInfo> CompileToplevel(
@@ -1086,8 +1084,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->is_eval() ? RuntimeCallCounterId::kCompileEval
- : RuntimeCallCounterId::kCompileScript);
+ isolate, parse_info->flags().is_eval()
+ ? RuntimeCallCounterId::kCompileEval
+ : RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
@@ -1097,24 +1096,36 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
- HistogramTimer* rate = parse_info->is_eval()
+ HistogramTimer* rate = parse_info->flags().is_eval()
? isolate->counters()->compile_eval()
: isolate->counters()->compile();
HistogramTimerScope timer(rate);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
+ parse_info->flags().is_eval() ? "V8.CompileEval" : "V8.Compile");
- // Generate the unoptimized bytecode or asm-js data.
- MaybeHandle<SharedFunctionInfo> shared_info =
- GenerateUnoptimizedCodeForToplevel(
- isolate, script, parse_info, isolate->allocator(), is_compiled_scope);
- if (shared_info.is_null()) {
+ // Prepare and execute compilation of the outer-most function.
+
+ // Create the SharedFunctionInfo and add it to the script's list.
+ Handle<SharedFunctionInfo> shared_info =
+ CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
+
+ FinalizeUnoptimizedCompilationDataList
+ finalize_unoptimized_compilation_data_list;
+
+ if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ isolate, shared_info, script, parse_info, isolate->allocator(),
+ is_compiled_scope, &finalize_unoptimized_compilation_data_list)) {
FailWithPendingException(isolate, script, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
- FinalizeScriptCompilation(isolate, script, parse_info);
+ // Character stream shouldn't be used again.
+ parse_info->ResetCharacterStream();
+
+ FinalizeUnoptimizedScriptCompilation(
+ isolate, script, parse_info->flags(), parse_info->state(),
+ finalize_unoptimized_compilation_data_list);
return shared_info;
}
@@ -1126,15 +1137,25 @@ std::unique_ptr<UnoptimizedCompilationJob> CompileOnBackgroundThread(
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
- parse_info->is_toplevel()
- ? parse_info->is_eval()
+ parse_info->flags().is_toplevel()
+ ? parse_info->flags().is_eval()
? RuntimeCallCounterId::kCompileBackgroundEval
: RuntimeCallCounterId::kCompileBackgroundScript
: RuntimeCallCounterId::kCompileBackgroundFunction);
// Generate the unoptimized bytecode or asm-js data.
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
- GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
+ DCHECK(inner_function_jobs->empty());
+
+ // TODO(leszeks): Once we can handle asm-js without bailing out of
+ // off-thread finalization entirely, and the finalization is off-thread by
+ // default, this can be changed to the iterative version.
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job =
+ RecursivelyExecuteUnoptimizedCompilationJobs(
+ parse_info, parse_info->literal(), allocator, inner_function_jobs);
+
+ // Character stream shouldn't be used again.
+ parse_info->ResetCharacterStream();
+
return outer_function_job;
}
@@ -1149,55 +1170,67 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate)
- : info_(new ParseInfo(isolate)),
- off_thread_isolate_(FLAG_finalize_streaming_on_background
- ? new OffThreadIsolate(isolate, info_->zone())
- : nullptr),
+ : flags_(UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate, true, construct_language_mode(FLAG_use_strict),
+ REPLMode::kNo)),
+ compile_state_(isolate),
+ info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
+ start_position_(0),
+ end_position_(0),
+ function_literal_id_(kFunctionLiteralIdTopLevel),
stack_size_(i::FLAG_stack_size),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
- allocator_(isolate->allocator()),
timer_(isolate->counters()->compile_script_on_background()),
- collected_source_positions_(false) {
+ language_mode_(info_->language_mode()) {
VMState<PARSER> state(isolate);
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
+
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
- info_->script_id()));
- info_->SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(), true,
- construct_language_mode(FLAG_use_strict),
- REPLMode::kNo);
- language_mode_ = info_->language_mode();
+ info_->flags().script_id()));
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
streamed_data->source_stream.get(), streamed_data->encoding));
info_->set_character_stream(std::move(stream));
- finalize_on_background_thread_ = FLAG_finalize_streaming_on_background;
+ // TODO(leszeks): Add block coverage support to off-thread finalization.
+ finalize_on_background_thread_ =
+ FLAG_finalize_streaming_on_background && !flags_.block_coverage_enabled();
+ if (finalize_on_background_thread()) {
+ off_thread_isolate_ =
+ std::make_unique<OffThreadIsolate>(isolate, info_->zone());
+ }
}
BackgroundCompileTask::BackgroundCompileTask(
- AccountingAllocator* allocator, const ParseInfo* outer_parse_info,
- const AstRawString* function_name, const FunctionLiteral* function_literal,
+ const ParseInfo* outer_parse_info, const AstRawString* function_name,
+ const FunctionLiteral* function_literal,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size)
- : info_(ParseInfo::FromParent(outer_parse_info, allocator, function_literal,
- function_name)),
+ : flags_(UnoptimizedCompileFlags::ForToplevelFunction(
+ outer_parse_info->flags(), function_literal)),
+ compile_state_(*outer_parse_info->state()),
+ info_(ParseInfo::ForToplevelFunction(flags_, &compile_state_,
+ function_literal, function_name)),
+ start_position_(function_literal->start_position()),
+ end_position_(function_literal->end_position()),
+ function_literal_id_(function_literal->function_literal_id()),
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
- allocator_(allocator),
timer_(timer),
language_mode_(info_->language_mode()),
- collected_source_positions_(false),
finalize_on_background_thread_(false) {
- DCHECK(outer_parse_info->is_toplevel());
+ DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
+ DCHECK_NULL(outer_parse_info->extension());
+
DCHECK(!function_literal->is_toplevel());
// Clone the character stream so both can be accessed independently.
std::unique_ptr<Utf16CharacterStream> character_stream =
outer_parse_info->character_stream()->Clone();
- character_stream->Seek(function_literal->start_position());
+ character_stream->Seek(start_position_);
info_->set_character_stream(std::move(character_stream));
// Get preparsed scope data from the function literal.
@@ -1225,14 +1258,14 @@ class OffThreadParseInfoScope {
original_runtime_call_stats_(parse_info_->runtime_call_stats()),
original_stack_limit_(parse_info_->stack_limit()),
worker_thread_scope_(worker_thread_runtime_stats) {
- parse_info_->set_runtime_call_stats(worker_thread_scope_.Get());
- parse_info_->set_stack_limit(GetCurrentStackPosition() - stack_size * KB);
+ parse_info_->SetPerThreadState(GetCurrentStackPosition() - stack_size * KB,
+ worker_thread_scope_.Get());
}
~OffThreadParseInfoScope() {
DCHECK_NOT_NULL(parse_info_);
- parse_info_->set_stack_limit(original_stack_limit_);
- parse_info_->set_runtime_call_stats(original_runtime_call_stats_);
+ parse_info_->SetPerThreadState(original_stack_limit_,
+ original_runtime_call_stats_);
}
private:
@@ -1244,6 +1277,20 @@ class OffThreadParseInfoScope {
DISALLOW_COPY_AND_ASSIGN(OffThreadParseInfoScope);
};
+bool CanOffThreadFinalizeAllJobs(
+ UnoptimizedCompilationJob* outer_job,
+ const UnoptimizedCompilationJobList& inner_function_jobs) {
+ if (!outer_job->can_off_thread_finalize()) return false;
+
+ for (auto& job : inner_function_jobs) {
+ if (!job->can_off_thread_finalize()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
} // namespace
void BackgroundCompileTask::Run() {
@@ -1270,82 +1317,94 @@ void BackgroundCompileTask::Run() {
parser_.reset(new Parser(info_.get()));
parser_->InitializeEmptyScopeChain(info_.get());
- parser_->ParseOnBackground(info_.get());
+ parser_->ParseOnBackground(info_.get(), start_position_, end_position_,
+ function_literal_id_);
if (info_->literal() != nullptr) {
// Parsing has succeeded, compile.
- outer_function_job_ = CompileOnBackgroundThread(info_.get(), allocator_,
- &inner_function_jobs_);
- // Save the language mode and record whether we collected source positions.
- language_mode_ = info_->language_mode();
- collected_source_positions_ = info_->collect_source_positions();
-
- if (finalize_on_background_thread_) {
- DCHECK(info_->is_toplevel());
+ outer_function_job_ = CompileOnBackgroundThread(
+ info_.get(), compile_state_.allocator(), &inner_function_jobs_);
+ }
+ // Save the language mode and record whether we collected source positions.
+ language_mode_ = info_->language_mode();
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground");
+ // We don't currently support off-thread finalization for some jobs (namely,
+ // asm.js), so release the off-thread isolate and fall back to main-thread
+ // finalization.
+ // TODO(leszeks): Still finalize Ignition tasks on the background thread,
+ // and fallback to main-thread finalization for asm.js jobs only.
+ finalize_on_background_thread_ =
+ finalize_on_background_thread_ && outer_function_job_ &&
+ CanOffThreadFinalizeAllJobs(outer_function_job(), *inner_function_jobs());
+
+ if (!finalize_on_background_thread_) {
+ off_thread_isolate_.reset();
+ return;
+ }
- off_thread_isolate_->PinToCurrentThread();
+ // ---
+ // At this point, off-thread compilation has completed and we are off-thread
+ // finalizing.
+ // ---
- OffThreadHandleScope handle_scope(off_thread_isolate_.get());
+ DCHECK(info_->flags().is_toplevel());
- // We don't have the script source or the script origin yet, so use a few
- // default values for them. These will be fixed up during the main-thread
- // merge.
- Handle<Script> script =
- info_->CreateScript(off_thread_isolate_.get(),
- off_thread_isolate_->factory()->empty_string(),
- ScriptOriginOptions(), NOT_NATIVES_CODE);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground");
- Handle<SharedFunctionInfo> outer_function_sfi =
- FinalizeTopLevel(info_.get(), script, off_thread_isolate_.get(),
- outer_function_job_.get(), &inner_function_jobs_)
- .ToHandleChecked();
+ OffThreadIsolate* isolate = off_thread_isolate();
+ isolate->PinToCurrentThread();
- parser_->HandleSourceURLComments(off_thread_isolate_.get(), script);
+ OffThreadHandleScope handle_scope(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.Finish");
- off_thread_isolate_->FinishOffThread();
+ // We don't have the script source, origin, or details yet, so use default
+ // values for them. These will be fixed up during the main-thread merge.
+ Handle<Script> script =
+ info_->CreateScript(isolate, isolate->factory()->empty_string(),
+ kNullMaybeHandle, ScriptOriginOptions());
- // Off-thread handles will become invalid after the handle scope closes,
- // so save the raw object here.
- outer_function_sfi_ = *outer_function_sfi;
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (info_->literal() != nullptr) {
+ info_->ast_value_factory()->Internalize(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.ReleaseParser");
- DCHECK_EQ(language_mode_, info_->language_mode());
- off_thread_scope.reset();
- parser_.reset();
- info_.reset();
- outer_function_job_.reset();
- inner_function_jobs_.clear();
+ Handle<SharedFunctionInfo> shared_info =
+ CreateTopLevelSharedFunctionInfo(info_.get(), script, isolate);
+ if (FinalizeAllUnoptimizedCompilationJobs(
+ info_.get(), isolate, shared_info, outer_function_job_.get(),
+ &inner_function_jobs_, &finalize_unoptimized_compilation_data_)) {
+ maybe_result = shared_info;
}
+
+ parser_->HandleSourceURLComments(isolate, script);
+ } else {
+ DCHECK(!outer_function_job_);
}
-}
-// ----------------------------------------------------------------------------
-// Implementation of Compiler
+ Handle<SharedFunctionInfo> result;
+ if (!maybe_result.ToHandle(&result)) {
+ DCHECK(compile_state_.pending_error_handler()->has_pending_error());
+ PreparePendingException(isolate, info_.get());
+ }
-bool Compiler::Analyze(ParseInfo* parse_info) {
- DCHECK_NOT_NULL(parse_info->literal());
- RuntimeCallTimerScope runtimeTimer(parse_info->runtime_call_stats(),
- RuntimeCallCounterId::kCompileAnalyse,
- RuntimeCallStats::kThreadSpecific);
- if (!Rewriter::Rewrite(parse_info)) return false;
- if (!DeclarationScope::Analyze(parse_info)) return false;
- return true;
-}
+ outer_function_sfi_ = isolate->TransferHandle(maybe_result);
+ script_ = isolate->TransferHandle(script);
-bool Compiler::ParseAndAnalyze(ParseInfo* parse_info,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- if (!parsing::ParseAny(parse_info, shared_info, isolate)) {
- return false;
- }
- return Compiler::Analyze(parse_info);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground.Finish");
+ isolate->FinishOffThread();
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.FinalizeCodeBackground.ReleaseParser");
+ DCHECK_EQ(language_mode_, info_->language_mode());
+ off_thread_scope.reset();
+ parser_.reset();
+ info_.reset();
+ outer_function_job_.reset();
+ inner_function_jobs_.clear();
}
+// ----------------------------------------------------------------------------
+// Implementation of Compiler
+
// static
bool Compiler::CollectSourcePositions(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
@@ -1385,10 +1444,14 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
HistogramTimerScope timer(isolate->counters()->collect_source_positions());
// Set up parse info.
- ParseInfo parse_info(isolate, *shared_info);
- parse_info.set_lazy_compile();
- parse_info.set_collect_source_positions();
- if (FLAG_allow_natives_syntax) parse_info.set_allow_natives_syntax();
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
+ flags.set_is_lazy_compile(true);
+ flags.set_collect_source_positions(true);
+ flags.set_allow_natives_syntax(FLAG_allow_natives_syntax);
+
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state);
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
@@ -1396,9 +1459,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
parsing::ReportErrorsAndStatisticsMode::kNo)) {
// Parsing failed probably as a result of stack exhaustion.
bytecode->SetSourcePositionsFailedToCollect();
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+ return FailAndClearPendingException(isolate);
}
// Character stream shouldn't be used again.
@@ -1409,14 +1470,6 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// wasting time fully parsing them when they won't ever be used.
std::unique_ptr<UnoptimizedCompilationJob> job;
{
- if (!Compiler::Analyze(&parse_info)) {
- // Recompiling failed probably as a result of stack exhaustion.
- bytecode->SetSourcePositionsFailedToCollect();
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
- }
-
job = interpreter::Interpreter::NewSourcePositionCollectionJob(
&parse_info, parse_info.literal(), bytecode, isolate->allocator());
@@ -1424,13 +1477,11 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
job->FinalizeJob(shared_info, isolate) != CompilationJob::SUCCEEDED) {
// Recompiling failed probably as a result of stack exhaustion.
bytecode->SetSourcePositionsFailedToCollect();
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, Compiler::ClearExceptionFlag::CLEAR_EXCEPTION);
+ return FailAndClearPendingException(isolate);
}
}
- DCHECK(job->compilation_info()->collect_source_positions());
+ DCHECK(job->compilation_info()->flags().collect_source_positions());
// If debugging, make sure that instrumented bytecode has the source position
// table set on it as well.
@@ -1467,17 +1518,21 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
+ Handle<Script> script(Script::cast(shared_info->script()), isolate);
+
// Set up parse info.
- ParseInfo parse_info(isolate, *shared_info);
- parse_info.set_lazy_compile();
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
+ flags.set_is_lazy_compile(true);
+
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state);
// Check if the compiler dispatcher has shared_info enqueued for compile.
CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
if (dispatcher->IsEnqueued(shared_info)) {
if (!dispatcher->FinishNow(shared_info)) {
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, flag);
+ return FailWithPendingException(isolate, script, &parse_info, flag);
}
*is_compiled_scope = shared_info->is_compiled_scope();
DCHECK(is_compiled_scope->is_compiled());
@@ -1494,55 +1549,24 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
// Parse and update ParseInfo with the results.
if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, flag);
+ return FailWithPendingException(isolate, script, &parse_info, flag);
}
// Generate the unoptimized bytecode or asm-js data.
- UnoptimizedCompilationJobList inner_function_jobs;
- std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
- GenerateUnoptimizedCode(&parse_info, isolate->allocator(),
- &inner_function_jobs));
- if (!outer_function_job) {
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, flag);
- }
-
- // Internalize ast values onto the heap.
- parse_info.ast_value_factory()->Internalize(isolate);
+ FinalizeUnoptimizedCompilationDataList
+ finalize_unoptimized_compilation_data_list;
- // Finalize compilation of the unoptimized bytecode or asm-js data.
- if (!FinalizeUnoptimizedCode(&parse_info, isolate, shared_info,
- outer_function_job.get(),
- &inner_function_jobs)) {
- return FailWithPendingException(
- isolate, handle(Script::cast(shared_info->script()), isolate),
- &parse_info, flag);
+ if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ isolate, shared_info, script, &parse_info, isolate->allocator(),
+ is_compiled_scope, &finalize_unoptimized_compilation_data_list)) {
+ return FailWithPendingException(isolate, script, &parse_info, flag);
}
+ FinalizeUnoptimizedCompilation(isolate, script, flags, &compile_state,
+ finalize_unoptimized_compilation_data_list);
+
DCHECK(!isolate->has_pending_exception());
- *is_compiled_scope = shared_info->is_compiled_scope();
DCHECK(is_compiled_scope->is_compiled());
-
- if (FLAG_stress_lazy_source_positions) {
- // Collect source positions immediately to try and flush out bytecode
- // mismatches.
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
-
- Handle<Script> script(Script::cast(shared_info->script()), isolate);
-
- // Do the same for eagerly compiled inner functions.
- for (auto&& inner_job : inner_function_jobs) {
- Handle<SharedFunctionInfo> inner_shared_info =
- Compiler::GetSharedFunctionInfo(
- inner_job->compilation_info()->literal(), script, isolate);
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate,
- inner_shared_info);
- }
- }
-
return true;
}
@@ -1601,13 +1625,15 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
bool Compiler::FinalizeBackgroundCompileTask(
BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ClearExceptionFlag flag) {
+ DCHECK(!task->finalize_on_background_thread());
+
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
RuntimeCallTimerScope runtimeTimer(
isolate, RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
HandleScope scope(isolate);
ParseInfo* parse_info = task->info();
- DCHECK(!parse_info->is_toplevel());
+ DCHECK(!parse_info->flags().is_toplevel());
DCHECK(!shared_info->is_compiled());
Handle<Script> script(Script::cast(shared_info->script()), isolate);
@@ -1623,12 +1649,16 @@ bool Compiler::FinalizeBackgroundCompileTask(
// Parsing has succeeded - finalize compilation.
parse_info->ast_value_factory()->Internalize(isolate);
- if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
- task->outer_function_job(),
- task->inner_function_jobs())) {
+ if (!FinalizeAllUnoptimizedCompilationJobs(
+ parse_info, isolate, shared_info, task->outer_function_job(),
+ task->inner_function_jobs(),
+ task->finalize_unoptimized_compilation_data())) {
// Finalization failed - throw an exception.
return FailWithPendingException(isolate, script, parse_info, flag);
}
+ FinalizeUnoptimizedCompilation(
+ isolate, script, parse_info->flags(), parse_info->state(),
+ *task->finalize_unoptimized_compilation_data());
DCHECK(!isolate->has_pending_exception());
DCHECK(shared_info->is_compiled());
@@ -1717,22 +1747,23 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
is_compiled_scope = shared_info->is_compiled_scope();
allow_eval_cache = true;
} else {
- ParseInfo parse_info(isolate);
- parse_info.SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(),
- true, language_mode, REPLMode::kNo);
+ UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate, true, language_mode, REPLMode::kNo);
+ flags.set_is_eval(true);
+ flags.set_parse_restriction(restriction);
- parse_info.set_eval();
- parse_info.set_parse_restriction(restriction);
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state);
parse_info.set_parameters_end_pos(parameters_end_pos);
- DCHECK(!parse_info.is_module());
+ DCHECK(!parse_info.flags().is_module());
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (!context->IsNativeContext()) {
maybe_outer_scope_info = handle(context->scope_info(), isolate);
}
-
- script = parse_info.CreateScript(
- isolate, source, OriginOptionsForEval(outer_info->script()));
+ script =
+ parse_info.CreateScript(isolate, source, kNullMaybeHandle,
+ OriginOptionsForEval(outer_info->script()));
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
// If the position is missing, attempt to get the code offset by
@@ -1817,7 +1848,6 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback)
bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
Handle<i::Object>* source) {
- DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
DCHECK(isolate->modify_code_gen_callback());
DCHECK(source);
@@ -1858,10 +1888,8 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
// allow_code_gen_from_strings can be many things, so we'll always check
// against the 'false' literal, so that e.g. undefined and 'true' are treated
// the same.
- if (!context->allow_code_gen_from_strings().IsFalse(isolate)) {
- if (!original_source->IsString()) {
- return {MaybeHandle<String>(), true};
- }
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate) &&
+ original_source->IsString()) {
return {Handle<String>::cast(original_source), false};
}
@@ -2138,16 +2166,22 @@ struct ScriptCompileTimerScope {
}
};
-void SetScriptFieldsFromDetails(Script script,
- Compiler::ScriptDetails script_details) {
+void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
+ Compiler::ScriptDetails script_details,
+ DisallowHeapAllocation* no_gc) {
Handle<Object> script_name;
if (script_details.name_obj.ToHandle(&script_name)) {
script.set_name(*script_name);
script.set_line_offset(script_details.line_offset);
script.set_column_offset(script_details.column_offset);
}
+ // The API can provide a source map URL, but a source map URL could also have
+ // been inferred by the parser from a magic comment. The latter takes
+ // preference over the former, so we don't want to override the source mapping
+ // URL if it already exists.
Handle<Object> source_map_url;
- if (script_details.source_map_url.ToHandle(&source_map_url)) {
+ if (script_details.source_map_url.ToHandle(&source_map_url) &&
+ script.source_mapping_url(isolate).IsUndefined(isolate)) {
script.set_source_mapping_url(*source_map_url);
}
Handle<FixedArray> host_defined_options;
@@ -2156,31 +2190,139 @@ void SetScriptFieldsFromDetails(Script script,
}
}
-Handle<Script> NewScript(Isolate* isolate, ParseInfo* parse_info,
- Handle<String> source,
- Compiler::ScriptDetails script_details,
- ScriptOriginOptions origin_options,
- NativesFlag natives) {
+Handle<Script> NewScript(
+ Isolate* isolate, ParseInfo* parse_info, Handle<String> source,
+ Compiler::ScriptDetails script_details, ScriptOriginOptions origin_options,
+ NativesFlag natives,
+ MaybeHandle<FixedArray> maybe_wrapped_arguments = kNullMaybeHandle) {
// Create a script object describing the script to be compiled.
- Handle<Script> script =
- parse_info->CreateScript(isolate, source, origin_options, natives);
- SetScriptFieldsFromDetails(*script, script_details);
+ Handle<Script> script = parse_info->CreateScript(
+ isolate, source, maybe_wrapped_arguments, origin_options, natives);
+ DisallowHeapAllocation no_gc;
+ SetScriptFieldsFromDetails(isolate, *script, script_details, &no_gc);
LOG(isolate, ScriptDetails(*script));
return script;
}
-void FixUpOffThreadAllocatedScript(Isolate* isolate, Handle<Script> script,
- Handle<String> source,
- Compiler::ScriptDetails script_details,
- ScriptOriginOptions origin_options,
- NativesFlag natives) {
- DisallowHeapAllocation no_gc;
- DCHECK_EQ(natives, NOT_NATIVES_CODE);
- DCHECK_EQ(script_details.repl_mode, REPLMode::kNo);
- script->set_origin_options(origin_options);
- script->set_source(*source);
- SetScriptFieldsFromDetails(*script, script_details);
- LOG(isolate, ScriptDetails(*script));
+MaybeHandle<SharedFunctionInfo> CompileScriptOnMainThread(
+ const UnoptimizedCompileFlags flags, Handle<String> source,
+ const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, NativesFlag natives,
+ v8::Extension* extension, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state);
+ parse_info.set_extension(extension);
+
+ Handle<Script> script = NewScript(isolate, &parse_info, source,
+ script_details, origin_options, natives);
+ DCHECK_IMPLIES(parse_info.flags().collect_type_profile(),
+ script->IsUserJavaScript());
+ DCHECK_EQ(parse_info.flags().is_repl_mode(), script->is_repl_mode());
+
+ return CompileToplevel(&parse_info, script, isolate, is_compiled_scope);
+}
+
+class StressBackgroundCompileThread : public base::Thread {
+ public:
+ StressBackgroundCompileThread(Isolate* isolate, Handle<String> source)
+ : base::Thread(
+ base::Thread::Options("StressBackgroundCompileThread", 2 * i::MB)),
+ source_(source),
+ streamed_source_(std::make_unique<SourceStream>(source, isolate),
+ v8::ScriptCompiler::StreamedSource::UTF8) {
+ data()->task = std::make_unique<i::BackgroundCompileTask>(data(), isolate);
+ }
+
+ void Run() override { data()->task->Run(); }
+
+ ScriptStreamingData* data() { return streamed_source_.impl(); }
+
+ private:
+ // Dummy external source stream which returns the whole source in one go.
+ // TODO(leszeks): Also test chunking the data.
+ class SourceStream : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+ SourceStream(Handle<String> source, Isolate* isolate) : done_(false) {
+ source_buffer_ = source->ToCString(ALLOW_NULLS, FAST_STRING_TRAVERSAL,
+ &source_length_);
+ }
+
+ size_t GetMoreData(const uint8_t** src) override {
+ if (done_) {
+ return 0;
+ }
+ *src = reinterpret_cast<uint8_t*>(source_buffer_.release());
+ done_ = true;
+
+ return source_length_;
+ }
+
+ private:
+ int source_length_;
+ std::unique_ptr<char[]> source_buffer_;
+ bool done_;
+ };
+
+ Handle<String> source_;
+ v8::ScriptCompiler::StreamedSource streamed_source_;
+};
+
+bool CanBackgroundCompile(const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options,
+ v8::Extension* extension,
+ ScriptCompiler::CompileOptions compile_options,
+ NativesFlag natives) {
+ // TODO(leszeks): Remove the module check once background compilation of
+ // modules is supported.
+ return !origin_options.IsModule() && !extension &&
+ script_details.repl_mode == REPLMode::kNo &&
+ compile_options == ScriptCompiler::kNoCompileOptions &&
+ natives == NOT_NATIVES_CODE;
+}
+
+MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
+ Handle<String> source, const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, Isolate* isolate,
+ IsCompiledScope* is_compiled_scope) {
+ // Start a background thread compiling the script.
+ StressBackgroundCompileThread background_compile_thread(isolate, source);
+
+ UnoptimizedCompileFlags flags_copy =
+ background_compile_thread.data()->task->flags();
+
+ CHECK(background_compile_thread.Start());
+ MaybeHandle<SharedFunctionInfo> main_thread_maybe_result;
+ // In parallel, compile on the main thread to flush out any data races.
+ {
+ IsCompiledScope inner_is_compiled_scope;
+ // The background thread should also create any relevant exceptions, so we
+ // can ignore the main-thread created ones.
+ // TODO(leszeks): Maybe verify that any thrown (or unthrown) exceptions are
+ // equivalent.
+ TryCatch ignore_try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ flags_copy.set_script_id(Script::kTemporaryScriptId);
+ main_thread_maybe_result = CompileScriptOnMainThread(
+ flags_copy, source, script_details, origin_options, NOT_NATIVES_CODE,
+ nullptr, isolate, &inner_is_compiled_scope);
+ }
+ // Join with background thread and finalize compilation.
+ background_compile_thread.Join();
+ MaybeHandle<SharedFunctionInfo> maybe_result =
+ Compiler::GetSharedFunctionInfoForStreamedScript(
+ isolate, source, script_details, origin_options,
+ background_compile_thread.data());
+
+ // Either both compiles should succeed, or both should fail.
+ // TODO(leszeks): Compare the contents of the results of the two compiles.
+ CHECK_EQ(maybe_result.is_null(), main_thread_maybe_result.is_null());
+
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ *is_compiled_scope = result->is_compiled_scope();
+ }
+
+ return maybe_result;
}
} // namespace
@@ -2254,25 +2396,28 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (maybe_result.is_null()) {
// No cache entry found compile the script.
- ParseInfo parse_info(isolate);
-
- parse_info.SetFlagsForToplevelCompile(
- isolate->is_collecting_type_profile(), natives == NOT_NATIVES_CODE,
- language_mode, script_details.repl_mode);
+ if (FLAG_stress_background_compile &&
+ CanBackgroundCompile(script_details, origin_options, extension,
+ compile_options, natives)) {
+ // If the --stress-background-compile flag is set, do the actual
+ // compilation on a background thread, and wait for its result.
+ maybe_result = CompileScriptOnBothBackgroundAndMainThread(
+ source, script_details, origin_options, isolate, &is_compiled_scope);
+ } else {
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate, natives == NOT_NATIVES_CODE, language_mode,
+ script_details.repl_mode);
- parse_info.set_module(origin_options.IsModule());
- parse_info.set_extension(extension);
- parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+ flags.set_is_eager(compile_options == ScriptCompiler::kEagerCompile);
+ flags.set_is_module(origin_options.IsModule());
- Handle<Script> script = NewScript(isolate, &parse_info, source,
- script_details, origin_options, natives);
- DCHECK_IMPLIES(parse_info.collect_type_profile(),
- script->IsUserJavaScript());
- DCHECK_EQ(parse_info.is_repl_mode(), script->is_repl_mode());
+ maybe_result = CompileScriptOnMainThread(
+ flags, source, script_details, origin_options, natives, extension,
+ isolate, &is_compiled_scope);
+ }
- // Compile the function and add it to the isolate cache.
- maybe_result =
- CompileToplevel(&parse_info, script, isolate, &is_compiled_scope);
+ // Add the result to the isolate cache.
Handle<SharedFunctionInfo> result;
if (extension == nullptr && maybe_result.ToHandle(&result)) {
DCHECK(is_compiled_scope.is_compiled());
@@ -2331,18 +2476,18 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<Script> script;
IsCompiledScope is_compiled_scope;
if (!maybe_result.ToHandle(&wrapped)) {
- ParseInfo parse_info(isolate);
- parse_info.SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(),
- true, language_mode,
- script_details.repl_mode);
-
- parse_info.set_eval(); // Use an eval scope as declaration scope.
- parse_info.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
+ UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate, true, language_mode, script_details.repl_mode);
+ flags.set_is_eval(true); // Use an eval scope as declaration scope.
+ flags.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
// TODO(delphick): Remove this and instead make the wrapped and wrapper
// functions fully non-lazy instead thus preventing source positions from
// being omitted.
- parse_info.set_collect_source_positions(true);
- // parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+ flags.set_collect_source_positions(true);
+ // flags.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state);
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (!context->IsNativeContext()) {
@@ -2350,8 +2495,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
}
script = NewScript(isolate, &parse_info, source, script_details,
- origin_options, NOT_NATIVES_CODE);
- script->set_wrapped_arguments(*arguments);
+ origin_options, NOT_NATIVES_CODE, arguments);
Handle<SharedFunctionInfo> top_level;
maybe_result = CompileToplevel(&parse_info, script, maybe_outer_scope_info,
@@ -2383,6 +2527,9 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, ScriptOriginOptions origin_options,
ScriptStreamingData* streaming_data) {
+ DCHECK(!origin_options.IsModule());
+ DCHECK(!origin_options.IsWasm());
+
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
PostponeInterruptsScope postpone(isolate);
@@ -2410,73 +2557,71 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
}
if (maybe_result.is_null()) {
+ // No cache entry found, finalize compilation of the script and add it to
+ // the isolate cache.
+
+ Handle<Script> script;
if (task->finalize_on_background_thread()) {
RuntimeCallTimerScope runtimeTimerScope(
isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish");
- Handle<SharedFunctionInfo> sfi(task->outer_function_sfi(), isolate);
- Handle<Script> script(Script::cast(sfi->script()), isolate);
- task->off_thread_isolate()->factory()->Publish(isolate);
-
- FixUpOffThreadAllocatedScript(isolate, script, source, script_details,
- origin_options, NOT_NATIVES_CODE);
-
- // It's possible that source position collection was enabled after the
- // background compile was started (for instance by enabling the cpu
- // profiler), and the compiled bytecode is missing source positions. So,
- // walk all the SharedFunctionInfos in the script and force source
- // position collection.
- if (!task->collected_source_positions() &&
- isolate->NeedsDetailedOptimizedCodeLineInfo()) {
- Handle<WeakFixedArray> shared_function_infos(
- script->shared_function_infos(isolate), isolate);
- int length = shared_function_infos->length();
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
- Object entry = shared_function_infos->Get(isolate, i)
- .GetHeapObjectOrSmi(isolate);
- if (entry.IsSharedFunctionInfo(isolate)) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(
- isolate, handle(SharedFunctionInfo::cast(entry), isolate));
- }
- });
- }
+ task->off_thread_isolate()->Publish(isolate);
- maybe_result = sfi;
+ maybe_result = task->outer_function_sfi();
+ script = task->script();
+ script->set_source(*source);
+ script->set_origin_options(origin_options);
} else {
ParseInfo* parse_info = task->info();
- DCHECK(parse_info->is_toplevel());
+ DCHECK(parse_info->flags().is_toplevel());
+
+ script = parse_info->CreateScript(isolate, source, kNullMaybeHandle,
+ origin_options);
- // No cache entry found, finalize compilation of the script and add it to
- // the isolate cache.
- Handle<Script> script =
- NewScript(isolate, parse_info, source, script_details, origin_options,
- NOT_NATIVES_CODE);
task->parser()->UpdateStatistics(isolate, script);
task->parser()->HandleSourceURLComments(isolate, script);
- if (parse_info->literal() == nullptr || !task->outer_function_job()) {
- // Parsing has failed - report error messages.
- FailWithPendingException(isolate, script, parse_info,
- Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
- } else {
- // Parsing has succeeded - finalize compilation.
- maybe_result = FinalizeTopLevel(parse_info, script, isolate,
- task->outer_function_job(),
- task->inner_function_jobs());
- if (maybe_result.is_null()) {
- // Finalization failed - throw an exception.
- FailWithPendingException(
- isolate, script, parse_info,
- Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ if (parse_info->literal() != nullptr && task->outer_function_job()) {
+ // Off-thread parse & compile has succeeded - finalize compilation.
+ parse_info->ast_value_factory()->Internalize(isolate);
+
+ Handle<SharedFunctionInfo> shared_info =
+ CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
+ if (FinalizeAllUnoptimizedCompilationJobs(
+ parse_info, isolate, shared_info, task->outer_function_job(),
+ task->inner_function_jobs(),
+ task->finalize_unoptimized_compilation_data())) {
+ maybe_result = shared_info;
}
}
+
+ if (maybe_result.is_null()) {
+ // Compilation failed - prepare to throw an exception after script
+ // fields have been set.
+ PreparePendingException(isolate, parse_info);
+ }
+ }
+
+ // Set the script fields after finalization, to keep this path the same
+ // between main-thread and off-thread finalization.
+ {
+ DisallowHeapAllocation no_gc;
+ SetScriptFieldsFromDetails(isolate, *script, script_details, &no_gc);
+ LOG(isolate, ScriptDetails(*script));
}
- // Add compiled code to the isolate cache.
Handle<SharedFunctionInfo> result;
- if (maybe_result.ToHandle(&result)) {
+ if (!maybe_result.ToHandle(&result)) {
+ FailWithPreparedPendingException(
+ isolate, script, task->compile_state()->pending_error_handler());
+ } else {
+ FinalizeUnoptimizedScriptCompilation(
+ isolate, script, task->flags(), task->compile_state(),
+ *task->finalize_unoptimized_compilation_data());
+
+ // Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
compilation_cache->PutScript(source, isolate->native_context(),
@@ -2497,7 +2642,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
MaybeHandle<SharedFunctionInfo> maybe_existing;
// Find any previously allocated shared function info for the given literal.
- maybe_existing = script->FindSharedFunctionInfo(isolate, literal);
+ maybe_existing =
+ script->FindSharedFunctionInfo(isolate, literal->function_literal_id());
// If we found an existing shared function info, return it.
Handle<SharedFunctionInfo> existing;
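As a side note on the compiler.cc changes above: the new --stress-background-compile path compiles the same top-level script on a background thread and on the main thread in parallel, then checks that the two attempts either both succeed or both fail. A minimal standalone sketch of that cross-checking pattern, in plain C++ with std::thread rather than the V8 API (Compile() here is a hypothetical stand-in for the real compile step, not part of the patch):

#include <cassert>
#include <optional>
#include <string>
#include <thread>

// Hypothetical stand-in for a compile step that may fail.
std::optional<std::string> Compile(const std::string& source) {
  if (source.empty()) return std::nullopt;
  return "bytecode-for:" + source;
}

int main() {
  const std::string source = "function f() { return 1; }";

  // Background "compile".
  std::optional<std::string> background_result;
  std::thread background([&] { background_result = Compile(source); });

  // Main-thread "compile", run in parallel to shake out data races.
  std::optional<std::string> main_result = Compile(source);

  background.join();

  // Either both compiles should succeed, or both should fail.
  assert(background_result.has_value() == main_result.has_value());
  return 0;
}

The duplicated work is intentional: the main-thread result only serves as a reference while the background path is exercised under stress.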
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 7284003de9..b851d6abd6 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -12,8 +12,11 @@
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/logging/code-events.h"
#include "src/objects/contexts.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/utils/allocation.h"
#include "src/zone/zone.h"
@@ -91,13 +94,6 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// offer this chance, optimized closure instantiation will not call this.
static void PostInstantiation(Handle<JSFunction> function);
- // Parser::Parse, then Compiler::Analyze.
- static bool ParseAndAnalyze(ParseInfo* parse_info,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate);
- // Rewrite and analyze scopes.
- static bool Analyze(ParseInfo* parse_info);
-
// ===========================================================================
// The following family of methods instantiates new functions for scripts or
// function literals. The decision whether those functions will be compiled,
@@ -245,12 +241,17 @@ class V8_EXPORT_PRIVATE CompilationJob {
// Either of phases can either fail or succeed.
class UnoptimizedCompilationJob : public CompilationJob {
public:
+ enum class CanOffThreadFinalize : bool { kYes = true, kNo = false };
+
UnoptimizedCompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
- UnoptimizedCompilationInfo* compilation_info)
+ UnoptimizedCompilationInfo* compilation_info,
+ CanOffThreadFinalize can_off_thread_finalize)
: CompilationJob(State::kReadyToExecute),
stack_limit_(stack_limit),
parse_info_(parse_info),
- compilation_info_(compilation_info) {}
+ compilation_info_(compilation_info),
+ can_off_thread_finalize_(can_off_thread_finalize ==
+ CanOffThreadFinalize::kYes) {}
// Executes the compile job. Can be called on a background thread.
V8_WARN_UNUSED_RESULT Status ExecuteJob();
@@ -275,6 +276,15 @@ class UnoptimizedCompilationJob : public CompilationJob {
uintptr_t stack_limit() const { return stack_limit_; }
+ base::TimeDelta time_taken_to_execute() const {
+ return time_taken_to_execute_;
+ }
+ base::TimeDelta time_taken_to_finalize() const {
+ return time_taken_to_finalize_;
+ }
+
+ bool can_off_thread_finalize() const { return can_off_thread_finalize_; }
+
protected:
// Overridden by the actual implementation.
virtual Status ExecuteJobImpl() = 0;
@@ -289,6 +299,7 @@ class UnoptimizedCompilationJob : public CompilationJob {
UnoptimizedCompilationInfo* compilation_info_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
+ bool can_off_thread_finalize_;
};
// A base class for optimized compilation jobs.
@@ -350,6 +361,55 @@ class OptimizedCompilationJob : public CompilationJob {
const char* compiler_name_;
};
+class FinalizeUnoptimizedCompilationData {
+ public:
+ FinalizeUnoptimizedCompilationData(Isolate* isolate,
+ Handle<SharedFunctionInfo> function_handle,
+ base::TimeDelta time_taken_to_execute,
+ base::TimeDelta time_taken_to_finalize)
+ : time_taken_to_execute_(time_taken_to_execute),
+ time_taken_to_finalize_(time_taken_to_finalize),
+ function_handle_(function_handle),
+ handle_state_(kHandle) {}
+
+ FinalizeUnoptimizedCompilationData(OffThreadIsolate* isolate,
+ Handle<SharedFunctionInfo> function_handle,
+ base::TimeDelta time_taken_to_execute,
+ base::TimeDelta time_taken_to_finalize)
+ : time_taken_to_execute_(time_taken_to_execute),
+ time_taken_to_finalize_(time_taken_to_finalize),
+ function_transfer_handle_(isolate->TransferHandle(function_handle)),
+ handle_state_(kTransferHandle) {}
+
+ Handle<SharedFunctionInfo> function_handle() const {
+ switch (handle_state_) {
+ case kHandle:
+ return function_handle_;
+ case kTransferHandle:
+ return function_transfer_handle_.ToHandle();
+ }
+ }
+
+ base::TimeDelta time_taken_to_execute() const {
+ return time_taken_to_execute_;
+ }
+ base::TimeDelta time_taken_to_finalize() const {
+ return time_taken_to_finalize_;
+ }
+
+ private:
+ base::TimeDelta time_taken_to_execute_;
+ base::TimeDelta time_taken_to_finalize_;
+ union {
+ Handle<SharedFunctionInfo> function_handle_;
+ OffThreadTransferHandle<SharedFunctionInfo> function_transfer_handle_;
+ };
+ enum { kHandle, kTransferHandle } handle_state_;
+};
+
+using FinalizeUnoptimizedCompilationDataList =
+ std::vector<FinalizeUnoptimizedCompilationData>;
+
class V8_EXPORT_PRIVATE BackgroundCompileTask {
public:
// Creates a new task that when run will parse and compile the streamed
@@ -363,8 +423,7 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
// |function_literal| and can be finalized with
// Compiler::FinalizeBackgroundCompileTask.
BackgroundCompileTask(
- AccountingAllocator* allocator, const ParseInfo* outer_parse_info,
- const AstRawString* function_name,
+ const ParseInfo* outer_parse_info, const AstRawString* function_name,
const FunctionLiteral* function_literal,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size);
@@ -382,23 +441,34 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
UnoptimizedCompilationJobList* inner_function_jobs() {
return &inner_function_jobs_;
}
+ UnoptimizedCompileFlags flags() const { return flags_; }
+ const UnoptimizedCompileState* compile_state() const {
+ return &compile_state_;
+ }
LanguageMode language_mode() { return language_mode_; }
- bool collected_source_positions() { return collected_source_positions_; }
bool finalize_on_background_thread() {
return finalize_on_background_thread_;
}
OffThreadIsolate* off_thread_isolate() { return off_thread_isolate_.get(); }
- SharedFunctionInfo outer_function_sfi() {
- // Make sure that this is an off-thread object, so that it won't have been
- // moved by the GC.
- DCHECK(Heap::InOffThreadSpace(outer_function_sfi_));
- return outer_function_sfi_;
+ MaybeHandle<SharedFunctionInfo> outer_function_sfi() {
+ DCHECK_NOT_NULL(off_thread_isolate_);
+ return outer_function_sfi_.ToHandle();
+ }
+ Handle<Script> script() {
+ DCHECK_NOT_NULL(off_thread_isolate_);
+ return script_.ToHandle();
+ }
+ FinalizeUnoptimizedCompilationDataList*
+ finalize_unoptimized_compilation_data() {
+ return &finalize_unoptimized_compilation_data_;
}
private:
// Data needed for parsing, and data needed to to be passed between thread
// between parsing and compilation. These need to be initialized before the
// compilation starts.
+ UnoptimizedCompileFlags flags_;
+ UnoptimizedCompileState compile_state_;
std::unique_ptr<ParseInfo> info_;
std::unique_ptr<Parser> parser_;
@@ -411,15 +481,19 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
// should add some stricter type-safety or DCHECKs to ensure that the user of
// the task knows this.
std::unique_ptr<OffThreadIsolate> off_thread_isolate_;
- // This is a raw pointer to the off-thread allocated SharedFunctionInfo.
- SharedFunctionInfo outer_function_sfi_;
+ OffThreadTransferMaybeHandle<SharedFunctionInfo> outer_function_sfi_;
+ OffThreadTransferHandle<Script> script_;
+ FinalizeUnoptimizedCompilationDataList finalize_unoptimized_compilation_data_;
+
+ // Single function data for top-level function compilation.
+ int start_position_;
+ int end_position_;
+ int function_literal_id_;
int stack_size_;
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
- AccountingAllocator* allocator_;
TimedHistogram* timer_;
LanguageMode language_mode_;
- bool collected_source_positions_;
// True if the background compilation should be finalized on the background
// thread. When this is true, the ParseInfo, Parser and compilation jobs are
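The compiler.h hunks above make FinalizeUnoptimizedCompilationData hold either a main-thread Handle<SharedFunctionInfo> or an OffThreadTransferHandle in an anonymous union, discriminated by a two-value enum, so the same record can be produced during main-thread or off-thread finalization. A rough standalone sketch of that tagged-union shape, not part of the patch; MainThreadHandle, TransferHandle and CompilationRecord are made-up stand-ins rather than V8 types:

#include <cassert>
#include <cstring>

// Trivially-destructible stand-ins for the two handle flavours.
struct MainThreadHandle { const char* value; };
struct TransferHandle   { const char* value; };

class CompilationRecord {
 public:
  explicit CompilationRecord(MainThreadHandle h)
      : handle_(h), state_(kHandle) {}
  explicit CompilationRecord(TransferHandle h)
      : transfer_handle_(h), state_(kTransferHandle) {}

  // Resolve to a plain value regardless of which union member is active.
  const char* value() const {
    switch (state_) {
      case kHandle:
        return handle_.value;
      case kTransferHandle:
        return transfer_handle_.value;
    }
    return nullptr;
  }

 private:
  union {
    MainThreadHandle handle_;
    TransferHandle transfer_handle_;
  };
  enum { kHandle, kTransferHandle } state_;
};

int main() {
  CompilationRecord a{MainThreadHandle{"sfi-main"}};
  CompilationRecord b{TransferHandle{"sfi-off-thread"}};
  assert(std::strcmp(a.value(), "sfi-main") == 0);
  assert(std::strcmp(b.value(), "sfi-off-thread") == 0);
  return 0;
}

Keeping both members trivially destructible (as Handle and OffThreadTransferHandle appear to be) is what lets the class get away with a plain union instead of a variant.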
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/codegen/external-reference-encoder.cc
index a843771fdb..0dfe3e976a 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/codegen/external-reference-encoder.cc
@@ -1,14 +1,11 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
+// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/serializer-common.h"
+#include "src/codegen/external-reference-encoder.h"
#include "src/codegen/external-reference-table.h"
-#include "src/objects/foreign-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/slots.h"
-#include "third_party/zlib/zlib.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
@@ -45,8 +42,8 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
}
}
-ExternalReferenceEncoder::~ExternalReferenceEncoder() {
#ifdef DEBUG
+ExternalReferenceEncoder::~ExternalReferenceEncoder() {
if (!i::FLAG_external_reference_stats) return;
if (api_references_ == nullptr) return;
for (uint32_t i = 0; api_references_[i] != 0; ++i) {
@@ -56,8 +53,8 @@ ExternalReferenceEncoder::~ExternalReferenceEncoder() {
"index=%5d count=%5d %-60s\n", i, count_[i],
ExternalReferenceTable::ResolveSymbol(reinterpret_cast<void*>(addr)));
}
-#endif // DEBUG
}
+#endif // DEBUG
Maybe<ExternalReferenceEncoder::Value> ExternalReferenceEncoder::TryEncode(
Address address) {
@@ -96,73 +93,5 @@ const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
return isolate->external_reference_table()->name(value.index());
}
-void SerializedData::AllocateData(uint32_t size) {
- DCHECK(!owns_data_);
- data_ = NewArray<byte>(size);
- size_ = size;
- owns_data_ = true;
-}
-
-// static
-constexpr uint32_t SerializedData::kMagicNumber;
-
-// The partial snapshot cache is terminated by undefined. We visit the
-// partial snapshot...
-// - during deserialization to populate it.
-// - during normal GC to keep its content alive.
-// - not during serialization. The partial serializer adds to it explicitly.
-DISABLE_CFI_PERF
-void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
- std::vector<Object>* cache = isolate->partial_snapshot_cache();
- for (size_t i = 0;; ++i) {
- // Extend the array ready to get a value when deserializing.
- if (cache->size() <= i) cache->push_back(Smi::zero());
- // During deserialization, the visitor populates the partial snapshot cache
- // and eventually terminates the cache with undefined.
- visitor->VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- FullObjectSlot(&cache->at(i)));
- if (cache->at(i).IsUndefined(isolate)) break;
- }
-}
-
-bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
- // ArrayBuffer instances are serialized by first re-assigning a index
- // to the backing store field, then serializing the object, and then
- // storing the actual backing store address again (and the same for the
- // ArrayBufferExtension). If serialization of the object itself is deferred,
- // the real backing store address is written into the snapshot, which cannot
- // be processed when deserializing.
- return !o.IsString() && !o.IsScript() && !o.IsJSTypedArray() &&
- !o.IsJSArrayBuffer();
-}
-
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- const std::vector<AccessorInfo>& accessor_infos) {
- // Restore wiped accessor infos.
- for (AccessorInfo info : accessor_infos) {
- Foreign::cast(info.js_getter())
- .set_foreign_address(info.redirected_getter());
- }
-}
-
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- const std::vector<CallHandlerInfo>& call_handler_infos) {
- for (CallHandlerInfo info : call_handler_infos) {
- Foreign::cast(info.js_callback())
- .set_foreign_address(info.redirected_callback());
- }
-}
-
-uint32_t Checksum(Vector<const byte> payload) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload.begin(), payload.length());
-#endif // MEMORY_SANITIZER
- // Priming the adler32 call so it can see what CPU features are available.
- adler32(0, NULL, 0);
- return static_cast<uint32_t>(adler32(0, payload.begin(), payload.length()));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/external-reference-encoder.h b/deps/v8/src/codegen/external-reference-encoder.h
new file mode 100644
index 0000000000..7c41206f07
--- /dev/null
+++ b/deps/v8/src/codegen/external-reference-encoder.h
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_EXTERNAL_REFERENCE_ENCODER_H_
+#define V8_CODEGEN_EXTERNAL_REFERENCE_ENCODER_H_
+
+#include "src/base/bit-field.h"
+#include "src/common/globals.h"
+#include "src/utils/address-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+class ExternalReferenceEncoder {
+ public:
+ class Value {
+ public:
+ explicit Value(uint32_t raw) : value_(raw) {}
+ Value() : value_(0) {}
+ static uint32_t Encode(uint32_t index, bool is_from_api) {
+ return Index::encode(index) | IsFromAPI::encode(is_from_api);
+ }
+
+ bool is_from_api() const { return IsFromAPI::decode(value_); }
+ uint32_t index() const { return Index::decode(value_); }
+
+ private:
+ using Index = base::BitField<uint32_t, 0, 31>;
+ using IsFromAPI = base::BitField<bool, 31, 1>;
+ uint32_t value_;
+ };
+
+ explicit ExternalReferenceEncoder(Isolate* isolate);
+#ifdef DEBUG
+ ~ExternalReferenceEncoder();
+#endif // DEBUG
+
+ Value Encode(Address key);
+ Maybe<Value> TryEncode(Address key);
+
+ const char* NameOfAddress(Isolate* isolate, Address address) const;
+
+ private:
+ AddressToIndexHashMap* map_;
+
+#ifdef DEBUG
+ std::vector<int> count_;
+ const intptr_t* api_references_;
+#endif // DEBUG
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_EXTERNAL_REFERENCE_ENCODER_H_
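The new external-reference-encoder.h packs ExternalReferenceEncoder::Value into a single uint32_t: a 31-bit index into the external reference table in the low bits and an is_from_api flag in bit 31, expressed with base::BitField. A standalone sketch of the equivalent packing written with plain shifts and masks, assuming the same layout as the BitField declarations above; this is an illustration, not code from the patch:

#include <cassert>
#include <cstdint>

// Low 31 bits: index into the external reference table.
// Bit 31:      whether the reference came from the embedder API.
constexpr uint32_t kIndexBits = 31;
constexpr uint32_t kIndexMask = (1u << kIndexBits) - 1;

constexpr uint32_t Encode(uint32_t index, bool is_from_api) {
  return (index & kIndexMask) |
         (static_cast<uint32_t>(is_from_api) << kIndexBits);
}

constexpr uint32_t IndexOf(uint32_t value) { return value & kIndexMask; }
constexpr bool IsFromApi(uint32_t value) { return (value >> kIndexBits) != 0; }

int main() {
  uint32_t v = Encode(/*index=*/1234, /*is_from_api=*/true);
  assert(IndexOf(v) == 1234);
  assert(IsFromApi(v));
  assert(!IsFromApi(Encode(1234, false)));
  return 0;
}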
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 7a42e40461..5c2c63e816 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -902,6 +902,10 @@ static int EnterMicrotaskContextWrapper(HandleScopeImplementer* hsi,
FUNCTION_REFERENCE(call_enter_context_function, EnterMicrotaskContextWrapper)
+FUNCTION_REFERENCE(
+ js_finalization_registry_remove_cell_from_unregister_token_map,
+ JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap)
+
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 2c5c8348f4..f42a7d7486 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -215,6 +215,8 @@ class StatsCounter;
V(atomic_pair_exchange_function, "atomic_pair_exchange_function") \
V(atomic_pair_compare_exchange_function, \
"atomic_pair_compare_exchange_function") \
+ V(js_finalization_registry_remove_cell_from_unregister_token_map, \
+ "JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap") \
EXTERNAL_REFERENCE_LIST_INTL(V)
#ifdef V8_INTL_SUPPORT
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 1d3b9e2644..551750936d 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -1091,6 +1091,25 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
+void Assembler::rol(Operand dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ DCHECK(is_uint5(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ emit_operand(eax, dst);
+ } else {
+ EMIT(0xC1);
+ emit_operand(eax, dst);
+ EMIT(imm8);
+ }
+}
+
+void Assembler::rol_cl(Operand dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD3);
+ emit_operand(eax, dst);
+}
+
void Assembler::ror(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
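The new rol and rol_cl emitters mirror the existing ror helpers; the operation they encode is a plain 32-bit rotate left. A scalar sketch of those semantics (ordinary C++, not assembler output):

#include <cassert>
#include <cstdint>

// Rotate a 32-bit value left by n bits (0 <= n < 32) -- the operation the
// new ia32 rol instruction performs on its destination operand.
uint32_t RotateLeft32(uint32_t value, uint32_t n) {
  n &= 31;
  if (n == 0) return value;
  return (value << n) | (value >> (32 - n));
}

int main() {
  assert(RotateLeft32(0x80000001u, 1) == 0x00000003u);
  return 0;
}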
@@ -2266,6 +2285,14 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::pmovmskb(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD7);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::maxsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2894,6 +2921,22 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vmovmskps(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(xmm0, kL128, kNone, k0F, kWIG);
+ EMIT(0x50);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vpmovmskb(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(xmm0, kL128, k66, k0F, kWIG);
+ EMIT(0xD7);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
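pmovmskb and its AVX form vpmovmskb collect the sign bit of each of the 16 bytes in an XMM register into the low 16 bits of a general-purpose register; movmskps/vmovmskps do the same for the four float lanes. A scalar model of the byte variant (illustrative C++ only):

#include <cassert>
#include <cstdint>

// Scalar model of PMOVMSKB: bit i of the result is the sign bit of byte i.
uint32_t ByteMoveMask(const uint8_t bytes[16]) {
  uint32_t mask = 0;
  for (int i = 0; i < 16; ++i) {
    mask |= static_cast<uint32_t>(bytes[i] >> 7) << i;
  }
  return mask;
}

int main() {
  uint8_t lanes[16] = {0x80, 0x01, 0xFF, 0x00};  // remaining lanes are 0
  assert(ByteMoveMask(lanes) == 0x5u);           // bits 0 and 2 set
  return 0;
}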
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 246415ba67..60d978df5b 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -635,6 +635,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
+ void rol(Register dst, uint8_t imm8) { rol(Operand(dst), imm8); }
+ void rol(Operand dst, uint8_t imm8);
+ void rol_cl(Register dst) { rol_cl(Operand(dst)); }
+ void rol_cl(Operand dst);
+
void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
void ror(Operand dst, uint8_t imm8);
void ror_cl(Register dst) { ror_cl(Operand(dst)); }
@@ -958,6 +963,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
+ void pmovmskb(Register dst, XMMRegister src);
+
void cmpltsd(XMMRegister dst, XMMRegister src);
void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
@@ -1439,6 +1446,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x7E, src, xmm0, dst, k66, k0F, kWIG);
}
+ void vmovmskps(Register dst, XMMRegister src);
+
+ void vpmovmskb(Register dst, XMMRegister src);
+
// BMI instruction
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 428912c7bd..8b1ea8d880 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -284,6 +284,46 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
+void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // Work around using eax, whose register code of 0 leads to the FP
+ // parameter being passed via xmm0, which is not allocatable on ia32.
+ Register registers[] = {ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // Work around using eax, whose register code of 0 leads to the FP
+ // parameter being passed via xmm0, which is not allocatable on ia32.
+ Register registers[] = {ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 6f5778d3ca..b73050a680 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -15,7 +15,7 @@
#include "src/debug/debug.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
@@ -135,6 +135,31 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
j(below_equal, on_in_range, near_jump);
}
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order) {
+ DCHECK(!AreAliased(array, size, scratch));
+ Register counter = scratch;
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mov(counter, 0);
+ jmp(&entry);
+ bind(&loop);
+ Push(Operand(array, counter, times_system_pointer_size, 0));
+ inc(counter);
+ bind(&entry);
+ cmp(counter, size);
+ j(less, &loop, Label::kNear);
+ } else {
+ mov(counter, size);
+ jmp(&entry);
+ bind(&loop);
+ Push(Operand(array, counter, times_system_pointer_size, 0));
+ bind(&entry);
+ dec(counter);
+ j(greater_equal, &loop, Label::kNear);
+ }
+}
+
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
// TODO(jgruber): Add support for enable_root_array_delta_access.
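PushArray pushes `size` pointer-sized elements starting at `array`. With kReverse it walks from index 0 upward, so array[size-1] ends up on top of the stack; the default kNormal order walks downward, so array[0] ends up on top and the array's own low-to-high layout is reproduced on the downward-growing stack. A scalar model of the two orders (illustrative C++; the std::vector stands in for the machine stack):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar model of TurboAssembler::PushArray. The vector grows by push_back,
// so the last element pushed is the "top of stack".
enum class PushArrayOrder { kNormal, kReverse };

void PushArray(const std::vector<intptr_t>& array, PushArrayOrder order,
               std::vector<intptr_t>* stack) {
  if (order == PushArrayOrder::kReverse) {
    // array[0] is pushed first; array[size-1] ends up on top.
    for (size_t i = 0; i < array.size(); ++i) stack->push_back(array[i]);
  } else {
    // kNormal: array[size-1] is pushed first; array[0] ends up on top,
    // mirroring the array's low-to-high layout on the downward stack.
    for (size_t i = array.size(); i-- > 0;) stack->push_back(array[i]);
  }
}

int main() {
  std::vector<intptr_t> array = {10, 20, 30}, stack;
  PushArray(array, PushArrayOrder::kNormal, &stack);
  assert(stack.back() == 10);  // array[0] ends up on top
  return 0;
}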
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 40b542f375..94ddb2f784 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -224,6 +224,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PushPC();
+ enum class PushArrayOrder { kNormal, kReverse };
+ // `array` points to the first element (the lowest address).
+ // `array` and `size` are not modified.
+ void PushArray(Register array, Register size, Register scratch,
+ PushArrayOrder order = PushArrayOrder::kNormal);
+
// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
// only guaranteed to be correct as long as the scratch register
@@ -280,10 +286,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Movaps, movaps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Pmovmskb, pmovmskb, Register, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movmskps, movmskps, Register, XMMRegister)
#undef AVX_OP2_WITH_TYPE
@@ -325,6 +335,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Xorpd, xorpd)
AVX_OP3_XO(Sqrtss, sqrtss)
AVX_OP3_XO(Sqrtsd, sqrtsd)
+ AVX_OP3_XO(Orps, orps)
AVX_OP3_XO(Orpd, orpd)
AVX_OP3_XO(Andnpd, andnpd)
@@ -347,6 +358,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
AVX_PACKED_OP3(Addpd, addpd)
+ AVX_PACKED_OP3(Subps, subps)
AVX_PACKED_OP3(Subpd, subpd)
AVX_PACKED_OP3(Mulpd, mulpd)
AVX_PACKED_OP3(Divpd, divpd)
@@ -354,8 +366,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
AVX_PACKED_OP3(Cmpltpd, cmpltpd)
AVX_PACKED_OP3(Cmplepd, cmplepd)
+ AVX_PACKED_OP3(Minps, minps)
AVX_PACKED_OP3(Minpd, minpd)
+ AVX_PACKED_OP3(Maxps, maxps)
AVX_PACKED_OP3(Maxpd, maxpd)
+ AVX_PACKED_OP3(Cmpunordps, cmpunordps)
AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
AVX_PACKED_OP3(Psllw, psllw)
AVX_PACKED_OP3(Pslld, pslld)
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index dafb8cbcf0..df3117e8d0 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -159,6 +159,8 @@ constexpr Register kRootRegister = ebx;
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
constexpr Register kSpeculationPoisonRegister = no_reg;
+constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable.
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 42b45c0f33..503da3cb43 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -85,6 +85,8 @@ void CallDescriptors::InitializeOncePerProcess() {
DCHECK(!AllocateDescriptor{}.HasContextParameter());
DCHECK(!AllocateHeapNumberDescriptor{}.HasContextParameter());
DCHECK(!AbortDescriptor{}.HasContextParameter());
+ DCHECK(!WasmFloat32ToNumberDescriptor{}.HasContextParameter());
+ DCHECK(!WasmFloat64ToNumberDescriptor{}.HasContextParameter());
}
void CallDescriptors::TearDown() {
@@ -375,11 +377,20 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void WasmMemoryGrowDescriptor::InitializePlatformSpecific(
+#if !V8_TARGET_ARCH_IA32
+// We need a custom descriptor on ia32 to avoid using xmm0.
+void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+// We need a custom descriptor on ia32 to avoid using xmm0.
+void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+#endif // !V8_TARGET_ARCH_IA32
+
void WasmTableInitDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
@@ -392,21 +403,6 @@ void WasmTableCopyDescriptor::InitializePlatformSpecific(
kParameterCount - kStackArgumentsCount);
}
-void WasmTableGetDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void WasmTableSetDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void WasmThrowDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 36099d57fe..fc27b46ca1 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -32,24 +32,26 @@ namespace internal {
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(AsyncFunctionStackParameter) \
- V(BigIntToI64) \
V(BigIntToI32Pair) \
- V(I64ToBigInt) \
- V(I32PairToBigInt) \
+ V(BigIntToI64) \
V(BinaryOp) \
+ V(BinaryOp_WithFeedback) \
V(CallForwardVarargs) \
V(CallFunctionTemplate) \
V(CallTrampoline) \
+ V(CallTrampoline_WithFeedback) \
V(CallVarargs) \
V(CallWithArrayLike) \
V(CallWithSpread) \
V(CEntry1ArgvOnStack) \
V(CloneObjectWithVector) \
V(Compare) \
+ V(Compare_WithFeedback) \
V(ConstructForwardVarargs) \
V(ConstructStub) \
V(ConstructVarargs) \
V(ConstructWithArrayLike) \
+ V(Construct_WithFeedback) \
V(ConstructWithSpread) \
V(ContextOnly) \
V(CppBuiltinAdaptor) \
@@ -60,6 +62,8 @@ namespace internal {
V(GetIteratorStackParameter) \
V(GetProperty) \
V(GrowArrayElements) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
V(InterpreterCEntry1) \
V(InterpreterCEntry2) \
V(InterpreterDispatch) \
@@ -69,15 +73,15 @@ namespace internal {
V(Load) \
V(LoadGlobal) \
V(LoadGlobalNoFeedback) \
- V(LoadNoFeedback) \
V(LoadGlobalWithVector) \
+ V(LoadNoFeedback) \
V(LoadWithVector) \
V(NewArgumentsElements) \
V(NoContext) \
V(RecordWrite) \
V(ResumeGenerator) \
- V(RunMicrotasksEntry) \
V(RunMicrotasks) \
+ V(RunMicrotasksEntry) \
V(Store) \
V(StoreGlobal) \
V(StoreGlobalWithVector) \
@@ -89,18 +93,17 @@ namespace internal {
V(TypeConversion) \
V(TypeConversionStackParameter) \
V(Typeof) \
+ V(UnaryOp_WithFeedback) \
V(Void) \
V(WasmAtomicNotify) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
- V(WasmMemoryGrow) \
V(WasmTableInit) \
V(WasmTableCopy) \
- V(WasmTableGet) \
- V(WasmTableSet) \
- V(WasmThrow) \
BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
@@ -514,10 +517,12 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
};
// This class is subclassed by Torque-generated call interface descriptors.
-template <int parameter_count>
+template <int parameter_count, bool has_context_parameter>
class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
public:
- static constexpr int kDescriptorFlags = CallInterfaceDescriptorData::kNoFlags;
+ static constexpr int kDescriptorFlags =
+ has_context_parameter ? CallInterfaceDescriptorData::kNoFlags
+ : CallInterfaceDescriptorData::kNoContext;
static constexpr int kParameterCount = parameter_count;
enum ParameterIndices { kContext = kParameterCount };
template <int i>
@@ -1310,12 +1315,20 @@ class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
static Register MicrotaskQueueRegister();
};
-class WasmMemoryGrowDescriptor final : public CallInterfaceDescriptor {
+class WasmFloat32ToNumberDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kValue)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
+ MachineType::Float32()) // value
+ DECLARE_DESCRIPTOR(WasmFloat32ToNumberDescriptor, CallInterfaceDescriptor)
+};
+
+class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS_NO_CONTEXT(kNumPages)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // result 1
- MachineType::Int32()) // kNumPages
- DECLARE_DESCRIPTOR(WasmMemoryGrowDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS_NO_CONTEXT(kValue)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
+ MachineType::Float64()) // value
+ DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
};
class WasmTableInitDescriptor final : public CallInterfaceDescriptor {
@@ -1364,32 +1377,6 @@ class WasmTableCopyDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmTableCopyDescriptor, CallInterfaceDescriptor)
};
-class WasmTableGetDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kTableIndex, kEntryIndex)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1
- MachineType::TaggedSigned(), // kTableIndex
- MachineType::Int32()) // kEntryIndex
- DECLARE_DESCRIPTOR(WasmTableGetDescriptor, CallInterfaceDescriptor)
-};
-
-class WasmTableSetDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kTableIndex, kEntryIndex, kValue)
- DEFINE_PARAMETER_TYPES(MachineType::TaggedSigned(), // kTableIndex
- MachineType::Int32(), // kEntryIndex
- MachineType::AnyTagged()) // kValue
- DECLARE_DESCRIPTOR(WasmTableSetDescriptor, CallInterfaceDescriptor)
-};
-
-class WasmThrowDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kException)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result 1
- MachineType::AnyTagged()) // kException
- DECLARE_DESCRIPTOR(WasmThrowDescriptor, CallInterfaceDescriptor)
-};
-
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
: public CallInterfaceDescriptor {
public:
@@ -1505,6 +1492,57 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
};
+class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::AnyTagged(), // kRight
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+};
+
+class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
+ MachineType::Int32(), // kActualArgumentsCount
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
+ MachineType::AnyTagged(), // kRight
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
+};
+
+class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ // kSlot is passed in a register, kMaybeFeedbackVector on the stack.
+ DEFINE_JS_PARAMETERS(kSlot, kMaybeFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor,
+ CallInterfaceDescriptor, 1)
+};
+
+class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 8b8bc1b56d..6770ab5cce 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -326,6 +326,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 6ae70798c1..48b2acf456 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index f009a3fd0f..2b5f454dd4 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -376,6 +376,8 @@ constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+constexpr DoubleRegister kFPReturnRegister0 = f0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 37a05585c4..751d0f8703 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -84,14 +84,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
-#if defined(_MIPS_ARCH_MIPS64R6)
#if defined(_MIPS_MSA)
supported_ |= 1u << MIPS_SIMD;
#else
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif
-#endif
}
void CpuFeatures::PrintTarget() {}
@@ -261,6 +259,9 @@ Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
scratch_register_list_(at.bit()) {
+ if (CpuFeatures::IsSupported(MIPS_SIMD)) {
+ EnableCpuFeature(MIPS_SIMD);
+ }
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_trampoline_pool_end_ = 0;
@@ -1169,7 +1170,7 @@ void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
// MSA instructions
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift);
@@ -1178,7 +1179,7 @@ void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
int32_t imm5, MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid());
DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
(operation == CEQI) || (operation == CLTI_S) ||
@@ -1192,7 +1193,7 @@ void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
uint32_t m, MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
Instr instr = MSA | operation | df | (m << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift);
@@ -1201,7 +1202,7 @@ void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
int32_t imm10, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(wd.is_valid() && is_int10(imm10));
Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
(wd.code() << kWdShift);
@@ -1211,7 +1212,7 @@ void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
RegType t, MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
Instr instr = MSA | operation | df | (t.code() << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift);
@@ -1221,7 +1222,7 @@ void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
uint32_t n, SrcType src, DstType dst) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
Instr instr = MSA | operation | df | (n << kWtShift) |
(src.code() << kWsShift) | (dst.code() << kWdShift) |
@@ -1231,7 +1232,7 @@ void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
MSARegister wt, MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
DCHECK_LT(df, 2);
Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
@@ -1241,7 +1242,7 @@ void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
Instr instr = MSA | operation | (wt.code() << kWtShift) |
(ws.code() << kWsShift) | (wd.code() << kWdShift) |
@@ -1251,7 +1252,7 @@ void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
Register rs, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
(rs.code() << kWsShift) | (wd.code() << kWdShift);
@@ -1260,7 +1261,7 @@ void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid());
Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
(wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
@@ -1269,7 +1270,7 @@ void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
MSARegister ws, MSARegister wd) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(ws.is_valid() && wd.is_valid());
Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
(ws.code() << kWsShift) | (wd.code() << kWdShift) |
@@ -1279,7 +1280,7 @@ void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
int32_t offset16) {
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD));
+ DCHECK(IsEnabled(MIPS_SIMD));
DCHECK(wt.is_valid() && is_int16(offset16));
BlockTrampolinePoolScope block_trampoline_pool(this);
Instr instr =
@@ -3157,28 +3158,29 @@ MSA_BRANCH_LIST(MSA_BRANCH)
#undef MSA_BRANCH_LIST
#define MSA_LD_ST_LIST(V) \
- V(ld_b, LD_B) \
- V(ld_h, LD_H) \
- V(ld_w, LD_W) \
- V(ld_d, LD_D) \
- V(st_b, ST_B) \
- V(st_h, ST_H) \
- V(st_w, ST_W) \
- V(st_d, ST_D)
-
-#define MSA_LD_ST(name, opcode) \
- void Assembler::name(MSARegister wd, const MemOperand& rs) { \
- MemOperand source = rs; \
- AdjustBaseAndOffset(&source); \
- if (is_int10(source.offset())) { \
- GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
- } else { \
- UseScratchRegisterScope temps(this); \
- Register scratch = temps.Acquire(); \
- DCHECK(rs.rm() != scratch); \
- daddiu(scratch, source.rm(), source.offset()); \
- GenInstrMsaMI10(opcode, 0, scratch, wd); \
- } \
+ V(ld_b, LD_B, 1) \
+ V(ld_h, LD_H, 2) \
+ V(ld_w, LD_W, 4) \
+ V(ld_d, LD_D, 8) \
+ V(st_b, ST_B, 1) \
+ V(st_h, ST_H, 2) \
+ V(st_w, ST_W, 4) \
+ V(st_d, ST_D, 8)
+
+#define MSA_LD_ST(name, opcode, b) \
+ void Assembler::name(MSARegister wd, const MemOperand& rs) { \
+ MemOperand source = rs; \
+ AdjustBaseAndOffset(&source); \
+ if (is_int10(source.offset())) { \
+ DCHECK_EQ(source.offset() % b, 0); \
+ GenInstrMsaMI10(opcode, source.offset() / b, source.rm(), wd); \
+ } else { \
+ UseScratchRegisterScope temps(this); \
+ Register scratch = temps.Acquire(); \
+ DCHECK_NE(rs.rm(), scratch); \
+ daddiu(scratch, source.rm(), source.offset()); \
+ GenInstrMsaMI10(opcode, 0, scratch, wd); \
+ } \
}
MSA_LD_ST_LIST(MSA_LD_ST)
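With this change the 10-bit MSA load/store immediate is encoded in element units rather than bytes, so the byte offset must be a multiple of the element size and is divided by it before encoding (for example, ld_w with a byte offset of 64 now encodes an immediate of 16). A minimal model of that scaling (illustrative C++ only):

#include <cassert>
#include <cstdint>

// Model of the offset handling in the updated MSA_LD_ST macro: the MI10
// field holds the offset in element units, so the byte offset must be a
// multiple of the element size b (1, 2, 4 or 8).
int32_t EncodeMsaOffset(int32_t byte_offset, int32_t b) {
  assert(byte_offset % b == 0);
  return byte_offset / b;  // value placed in the 10-bit immediate
}

int main() {
  assert(EncodeMsaOffset(64, 4) == 16);  // ld_w / st_w case
  return 0;
}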
@@ -3291,7 +3293,7 @@ MSA_2R_LIST(MSA_2R)
#define MSA_FILL(format) \
void Assembler::fill_##format(MSARegister wd, Register rs) { \
- DCHECK((kArchVariant == kMips64r6) && IsEnabled(MIPS_SIMD)); \
+ DCHECK(IsEnabled(MIPS_SIMD)); \
DCHECK(rs.is_valid() && wd.is_valid()); \
Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \
(rs.code() << kWsShift) | (wd.code() << kWdShift) | \
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 84910f1ee9..077b49fa99 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -326,6 +326,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 78f3228f24..a665b76e80 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
@@ -2729,7 +2729,7 @@ void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
- if (kArchVariant == kMips64r6) {
+ if (IsEnabled(MIPS_SIMD)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
switch (cond) {
@@ -2775,6 +2775,8 @@ void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
UNREACHABLE();
}
}
+ } else {
+ UNREACHABLE();
}
if (bd == PROTECT) {
nop();
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 9915f7d942..d7b45eda38 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -207,6 +207,19 @@ constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
+enum MSARegisterCode {
+#define REGISTER_CODE(R) kMsaCode_##R,
+ SIMD128_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kMsaAfterLast
+};
+
+// MIPS SIMD (MSA) register
+class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr MSARegister(int code) : RegisterBase(code) {}
+};
+
enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -234,24 +247,13 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
return FPURegister::from_code(code() + 1);
}
+ MSARegister toW() const { return MSARegister::from_code(code()); }
+
private:
friend class RegisterBase;
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
-
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
// f28: 0.0
@@ -294,11 +296,13 @@ constexpr Register cp = s7;
constexpr Register kScratchReg = s3;
constexpr Register kScratchReg2 = s4;
constexpr DoubleRegister kScratchDoubleReg = f30;
+// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
constexpr DoubleRegister kDoubleCompareReg = f23;
// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
+// MSA zero reg is often used to hold 0, but it's not hardwired to 0.
constexpr Simd128Register kSimd128RegZero = w28;
constexpr Simd128Register kSimd128ScratchReg = w30;
@@ -383,6 +387,8 @@ constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+constexpr DoubleRegister kFPReturnRegister0 = f0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7cf71e8d0f..19f93e674e 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -162,15 +162,13 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
case Code::BUILTIN:
return StackFrame::STUB;
case Code::WASM_FUNCTION:
- return StackFrame::WASM_COMPILED;
+ return StackFrame::WASM;
case Code::WASM_TO_CAPI_FUNCTION:
return StackFrame::WASM_EXIT;
case Code::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
case Code::WASM_TO_JS_FUNCTION:
return StackFrame::WASM_TO_JS;
- case Code::WASM_INTERPRETER_ENTRY:
- return StackFrame::WASM_INTERPRETER_ENTRY;
case Code::C_WASM_ENTRY:
return StackFrame::C_WASM_ENTRY;
default:
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index af3514b828..d6d4c88c99 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -248,6 +248,12 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return optimization_id_;
}
+ unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
+
+ void set_inlined_bytecode_size(unsigned size) {
+ inlined_bytecode_size_ = size;
+ }
+
struct InlinedFunctionHolder {
Handle<SharedFunctionInfo> shared_info;
Handle<BytecodeArray> bytecode_array; // Explicit to prevent flushing.
@@ -329,6 +335,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
InlinedFunctionList inlined_functions_;
int optimization_id_ = -1;
+ unsigned inlined_bytecode_size_ = 0;
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index c55a5a9c0b..f6a2f7849d 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -120,30 +120,86 @@ Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
-int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+void Assembler::set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(
+ pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
+}
+
+int RelocInfo::target_address_size() {
+ if (IsCodedSpecially()) {
+ return Assembler::kSpecialTargetSize;
+ } else {
+ return kSystemPointerSize;
+ }
+}
+
+Tagged_t Assembler::target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ return static_cast<Tagged_t>(target_address_at(pc, constant_pool));
+}
+
+Handle<Object> Assembler::code_target_object_handle_at(Address pc,
+ Address constant_pool) {
+ int index =
+ static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
+ return GetCodeTarget(index);
+}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ host_.address(),
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
- return target_object();
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ isolate,
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return target_object();
+ }
+}
+
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
+ Address pc, Address const_pool) {
+ return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCodeTarget(rmode_)) {
+ return Handle<HeapObject>::cast(
+ origin->code_target_object_handle_at(pc_, constant_pool_));
+ } else {
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
+ }
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
+ } else {
+ DCHECK(IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
@@ -181,13 +237,16 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory<Address>(pc_) = kNullAddress;
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(pc_, constant_pool_,
+ kNullAddress);
} else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index a305a104f0..b9f09e23f2 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -511,7 +511,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundJumpTableEntryOpcode: {
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_start_ + pos),
- kPointerSize / kInstrSize);
+ kSystemPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
patcher.dp(target_pos);
break;
@@ -1757,6 +1757,32 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
frc.code() * B6 | rc);
}
+// Vector instructions
+void Assembler::mfvsrd(const Register ra, const DoubleRegister rs) {
+ int SX = 1;
+ emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
+}
+
+void Assembler::mfvsrwz(const Register ra, const DoubleRegister rs) {
+ int SX = 1;
+ emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
+}
+
+void Assembler::mtvsrd(const DoubleRegister rt, const Register ra) {
+ int TX = 1;
+ emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
+}
+
+void Assembler::vor(const DoubleRegister rt, const DoubleRegister ra,
+ const DoubleRegister rb) {
+ emit(VOR | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+void Assembler::vsro(const DoubleRegister rt, const DoubleRegister ra,
+ const DoubleRegister rb) {
+ emit(VSRO | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index b27a4fd8fe..778e94c185 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -254,6 +254,18 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool);
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ inline Handle<Object> code_target_object_handle_at(Address pc,
+ Address constant_pool);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(
+ Address pc, Address constant_pool);
+
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -434,6 +446,20 @@ class Assembler : public AssemblerBase {
PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
#undef DECLARE_PPC_XX3_INSTRUCTIONS
+#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister rb, \
+ const Operand& imm) { \
+ vx_form(instr_name, rt, rb, imm); \
+ }
+
+ inline void vx_form(Instr instr, DoubleRegister rt, DoubleRegister rb,
+ const Operand& imm) {
+ emit(instr | rt.code() * B21 | imm.immediate() * B16 | rb.code() * B11);
+ }
+
+ PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
+#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
+
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
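vx_form assembles a VX A-form instruction by OR-ing the opcode with the target register in bits 21-25, the 5-bit immediate in bits 16-20, and the source register in bits 11-15 (B21, B16 and B11 are the corresponding power-of-two multipliers). A hand-computed sketch of the encoding for vspltw (the opcode constant comes from PPC_VX_OPCODE_A_FORM_LIST; the operand values are arbitrary):

#include <cassert>
#include <cstdint>

// Model of the new vx_form() encoding: opcode | rt<<21 | uim<<16 | rb<<11.
constexpr uint32_t kVspltw = 0x1000028C;

constexpr uint32_t EncodeVxAForm(uint32_t opcode, uint32_t rt, uint32_t uim,
                                 uint32_t rb) {
  return opcode | (rt << 21) | (uim << 16) | (rb << 11);
}

int main() {
  // vspltw v2, v5, 1
  assert(EncodeVxAForm(kVspltw, 2, 1, 5) == 0x10412A8Cu);
  return 0;
}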
@@ -920,6 +946,15 @@ class Assembler : public AssemblerBase {
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ // Vector instructions
+ void mfvsrd(const Register ra, const DoubleRegister r);
+ void mfvsrwz(const Register ra, const DoubleRegister r);
+ void mtvsrd(const DoubleRegister rt, const Register ra);
+ void vor(const DoubleRegister rt, const DoubleRegister ra,
+ const DoubleRegister rb);
+ void vsro(const DoubleRegister rt, const DoubleRegister ra,
+ const DoubleRegister rb);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -941,9 +976,9 @@ class Assembler : public AssemblerBase {
void push(Register src) {
#if V8_TARGET_ARCH_PPC64
- stdu(src, MemOperand(sp, -kPointerSize));
+ stdu(src, MemOperand(sp, -kSystemPointerSize));
#else
- stwu(src, MemOperand(sp, -kPointerSize));
+ stwu(src, MemOperand(sp, -kSystemPointerSize));
#endif
}
@@ -953,10 +988,10 @@ class Assembler : public AssemblerBase {
#else
lwz(dst, MemOperand(sp));
#endif
- addi(sp, sp, Operand(kPointerSize));
+ addi(sp, sp, Operand(kSystemPointerSize));
}
- void pop() { addi(sp, sp, Operand(kPointerSize)); }
+ void pop() { addi(sp, sp, Operand(kSystemPointerSize)); }
// Jump unconditionally to given label.
void jmp(Label* L) { b(L); }
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index fe7df45ae5..b75c3e3257 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -1229,7 +1229,11 @@ using Instr = uint32_t;
/* Store Floating-Point Single with Update Indexed */ \
V(stfsux, STFSUX, 0x7C00056E) \
/* Store Floating-Point Single Indexed */ \
- V(stfsx, STFSX, 0x7C00052E)
+ V(stfsx, STFSX, 0x7C00052E) \
+ /* Load Vector Indexed */ \
+ V(lvx, LVX, 0x7C0000CE) \
+ /* Store Vector Indexed */ \
+ V(stvx, STVX, 0x7C0001CE)
#define PPC_X_OPCODE_E_FORM_LIST(V) \
/* Shift Right Algebraic Word Immediate */ \
@@ -1693,8 +1697,6 @@ using Instr = uint32_t;
V(lvsl, LVSL, 0x7C00000C) \
/* Load Vector for Shift Right */ \
V(lvsr, LVSR, 0x7C00004C) \
- /* Load Vector Indexed */ \
- V(lvx, LVX, 0x7C0000CE) \
/* Load Vector Indexed Last */ \
V(lvxl, LVXL, 0x7C0002CE) \
/* Store Vector Element Byte Indexed */ \
@@ -1703,8 +1705,6 @@ using Instr = uint32_t;
V(stvehx, STVEHX, 0x7C00014E) \
/* Store Vector Element Word Indexed */ \
V(stvewx, STVEWX, 0x7C00018E) \
- /* Store Vector Indexed */ \
- V(stvx, STVX, 0x7C0001CE) \
/* Store Vector Indexed Last */ \
V(stvxl, STVXL, 0x7C0003CE) \
/* Vector Minimum Signed Doubleword */ \
@@ -2192,7 +2192,15 @@ using Instr = uint32_t;
/* Rotate Left Word then AND with Mask */ \
V(rlwnm, RLWNMX, 0x5C000000)
-#define PPC_VX_OPCODE_LIST(V) \
+#define PPC_VX_OPCODE_A_FORM_LIST(V) \
+ /* Vector Splat Byte */ \
+ V(vspltb, VSPLTB, 0x1000020C) \
+ /* Vector Splat Word */ \
+ V(vspltw, VSPLTW, 0x1000028C) \
+ /* Vector Splat Halfword */ \
+ V(vsplth, VSPLTH, 0x1000024C)
+
+#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
/* Decimal Subtract Modulo */ \
@@ -2427,18 +2435,12 @@ using Instr = uint32_t;
V(vslo, VSLO, 0x1000040C) \
/* Vector Shift Left Word */ \
V(vslw, VSLW, 0x10000184) \
- /* Vector Splat Byte */ \
- V(vspltb, VSPLTB, 0x1000020C) \
- /* Vector Splat Halfword */ \
- V(vsplth, VSPLTH, 0x1000024C) \
/* Vector Splat Immediate Signed Byte */ \
V(vspltisb, VSPLTISB, 0x1000030C) \
/* Vector Splat Immediate Signed Halfword */ \
V(vspltish, VSPLTISH, 0x1000034C) \
/* Vector Splat Immediate Signed Word */ \
V(vspltisw, VSPLTISW, 0x1000038C) \
- /* Vector Splat Word */ \
- V(vspltw, VSPLTW, 0x1000028C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
/* Vector Shift Right Algebraic Byte */ \
@@ -2534,6 +2536,10 @@ using Instr = uint32_t;
/* Vector Merge Odd Word */ \
V(vmrgow, VMRGOW, 0x1000068C)
+#define PPC_VX_OPCODE_LIST(V) \
+ PPC_VX_OPCODE_A_FORM_LIST(V) \
+ PPC_VX_OPCODE_UNUSED_LIST(V)
+
#define PPC_XS_OPCODE_LIST(V) \
/* Shift Right Algebraic Doubleword Immediate */ \
V(sradi, SRADIX, 0x7C000674)
@@ -2587,7 +2593,8 @@ enum Opcode : uint32_t {
opcode_name = opcode_value,
PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
- EXT1 = 0x4C000000, // Extended code set 1
+ EXT0 = 0x10000000, // Extended code set 0
+ EXT1 = 0x4C000000, // Extended code set 1
EXT2 = 0x7C000000, // Extended code set 2
EXT3 = 0xEC000000, // Extended code set 3
EXT4 = 0xFC000000, // Extended code set 4
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index f2264b05fa..cd0ab1a328 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -283,6 +283,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index ca6d472c93..3cf819f102 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
@@ -50,7 +50,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
bytes += kNumCallerSavedDoubles * kDoubleSize;
@@ -75,7 +75,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == kSaveFPRegs) {
MultiPushDoubles(kCallerSavedDoubles);
@@ -106,7 +106,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
return bytes;
}
@@ -120,13 +120,13 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
- const uint32_t offset =
- FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
-
- CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- LoadP(destination, MemOperand(destination, offset), r0);
+ LoadTaggedPointerField(
+ destination,
+ FieldMemOperand(destination,
+ FixedArray::OffsetOfElementAt(constant_index)),
+ r0);
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -202,7 +202,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bind(&skip);
return;
}
- Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
+ int32_t target_index = AddCodeTarget(code);
+ Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
}
void TurboAssembler::Jump(const ExternalReference& reference) {
@@ -212,7 +213,8 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it.
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(scratch, kPointerSize));
+ LoadP(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(scratch, kSystemPointerSize));
LoadP(scratch, MemOperand(scratch, 0));
}
Jump(scratch);
@@ -291,17 +293,18 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
return;
}
DCHECK(code->IsExecutable());
- Call(code.address(), rmode, cond);
+ int32_t target_index = AddCodeTarget(code);
+ Call(static_cast<Address>(target_index), rmode, cond);
}
void TurboAssembler::Drop(int count) {
if (count > 0) {
- Add(sp, sp, count * kPointerSize, r0);
+ Add(sp, sp, count * kSystemPointerSize, r0);
}
}
void TurboAssembler::Drop(Register count, Register scratch) {
- ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
+ ShiftLeftImm(scratch, count, Operand(kSystemPointerSizeLog2));
add(sp, sp, scratch);
}
@@ -317,15 +320,22 @@ void TurboAssembler::Push(Smi smi) {
push(r0);
}
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ EmbeddedObjectIndex index = AddEmbeddedObject(value);
+ DCHECK(is_uint32(index));
+ mov(dst, Operand(static_cast<int>(index), rmode));
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ mov(dst, Operand(value.address(), rmode));
}
- mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
@@ -354,12 +364,12 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = base::bits::CountPopulation(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
+ int16_t stack_offset = num_to_push * kSystemPointerSize;
subi(location, location, Operand(stack_offset));
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- stack_offset -= kPointerSize;
+ stack_offset -= kSystemPointerSize;
StoreP(ToRegister(i), MemOperand(location, stack_offset));
}
}
@@ -371,7 +381,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
LoadP(ToRegister(i), MemOperand(location, stack_offset));
- stack_offset += kPointerSize;
+ stack_offset += kSystemPointerSize;
}
}
addi(location, location, Operand(stack_offset));
@@ -411,6 +421,111 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
+void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ LoadP(destination, field_operand, scratch);
+ }
+}
+
+void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ LoadP(destination, field_operand, scratch);
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
+ if (SmiValuesAre31Bits()) {
+ lwz(dst, src);
+ } else {
+ LoadP(dst, src);
+ }
+
+ SmiUntag(dst, rc);
+}
+
+void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src,
+ RCBit rc) {
+ SmiUntag(dst, src, rc);
+}
+
+void TurboAssembler::StoreTaggedFieldX(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ RecordComment("[ StoreTagged");
+ stwx(value, dst_field_operand);
+ RecordComment("]");
+ } else {
+ StorePX(value, dst_field_operand);
+ }
+}
+
+void TurboAssembler::StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ RecordComment("[ StoreTagged");
+ StoreWord(value, dst_field_operand, scratch);
+ RecordComment("]");
+ } else {
+ StoreP(value, dst_field_operand, scratch);
+ }
+}
+
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ Register src) {
+ RecordComment("[ DecompressTaggedSigned");
+ ZeroExtWord32(destination, src);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTaggedSigned");
+ LoadWord(destination, field_operand, r0);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ Register source) {
+ RecordComment("[ DecompressTaggedPointer");
+ ZeroExtWord32(destination, source);
+ add(destination, destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressTaggedPointer");
+ LoadWord(destination, field_operand, r0);
+ add(destination, destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ MemOperand field_operand) {
+ RecordComment("[ DecompressAnyTagged");
+ LoadWord(destination, field_operand, r0);
+ add(destination, destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ Register source) {
+ RecordComment("[ DecompressAnyTagged");
+ ZeroExtWord32(destination, source);
+ add(destination, destination, kRootRegister);
+ RecordComment("]");
+}
+
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@@ -427,13 +542,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so the offset must be a multiple of kTaggedSize.
+ DCHECK(IsAligned(offset, kTaggedSize));
Add(dst, object, offset - kHeapObjectTag, r0);
if (emit_debug_code()) {
Label ok;
- andi(r0, dst, Operand(kPointerSize - 1));
+ andi(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, cr0);
stop();
bind(&ok);
@@ -569,7 +684,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SmiCheck smi_check) {
DCHECK(object != value);
if (emit_debug_code()) {
- LoadP(r0, MemOperand(address));
+ LoadTaggedPointerField(r0, MemOperand(address));
cmp(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -636,7 +751,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
fp_delta = 0;
}
}
- addi(fp, sp, Operand(fp_delta * kPointerSize));
+ addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
}
void TurboAssembler::PushStandardFrame(Register function_reg) {
@@ -659,7 +774,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
fp_delta = 1;
}
}
- addi(fp, sp, Operand(fp_delta * kPointerSize));
+ addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
}
void TurboAssembler::RestoreFrameStateForTailCall() {
@@ -1012,9 +1127,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
DCHECK_GT(stack_space, 0);
// This is an opportunity to build a frame to wrap
@@ -1052,22 +1167,23 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// since the sp slot and code slot were pushed after the fp.
}
- addi(sp, sp, Operand(-stack_space * kPointerSize));
+ addi(sp, sp, Operand(-stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
const int frame_alignment = ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
}
li(r0, Operand::Zero());
- StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+ StorePU(r0,
+ MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
// Set the exit frame sp value to point just before the return address
// location.
- addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1123,7 +1239,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count.is_valid()) {
if (!argument_count_is_length) {
- ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ ShiftLeftImm(argument_count, argument_count,
+ Operand(kSystemPointerSizeLog2));
}
add(sp, sp, argument_count);
}
@@ -1143,19 +1260,19 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
+ // after we drop current frame. We add kSystemPointerSize to count the
+ // receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- ShiftLeftImm(dst_reg, caller_args_count, Operand(kPointerSizeLog2));
+ ShiftLeftImm(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
add(dst_reg, fp, dst_reg);
addi(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count;
- // Calculate the end of source area. +kPointerSize is for the receiver.
- ShiftLeftImm(src_reg, callee_args_count, Operand(kPointerSizeLog2));
+ // Calculate the end of source area. +kSystemPointerSize is for the receiver.
+ ShiftLeftImm(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
add(src_reg, sp, src_reg);
- addi(src_reg, src_reg, Operand(kPointerSize));
+ addi(src_reg, src_reg, Operand(kSystemPointerSize));
if (FLAG_debug_code) {
cmpl(src_reg, dst_reg);
@@ -1176,8 +1293,8 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
addi(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
mtctr(tmp_reg);
bind(&loop);
- LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
- StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+ LoadPU(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
+ StorePU(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
bdnz(&loop);
// Leave current frame.
@@ -1233,7 +1350,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
- ShiftLeftImm(r7, actual_parameter_count, Operand(kPointerSizeLog2));
+ ShiftLeftImm(r7, actual_parameter_count, Operand(kSystemPointerSizeLog2));
LoadPX(r7, MemOperand(sp, r7));
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1287,7 +1404,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedPointerField(code,
+ FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
CallCodeObject(code);
} else {
@@ -1312,8 +1430,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r5;
Register temp_reg = r7;
- LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ LoadTaggedPointerField(
+ temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadHalfWord(expected_reg,
FieldMemOperand(
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1333,7 +1452,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);
// Get the function and setup the context.
- LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, flag);
@@ -1352,8 +1471,8 @@ void MacroAssembler::MaybeDropFrames() {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
Push(Smi::zero()); // Padding.
@@ -1369,7 +1488,7 @@ void MacroAssembler::PushStackHandler() {
}
void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
@@ -1604,7 +1723,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- cmpi(in, Operand(kClearedWeakHeapObjectLower32));
+ cmpwi(in, Operand(kClearedWeakHeapObjectLower32));
beq(target_if_cleared);
mov(r0, Operand(~kWeakHeapObjectMask));
@@ -1690,14 +1809,16 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void MacroAssembler::LoadMap(Register destination, Register object) {
- LoadP(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(destination,
+ FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadMap(dst, cp);
- LoadP(dst, FieldMemOperand(
- dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadP(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedPointerField(
+ dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
void MacroAssembler::AssertNotSmi(Register object) {
@@ -1821,15 +1942,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
mr(scratch, sp);
- addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
- StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ StoreP(scratch,
+ MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
// Make room for stack arguments
stack_space += stack_passed_arguments;
@@ -1837,7 +1959,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Allocate frame with required slots to make ABI work.
li(r0, Operand::Zero());
- StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+ StorePU(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
@@ -1931,7 +2053,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+ LoadP(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(function, kSystemPointerSize));
LoadP(ip, MemOperand(function, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP) {
@@ -1963,10 +2086,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
- if (ActivationFrameAlignment() > kPointerSize) {
- LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ if (ActivationFrameAlignment() > kSystemPointerSize) {
+ LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
- addi(sp, sp, Operand(stack_space * kPointerSize));
+ addi(sp, sp, Operand(stack_space * kSystemPointerSize));
}
}
@@ -2368,7 +2491,7 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- Cmpi(src1, Operand(smi), scratch, cr);
+ Cmpwi(src1, Operand(smi), scratch, cr);
#else
LoadSmiLiteral(scratch, smi);
cmp(src1, scratch, cr);
@@ -2538,7 +2661,7 @@ void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -2553,7 +2676,7 @@ void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
+void TurboAssembler::StoreWord(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -2903,14 +3026,14 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
- // Untagging is folded into the indexing operand below.
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- ShiftLeftImm(builtin_index, builtin_index,
- Operand(kSystemPointerSizeLog2 - kSmiShift));
-#else
- ShiftRightArithImm(builtin_index, builtin_index,
- kSmiShift - kSystemPointerSizeLog2);
-#endif
+ if (SmiValuesAre32Bits()) {
+ ShiftRightArithImm(builtin_index, builtin_index,
+ kSmiShift - kSystemPointerSizeLog2);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ ShiftLeftImm(builtin_index, builtin_index,
+ Operand(kSystemPointerSizeLog2 - kSmiShift));
+ }
addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
@@ -2996,7 +3119,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kSystemPointerSize));
LoadP(ip, MemOperand(target, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP && dest != ip) {
@@ -3007,7 +3130,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
LoadPC(r7);
bind(&start_call);
addi(r7, r7, Operand(after_call_offset));
- StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
Call(dest);
DCHECK_EQ(after_call_offset - kInstrSize,
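For orientation, the new PPC Decompress* helpers added above all reduce to the same two-step pattern; a minimal host-side sketch of that computation (names and types here are illustrative, not V8's API):

#include <cstdint>

using Address = uintptr_t;

// Compressed tagged pointers are rebased on the isolate root held in
// kRootRegister: zero-extend the 32-bit value, then add the root.
Address DecompressTaggedPointer(Address isolate_root, uint32_t compressed) {
  return isolate_root + static_cast<Address>(compressed);
}

// Compressed Smis already carry their payload in the low 32 bits, so a
// zero-extension is enough; the upper half is irrelevant to runtime code.
Address DecompressTaggedSigned(uint32_t compressed) {
  return static_cast<Address>(compressed);
}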
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 2c46124b24..cea89a472c 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -182,6 +182,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
+ void CompareTagged(Register src1, Register src2, CRegister cr = cr7) {
+ if (COMPRESS_POINTERS_BOOL) {
+ cmpw(src1, src2, cr);
+ } else {
+ cmp(src1, src2, cr);
+ }
+ }
+
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
@@ -196,33 +204,33 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- StorePU(src2, MemOperand(sp, -2 * kPointerSize));
- StoreP(src1, MemOperand(sp, kPointerSize));
+ StorePU(src2, MemOperand(sp, -2 * kSystemPointerSize));
+ StoreP(src1, MemOperand(sp, kSystemPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- StorePU(src3, MemOperand(sp, -3 * kPointerSize));
- StoreP(src2, MemOperand(sp, kPointerSize));
- StoreP(src1, MemOperand(sp, 2 * kPointerSize));
+ StorePU(src3, MemOperand(sp, -3 * kSystemPointerSize));
+ StoreP(src2, MemOperand(sp, kSystemPointerSize));
+ StoreP(src1, MemOperand(sp, 2 * kSystemPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- StorePU(src4, MemOperand(sp, -4 * kPointerSize));
- StoreP(src3, MemOperand(sp, kPointerSize));
- StoreP(src2, MemOperand(sp, 2 * kPointerSize));
- StoreP(src1, MemOperand(sp, 3 * kPointerSize));
+ StorePU(src4, MemOperand(sp, -4 * kSystemPointerSize));
+ StoreP(src3, MemOperand(sp, kSystemPointerSize));
+ StoreP(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ StoreP(src1, MemOperand(sp, 3 * kSystemPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- StorePU(src5, MemOperand(sp, -5 * kPointerSize));
- StoreP(src4, MemOperand(sp, kPointerSize));
- StoreP(src3, MemOperand(sp, 2 * kPointerSize));
- StoreP(src2, MemOperand(sp, 3 * kPointerSize));
- StoreP(src1, MemOperand(sp, 4 * kPointerSize));
+ StorePU(src5, MemOperand(sp, -5 * kSystemPointerSize));
+ StoreP(src4, MemOperand(sp, kSystemPointerSize));
+ StoreP(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ StoreP(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ StoreP(src1, MemOperand(sp, 4 * kSystemPointerSize));
}
void Pop(Register dst) { pop(dst); }
@@ -230,36 +238,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kPointerSize));
- addi(sp, sp, Operand(2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, kSystemPointerSize));
+ addi(sp, sp, Operand(2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kPointerSize));
- addi(sp, sp, Operand(3 * kPointerSize));
+ LoadP(src2, MemOperand(sp, kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ addi(sp, sp, Operand(3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kPointerSize));
- addi(sp, sp, Operand(4 * kPointerSize));
+ LoadP(src3, MemOperand(sp, kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ addi(sp, sp, Operand(4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kPointerSize));
- addi(sp, sp, Operand(5 * kPointerSize));
+ LoadP(src4, MemOperand(sp, kSystemPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ addi(sp, sp, Operand(5 * kSystemPointerSize));
}
void SaveRegisters(RegList registers);
@@ -469,22 +477,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
- void Move(Register dst, Handle<HeapObject> value);
+ void Move(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
- SmiUntag(reg, reg, rc, scale);
- }
+ void SmiUntag(Register dst, const MemOperand& src, RCBit rc);
+ void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
- void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
- if (scale > kSmiShift) {
- ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
- } else if (scale < kSmiShift) {
- ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
+ void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
+ if (COMPRESS_POINTERS_BOOL) {
+ srawi(dst, src, kSmiShift, rc);
} else {
- // do nothing
+ ShiftRightArithImm(dst, src, kSmiShift, rc);
}
}
@@ -650,6 +656,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
+ // ---------------------------------------------------------------------------
+ // Pointer compression Support
+
+ // Loads a field containing a HeapObject and decompresses it if pointer
+ // compression is enabled.
+ void LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
+
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand,
+ const Register& scratch = no_reg);
+
+ // Loads a field containing a smi value and untags it.
+ void SmiUntagField(Register dst, const MemOperand& src, RCBit rc = LeaveRC);
+
+ // Compresses and stores a tagged value to the given on-heap location.
+ void StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch = no_reg);
+ void StoreTaggedFieldX(const Register& value,
+ const MemOperand& dst_field_operand,
+ const Register& scratch = no_reg);
+
+ void DecompressTaggedSigned(Register destination, MemOperand field_operand);
+ void DecompressTaggedSigned(Register destination, Register src);
+ void DecompressTaggedPointer(Register destination, MemOperand field_operand);
+ void DecompressTaggedPointer(Register destination, Register source);
+ void DecompressAnyTagged(Register destination, MemOperand field_operand);
+ void DecompressAnyTagged(Register destination, Register source);
+
+ void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+ void StoreWord(Register src, const MemOperand& mem, Register scratch);
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -718,8 +759,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// than assembler-ppc and may generate variable length sequences
// load a literal double value <value> to FPR <result>
- void LoadWord(Register dst, const MemOperand& mem, Register scratch);
- void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -893,11 +932,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void SmiToPtrArrayOffset(Register dst, Register src) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
- ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2);
+ ShiftLeftImm(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
- STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
- ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2);
+ ShiftRightArithImm(dst, src, kSmiShift - kSystemPointerSizeLog2);
#endif
}
@@ -918,7 +957,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
-#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
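The reworked SmiUntag above hinges only on kSmiShift; a rough scalar equivalent under the two configurations (constants assumed from the surrounding definitions, not copied from V8 headers):

#include <cstdint>

// 31-bit Smis (pointer compression): tag bit 0, payload in bits 1..31.
int32_t SmiUntag31(int32_t tagged) { return tagged >> 1; }   // kSmiShift == 1

// Full 64-bit Smis: the payload lives in the upper 32 bits.
int64_t SmiUntag64(int64_t tagged) { return tagged >> 32; }  // kSmiShift == 32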
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index 8c89aecec7..eded9622c4 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -306,6 +306,8 @@ constexpr Register kRuntimeCallArgvRegister = r5;
constexpr Register kWasmInstanceRegister = r10;
constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
+constexpr DoubleRegister kFPReturnRegister0 = d1;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 9f07978932..2e62c6f1f1 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -6,11 +6,12 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-reference.h"
+#include "src/codegen/external-reference-encoder.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/code-inl.h"
-#include "src/snapshot/snapshot.h"
+#include "src/snapshot/embedded/embedded-data.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 1f65065fb7..8e0e9a4cf5 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -283,6 +283,30 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 77bf0ee916..7e7d1434c4 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -16,7 +16,7 @@
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/smi.h"
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 21094ef3bc..009248a65c 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -263,6 +263,8 @@ constexpr Register kRuntimeCallArgvRegister = r4;
constexpr Register kWasmInstanceRegister = r6;
constexpr Register kWasmCompileLazyFuncIndexRegister = r7;
+constexpr DoubleRegister kFPReturnRegister0 = d0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index ecf9afab48..83a05d0d44 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -79,6 +79,12 @@ struct UintPtrT : WordT {
static constexpr MachineType kMachineType = MachineType::UintPtr();
};
+struct ExternalPointerT : UntaggedT {
+ static const MachineRepresentation kMachineRepresentation =
+ MachineType::PointerRepresentation();
+ static constexpr MachineType kMachineType = MachineType::Pointer();
+};
+
struct Float32T : UntaggedT {
static const MachineRepresentation kMachineRepresentation =
MachineRepresentation::kFloat32;
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index f46ab0ade5..575529e399 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -6,9 +6,9 @@
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
+#include "src/codegen/external-reference-encoder.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate-inl.h"
-#include "src/snapshot/serializer-common.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.cc b/deps/v8/src/codegen/unoptimized-compilation-info.cc
index f46e9cda21..08cd818188 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.cc
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.cc
@@ -18,7 +18,7 @@ namespace internal {
UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
ParseInfo* parse_info,
FunctionLiteral* literal)
- : flags_(0), zone_(zone), feedback_vector_spec_(zone) {
+ : flags_(parse_info->flags()), feedback_vector_spec_(zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this OptimizedCompilationInfo. As such,
@@ -28,13 +28,6 @@ UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
DCHECK_NOT_NULL(literal);
literal_ = literal;
source_range_map_ = parse_info->source_range_map();
-
- if (parse_info->is_eval()) MarkAsEval();
- if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
- if (parse_info->might_always_opt()) MarkAsMightAlwaysOpt();
- if (parse_info->collect_source_positions()) {
- MarkAsForceCollectSourcePositions();
- }
}
DeclarationScope* UnoptimizedCompilationInfo::scope() const {
@@ -52,7 +45,7 @@ int UnoptimizedCompilationInfo::num_parameters_including_this() const {
SourcePositionTableBuilder::RecordingMode
UnoptimizedCompilationInfo::SourcePositionRecordingMode() const {
- if (collect_source_positions()) {
+ if (flags().collect_source_positions()) {
return SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
}
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.h b/deps/v8/src/codegen/unoptimized-compilation-info.h
index fe8dbe66c6..bb431dc98d 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.h
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.h
@@ -12,6 +12,7 @@
#include "src/handles/handles.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/objects.h"
+#include "src/parsing/parse-info.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -33,23 +34,7 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
UnoptimizedCompilationInfo(Zone* zone, ParseInfo* parse_info,
FunctionLiteral* literal);
- Zone* zone() { return zone_; }
-
- // Compilation flag accessors.
-
- void MarkAsEval() { SetFlag(kIsEval); }
- bool is_eval() const { return GetFlag(kIsEval); }
-
- void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
- bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
-
- void MarkAsForceCollectSourcePositions() { SetFlag(kCollectSourcePositions); }
- bool collect_source_positions() const {
- return GetFlag(kCollectSourcePositions);
- }
-
- void MarkAsMightAlwaysOpt() { SetFlag(kMightAlwaysOpt); }
- bool might_always_opt() const { return GetFlag(kMightAlwaysOpt); }
+ const UnoptimizedCompileFlags& flags() const { return flags_; }
// Accessors for the input data of the function being compiled.
@@ -97,24 +82,8 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
FeedbackVectorSpec* feedback_vector_spec() { return &feedback_vector_spec_; }
private:
- // Various configuration flags for a compilation, as well as some properties
- // of the compiled code produced by a compilation.
- enum Flag {
- kIsEval = 1 << 0,
- kCollectTypeProfile = 1 << 1,
- kMightAlwaysOpt = 1 << 2,
- kCollectSourcePositions = 1 << 3,
- };
-
- void SetFlag(Flag flag) { flags_ |= flag; }
- bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
-
// Compilation flags.
- unsigned flags_;
-
- // The zone from which the compilation pipeline working on this
- // OptimizedCompilationInfo allocates.
- Zone* zone_;
+ const UnoptimizedCompileFlags flags_;
// The root AST node of the function literal being compiled.
FunctionLiteral* literal_;
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1106626a13..287de802be 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3441,6 +3441,15 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::pmovmskb(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x66);
+ emit(0x0F);
+ emit(0xD7);
+ emit_sse_operand(dst, src);
+}
+
// AVX instructions
void Assembler::vmovddup(XMMRegister dst, XMMRegister src) {
@@ -3634,6 +3643,15 @@ void Assembler::vucomiss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::vpmovmskb(Register dst, XMMRegister src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(idst, xmm0, src, kL128, k66, k0F, kWIG);
+ emit(0xD7);
+ emit_sse_operand(idst, src);
+}
+
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 3f58e3b428..24eb976578 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -1124,6 +1124,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
+ void pmovmskb(Register dst, XMMRegister src);
+
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
@@ -1393,6 +1395,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vpd(0x50, idst, xmm0, src);
}
+ void vpmovmskb(Register dst, XMMRegister src);
void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
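As a reference point for the newly wired pmovmskb/vpmovmskb encodings, the SSE2 intrinsic with the same semantics (sketch only, not part of the patch):

#include <emmintrin.h>
#include <cstdint>

uint32_t ByteMask(__m128i v) {
  // Collects the most significant bit of each of the 16 byte lanes into
  // bits 0..15 of the result, matching what pmovmskb writes to a GP register.
  return static_cast<uint32_t>(_mm_movemask_epi8(v));
}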
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index 0fd62d46a4..6b9754efca 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -285,6 +285,41 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, // kLeft
+ rax, // kRight
+ rdi, // Slot
+ rbx}; // kMaybeFeedbackVector
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi, // kFunction
+ rax, // kActualArgumentsCount
+ rcx, // kSlot
+ rbx}; // kMaybeFeedbackVector
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, // kLeft
+ rax, // kRight
+ rdi, // Slot
+ rbx}; // kMaybeFeedbackVector
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdx, // kValue
+ rax, // kSlot
+ rdi}; // kMaybeFeedbackVector
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index a3389f1bb0..7d6fdc5eb3 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -14,10 +14,11 @@
#include "src/codegen/register-configuration.h"
#include "src/codegen/string-constants.h"
#include "src/codegen/x64/assembler-x64.h"
+#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryChunk.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
@@ -341,6 +342,14 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
}
+void TurboAssembler::LoadExternalPointerField(Register destination,
+ Operand field_operand) {
+ movq(destination, field_operand);
+ if (V8_HEAP_SANDBOX_BOOL) {
+ xorq(destination, Immediate(kExternalPointerSalt));
+ }
+}
+
void TurboAssembler::RestoreRegisters(RegList registers) {
DCHECK_GT(NumRegs(registers), 0);
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index da9ab5da32..8382bf5a28 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -141,6 +141,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movups, movups)
AVX_OP(Movmskps, movmskps)
AVX_OP(Movmskpd, movmskpd)
+ AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
AVX_OP(Movdqu, movdqu)
@@ -227,7 +228,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Addps, addps)
- AVX_OP(Haddps, haddps)
AVX_OP(Subps, subps)
AVX_OP(Mulps, mulps)
AVX_OP(Divps, divps)
@@ -248,6 +248,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Cmpps, cmpps)
AVX_OP(Cmppd, cmppd)
AVX_OP(Movlhps, movlhps)
+ AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSSE3(Phaddd, phaddd)
AVX_OP_SSSE3(Phaddw, phaddw)
@@ -676,6 +677,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedPointer(Register destination, Register source);
void DecompressAnyTagged(Register destination, Operand field_operand);
+ // ---------------------------------------------------------------------------
+ // V8 Heap sandbox support
+
+ // Loads a field containing an off-heap pointer and does necessary decoding
+ // if V8 heap sandbox is enabled.
+ void LoadExternalPointerField(Register destination, Operand field_operand);
+
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 39e8c02b3f..7d5aaab682 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -217,6 +217,8 @@ constexpr Register kRootRegister = r13; // callee save
constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
+constexpr DoubleRegister kFPReturnRegister0 = xmm0;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/common/external-pointer-inl.h b/deps/v8/src/common/external-pointer-inl.h
new file mode 100644
index 0000000000..32a78002e1
--- /dev/null
+++ b/deps/v8/src/common/external-pointer-inl.h
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMMON_EXTERNAL_POINTER_INL_H_
+#define V8_COMMON_EXTERNAL_POINTER_INL_H_
+
+#include "include/v8-internal.h"
+#include "src/common/external-pointer.h"
+#include "src/execution/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
+ Address external_pointer) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ if (!V8_HEAP_SANDBOX_BOOL) return external_pointer;
+ return external_pointer ^ kExternalPointerSalt;
+}
+
+V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
+ ExternalPointer_t encoded_pointer) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ if (!V8_HEAP_SANDBOX_BOOL) return encoded_pointer;
+ return encoded_pointer ^ kExternalPointerSalt;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMMON_EXTERNAL_POINTER_INL_H_
diff --git a/deps/v8/src/common/external-pointer.h b/deps/v8/src/common/external-pointer.h
new file mode 100644
index 0000000000..9b5b061997
--- /dev/null
+++ b/deps/v8/src/common/external-pointer.h
@@ -0,0 +1,33 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMMON_EXTERNAL_POINTER_H_
+#define V8_COMMON_EXTERNAL_POINTER_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// See v8:10391 for details about V8 heap sandbox.
+constexpr uint32_t kExternalPointerSalt =
+ 0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMask);
+
+static_assert(static_cast<int32_t>(kExternalPointerSalt) >= 0,
+ "Salt value must be positive for better assembly code");
+
+// Convert external pointer value into encoded form suitable for being stored
+// on V8 heap.
+V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
+ Address external_pointer);
+
+// Convert external pointer from on-V8-heap representation to an actual external
+// pointer value.
+V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
+ ExternalPointer_t encoded_pointer);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMMON_EXTERNAL_POINTER_H_
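A tiny round-trip sketch of the salting scheme defined above (the tag-mask value is an assumption for illustration; function names are not V8's):

#include <cstdint>

constexpr uintptr_t kHeapObjectTagMaskSketch = 3;  // assumed, mirrors kHeapObjectTagMask
constexpr uint32_t kSaltSketch =
    0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMaskSketch);

// Encoding and decoding are the same XOR, so applying it twice is the
// identity, and because the salt clears the tag bits the low bits of the
// original pointer are never flipped.
uintptr_t Encode(uintptr_t external_pointer) { return external_pointer ^ kSaltSketch; }
uintptr_t Decode(uintptr_t encoded) { return encoded ^ kSaltSketch; }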
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index c79b3b633c..4309b70234 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -279,6 +279,11 @@ constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
#endif
+// This type defines raw storage type for external (or off-V8 heap) pointers
+// stored on V8 heap.
+using ExternalPointer_t = Address;
+constexpr int kExternalPointerSize = sizeof(ExternalPointer_t);
+
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
constexpr int kEmbedderDataSlotSizeInTaggedSlots =
@@ -646,6 +651,7 @@ class NewSpace;
class NewLargeObjectSpace;
class NumberDictionary;
class Object;
+class OffThreadIsolate;
class OldLargeObjectSpace;
template <HeapObjectReferenceType kRefType, typename StorageType>
class TaggedImpl;
@@ -805,17 +811,6 @@ enum class LocalSpaceKind {
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum VisitMode {
- VISIT_ALL,
- VISIT_ALL_IN_MINOR_MC_MARK,
- VISIT_ALL_IN_MINOR_MC_UPDATE,
- VISIT_ALL_IN_SCAVENGE,
- VISIT_ALL_IN_SWEEP_NEWSPACE,
- VISIT_ONLY_STRONG,
- VISIT_ONLY_STRONG_IGNORE_STACK,
- VISIT_FOR_SERIALIZATION,
-};
-
enum class BytecodeFlushMode {
kDoNotFlushBytecode,
kFlushBytecode,
@@ -837,7 +832,7 @@ enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, INSPECTOR_CODE };
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
-enum ParseRestriction {
+enum ParseRestriction : bool {
NO_PARSE_RESTRICTION, // All expressions are allowed.
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
@@ -1598,7 +1593,10 @@ enum class LoadSensitivity {
V(TrapElemSegmentDropped) \
V(TrapTableOutOfBounds) \
V(TrapBrOnExnNullRef) \
- V(TrapRethrowNullRef)
+ V(TrapRethrowNullRef) \
+ V(TrapNullDereference) \
+ V(TrapIllegalCast) \
+ V(TrapArrayOutOfBounds)
enum KeyedAccessLoadMode {
STANDARD_LOAD,
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 13d5310d92..e6a25de266 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -223,8 +223,6 @@ namespace internal {
T(ProxyGetPrototypeOfNonExtensible, \
"'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
"trap did not return its actual prototype") \
- T(ProxyHandlerOrTargetRevoked, \
- "Cannot create proxy with a revoked proxy as target or handler") \
T(ProxyHasNonConfigurable, \
"'has' on proxy: trap returned falsish for property '%' which exists in " \
"the proxy target as non-configurable") \
@@ -500,7 +498,8 @@ namespace internal {
T(TooManySpreads, \
"Literal containing too many nested spreads (up to 65534 allowed)") \
T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
- T(TooManyElementsInPromiseAll, "Too many elements passed to Promise.all") \
+ T(TooManyElementsInPromiseCombinator, \
+ "Too many elements passed to Promise.%") \
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
@@ -554,6 +553,9 @@ namespace internal {
T(WasmTrapTableOutOfBounds, "table access out of bounds") \
T(WasmTrapBrOnExnNullRef, "br_on_exn on nullref value") \
T(WasmTrapRethrowNullRef, "rethrowing nullref value") \
+ T(WasmTrapNullDereference, "dereferencing a null pointer") \
+ T(WasmTrapIllegalCast, "illegal cast") \
+ T(WasmTrapArrayOutOfBounds, "array element access out of bounds") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
@@ -593,7 +595,9 @@ namespace internal {
"WeakRef: target must be an object") \
T(OptionalChainingNoNew, "Invalid optional chain from new expression") \
T(OptionalChainingNoSuper, "Invalid optional chain from super property") \
- T(OptionalChainingNoTemplate, "Invalid tagged template on optional chain")
+ T(OptionalChainingNoTemplate, "Invalid tagged template on optional chain") \
+ /* AggregateError */ \
+ T(AllPromisesRejected, "All promises were rejected")
enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index 75ea2e069f..1898930319 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -8,6 +8,7 @@
#include "include/v8-internal.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate-inl.h"
namespace v8 {
namespace internal {
@@ -36,6 +37,15 @@ V8_INLINE Address GetIsolateRoot(const Isolate* isolate) {
return isolate_root;
}
+V8_INLINE Address GetIsolateRoot(const OffThreadIsolate* isolate) {
+ Address isolate_root = isolate->isolate_root();
+#ifdef V8_COMPRESS_POINTERS
+ isolate_root = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(isolate_root), kPtrComprIsolateRootAlignment));
+#endif
+ return isolate_root;
+}
+
// Decompresses smi value.
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
// For runtime code the upper 32-bits of the Smi value do not matter.
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 42d64b6614..c20a38a7e4 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -27,7 +27,6 @@ CompilerDispatcher::Job::~Job() = default;
CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size)
: isolate_(isolate),
- allocator_(isolate->allocator()),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
background_compile_timer_(
@@ -66,7 +65,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
if (!IsEnabled()) return base::nullopt;
std::unique_ptr<Job> job = std::make_unique<Job>(new BackgroundCompileTask(
- allocator_, outer_parse_info, function_name, function_literal,
+ outer_parse_info, function_name, function_literal,
worker_thread_runtime_call_stats_, background_compile_timer_,
static_cast<int>(max_stack_size_)));
JobMap::const_iterator it = InsertJob(std::move(job));
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index 544e9c8ba7..108e3cca49 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -151,7 +151,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
JobMap::const_iterator RemoveJob(JobMap::const_iterator job);
Isolate* isolate_;
- AccountingAllocator* allocator_;
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
TimedHistogram* background_compile_timer_;
std::shared_ptr<v8::TaskRunner> taskrunner_;
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 8eac049504..809f36741f 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,6 +1,4 @@
bmeurer@chromium.org
-jarin@chromium.org
-titzer@chromium.org
danno@chromium.org
sigurds@chromium.org
tebbi@chromium.org
@@ -8,6 +6,7 @@ neis@chromium.org
mvstanton@chromium.org
mslekova@chromium.org
jgruber@chromium.org
+nicohartmann@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index f820b3db4c..e19067f3c1 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -420,7 +420,8 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
JSTypedArray::kExternalPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- Type::ExternalPointer(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
LoadSensitivity::kCritical};
@@ -429,9 +430,13 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
// static
FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
- FieldAccess access = {kTaggedBase, JSDataView::kDataPointerOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::Pointer(),
+ FieldAccess access = {kTaggedBase,
+ JSDataView::kDataPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
+ MachineType::Pointer(),
kNoWriteBarrier};
return access;
}
@@ -690,7 +695,8 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
ExternalString::kResourceDataOffset,
Handle<Name>(),
MaybeHandle<Map>(),
- Type::ExternalPointer(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index dcdd1de831..9a2a56cd8b 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -390,6 +390,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
+ } else if (FLAG_turboprop && !map->is_prototype_map()) {
+ // The constness feedback is too unstable for the aggressive compilation
+ // of turboprop.
+ constness = PropertyConstness::kMutable;
} else {
map_ref.SerializeOwnDescriptor(descriptor);
constness = dependencies()->DependOnFieldConstness(map_ref, descriptor);
@@ -583,11 +587,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
}
Handle<JSObject> map_prototype(JSObject::cast(map->prototype()), isolate());
- if (map_prototype->map().is_deprecated()) {
- // Try to migrate the prototype object so we don't embed the deprecated
- // map into the optimized code.
- JSObject::TryMigrateInstance(isolate(), map_prototype);
- }
+ CHECK(!map_prototype->map().is_deprecated());
map = handle(map_prototype->map(), isolate());
holder = map_prototype;
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 5b0c74799b..d453cf0188 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-code-manager.h"
@@ -3283,13 +3282,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmWord32AtomicPairStore: {
Label store;
- __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ Register base = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value_low = i.InputRegister(2);
+ Register value_high = i.InputRegister(3);
+ Register actual_addr = i.TempRegister(0);
+ // The {ldrexd} instruction needs two temp registers. We do not need the
+ // result of {ldrexd}, but {strexd} likely fails without the {ldrexd}.
+ Register tmp1 = i.TempRegister(1);
+ Register tmp2 = i.TempRegister(2);
+ // Reuse one of the temp registers for the result of {strexd}.
+ Register store_result = tmp1;
+ __ add(actual_addr, base, offset);
__ dmb(ISH);
__ bind(&store);
- __ ldrexd(i.TempRegister(1), i.TempRegister(2), i.TempRegister(0));
- __ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
- i.TempRegister(0));
- __ teq(i.TempRegister(1), Operand(0));
+ // Add this {ldrexd} instruction here so that {strexd} below can succeed.
+ // We don't need the result of {ldrexd} itself.
+ __ ldrexd(tmp1, tmp2, actual_addr);
+ __ strexd(store_result, value_low, value_high, actual_addr);
+ __ cmp(store_result, Operand(0));
__ b(ne, &store);
__ dmb(ISH);
break;
@@ -3421,7 +3432,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, 0);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 38a4dd8db4..74658697b5 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -1233,6 +1233,8 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
VisitWord32PairShift(this, kArmAsrPair, node);
}
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, TryMatchROR);
}
@@ -2437,23 +2439,24 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
AddressingModeField::encode(addressing_mode);
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ InstructionOperand temps[4];
+ size_t temp_count = 0;
+ temps[temp_count++] = g.TempRegister();
+ temps[temp_count++] = g.TempRegister();
+ if (projection0) {
+ outputs[output_count++] = g.DefineAsFixed(projection0, r6);
+ } else {
+ temps[temp_count++] = g.TempRegister(r6);
+ }
if (projection1) {
- InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r6),
- g.DefineAsFixed(projection1, r7)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
- } else if (projection0) {
- InstructionOperand outputs[] = {g.DefineAsFixed(projection0, r6)};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister(r7)};
- Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
- arraysize(temps), temps);
+ outputs[output_count++] = g.DefineAsFixed(projection1, r7);
} else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
- g.TempRegister(r6), g.TempRegister(r7)};
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ temps[temp_count++] = g.TempRegister(r7);
}
+ Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
+ temps);
}
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index f01181e955..4cf19a5d80 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -1846,6 +1845,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V##FORMAT(), \
i.InputSimd128Register(1).V##FORMAT()); \
break;
+#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
+ case Op: { \
+ VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0).V##FORMAT()); \
+ __ Instr(dst, i.InputSimd128Register(1).V##FORMAT(), \
+ i.InputSimd128Register(2).V##FORMAT()); \
+ break; \
+ }
case kArm64F64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
@@ -1892,18 +1899,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2D());
break;
}
- case kArm64F64x2Qfma: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Fmla(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(2).V2D());
- break;
- }
- case kArm64F64x2Qfms: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Fmls(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(2).V2D());
- break;
- }
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -1954,18 +1951,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V4S());
break;
}
- case kArm64F32x4Qfma: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Fmla(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(2).V4S());
- break;
- }
- case kArm64F32x4Qfms: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Fmls(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(2).V4S());
- break;
- }
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -2104,6 +2091,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S);
SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mls, Mls, 4S);
SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
@@ -2197,6 +2186,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
@@ -2310,6 +2301,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
@@ -2394,13 +2387,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArm64S128Select: {
- VRegister dst = i.OutputSimd128Register().V16B();
- DCHECK_EQ(dst, i.InputSimd128Register(0).V16B());
- __ Bsl(dst, i.InputSimd128Register(1).V16B(),
- i.InputSimd128Register(2).V16B());
- break;
- }
+ SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
SIMD_BINOP_CASE(kArm64S128AndNot, Bic, 16B);
case kArm64S32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V4S(),
@@ -2575,6 +2562,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef SIMD_UNOP_CASE
#undef SIMD_WIDENING_UNOP_CASE
#undef SIMD_BINOP_CASE
+#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -2684,7 +2672,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// Therefore we emit a call to C here instead of a call to the runtime.
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index e24812f884..a8e2b52c02 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -238,6 +238,8 @@ namespace compiler {
V(Arm64I32x4AddHoriz) \
V(Arm64I32x4Sub) \
V(Arm64I32x4Mul) \
+ V(Arm64I32x4Mla) \
+ V(Arm64I32x4Mls) \
V(Arm64I32x4MinS) \
V(Arm64I32x4MaxS) \
V(Arm64I32x4Eq) \
@@ -270,6 +272,8 @@ namespace compiler {
V(Arm64I16x8Sub) \
V(Arm64I16x8SubSaturateS) \
V(Arm64I16x8Mul) \
+ V(Arm64I16x8Mla) \
+ V(Arm64I16x8Mls) \
V(Arm64I16x8MinS) \
V(Arm64I16x8MaxS) \
V(Arm64I16x8Eq) \
@@ -302,6 +306,8 @@ namespace compiler {
V(Arm64I8x16Sub) \
V(Arm64I8x16SubSaturateS) \
V(Arm64I8x16Mul) \
+ V(Arm64I8x16Mla) \
+ V(Arm64I8x16Mls) \
V(Arm64I8x16MinS) \
V(Arm64I8x16MaxS) \
V(Arm64I8x16Eq) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 72a0b1b012..128ebdac95 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -208,6 +208,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4AddHoriz:
case kArm64I32x4Sub:
case kArm64I32x4Mul:
+ case kArm64I32x4Mla:
+ case kArm64I32x4Mls:
case kArm64I32x4MinS:
case kArm64I32x4MaxS:
case kArm64I32x4Eq:
@@ -240,6 +242,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8Sub:
case kArm64I16x8SubSaturateS:
case kArm64I16x8Mul:
+ case kArm64I16x8Mla:
+ case kArm64I16x8Mls:
case kArm64I16x8MinS:
case kArm64I16x8MaxS:
case kArm64I16x8Eq:
@@ -272,6 +276,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16Sub:
case kArm64I8x16SubSaturateS:
case kArm64I8x16Mul:
+ case kArm64I8x16Mla:
+ case kArm64I8x16Mls:
case kArm64I8x16MinS:
case kArm64I8x16MaxS:
case kArm64I8x16Eq:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index bb204f62a6..06a87a8aab 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -1301,6 +1301,10 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kArm64Asr, node, kShift64Imm);
}
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}
@@ -3233,9 +3237,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2GeS, kArm64I64x2GeS) \
V(I64x2GtU, kArm64I64x2GtU) \
V(I64x2GeU, kArm64I64x2GeU) \
- V(I32x4Add, kArm64I32x4Add) \
V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
- V(I32x4Sub, kArm64I32x4Sub) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
V(I32x4MaxS, kArm64I32x4MaxS) \
@@ -3248,10 +3250,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GtU, kArm64I32x4GtU) \
V(I32x4GeU, kArm64I32x4GeU) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8Add, kArm64I16x8Add) \
V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
- V(I16x8Sub, kArm64I16x8Sub) \
V(I16x8SubSaturateS, kArm64I16x8SubSaturateS) \
V(I16x8Mul, kArm64I16x8Mul) \
V(I16x8MinS, kArm64I16x8MinS) \
@@ -3269,9 +3269,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GeU, kArm64I16x8GeU) \
V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16Add, kArm64I8x16Add) \
V(I8x16AddSaturateS, kArm64I8x16AddSaturateS) \
- V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SubSaturateS, kArm64I8x16SubSaturateS) \
V(I8x16Mul, kArm64I8x16Mul) \
V(I8x16MinS, kArm64I8x16MinS) \
@@ -3359,6 +3357,52 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
arraysize(temps), temps);
}
+#define VISIT_SIMD_ADD(Type) \
+ void InstructionSelector::Visit##Type##Add(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Node* left = node->InputAt(0); \
+ Node* right = node->InputAt(1); \
+ /* Select Mla(z, x, y) for Add(Mul(x, y), z). */ \
+ if (left->opcode() == IrOpcode::k##Type##Mul && CanCover(node, left)) { \
+ Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(right), \
+ g.UseRegister(left->InputAt(0)), g.UseRegister(left->InputAt(1))); \
+ return; \
+ } \
+ /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */ \
+ if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
+ Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ g.UseRegister(right->InputAt(0)), \
+ g.UseRegister(right->InputAt(1))); \
+ return; \
+ } \
+ VisitRRR(this, kArm64##Type##Add, node); \
+ }
+
+VISIT_SIMD_ADD(I32x4)
+VISIT_SIMD_ADD(I16x8)
+VISIT_SIMD_ADD(I8x16)
+#undef VISIT_SIMD_ADD
+
+#define VISIT_SIMD_SUB(Type) \
+ void InstructionSelector::Visit##Type##Sub(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Node* left = node->InputAt(0); \
+ Node* right = node->InputAt(1); \
+ /* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */ \
+ if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
+ Emit(kArm64##Type##Mls, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ g.UseRegister(right->InputAt(0)), \
+ g.UseRegister(right->InputAt(1))); \
+ return; \
+ } \
+ VisitRRR(this, kArm64##Type##Sub, node); \
+ }
+
+VISIT_SIMD_SUB(I32x4)
+VISIT_SIMD_SUB(I16x8)
+VISIT_SIMD_SUB(I8x16)
+#undef VISIT_SIMD_SUB
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 9dbd5fac33..72c5750035 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -93,7 +93,6 @@ CodeGenerator::CodeGenerator(
if (code_kind == Code::WASM_FUNCTION ||
code_kind == Code::WASM_TO_CAPI_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
- code_kind == Code::WASM_INTERPRETER_ENTRY ||
code_kind == Code::JS_TO_WASM_FUNCTION) {
tasm_.set_abort_hard(true);
}
@@ -499,6 +498,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
MaybeHandle<Code> maybe_code =
Factory::CodeBuilder(isolate(), desc, info()->code_kind())
.set_builtin_index(info()->builtin_index())
+ .set_inlined_bytecode_size(info()->inlined_bytecode_size())
.set_source_position_table(source_positions)
.set_deoptimization_data(deopt_data)
.set_is_turbofanned()
@@ -996,8 +996,10 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
}
int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
+ literal.Validate();
int result = static_cast<int>(deoptimization_literals_.size());
for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+ deoptimization_literals_[i].Validate();
if (deoptimization_literals_[i] == literal) return i;
}
deoptimization_literals_.push_back(literal);
@@ -1349,6 +1351,7 @@ OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
OutOfLineCode::~OutOfLineCode() = default;
Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
+ Validate();
switch (kind_) {
case DeoptimizationLiteralKind::kObject: {
return object_;
@@ -1359,6 +1362,9 @@ Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
case DeoptimizationLiteralKind::kString: {
return string_->AllocateStringConstant(isolate);
}
+ case DeoptimizationLiteralKind::kInvalid: {
+ UNREACHABLE();
+ }
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 0caefddd97..f9f410dd39 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -51,12 +51,16 @@ class InstructionOperandIterator {
size_t pos_;
};
-enum class DeoptimizationLiteralKind { kObject, kNumber, kString };
+enum class DeoptimizationLiteralKind { kObject, kNumber, kString, kInvalid };
// Either a non-null Handle<Object>, a double or a StringConstantBase.
class DeoptimizationLiteral {
public:
- DeoptimizationLiteral() : object_(), number_(0), string_(nullptr) {}
+ DeoptimizationLiteral()
+ : kind_(DeoptimizationLiteralKind::kInvalid),
+ object_(),
+ number_(0),
+ string_(nullptr) {}
explicit DeoptimizationLiteral(Handle<Object> object)
: kind_(DeoptimizationLiteralKind::kObject), object_(object) {
CHECK(!object_.is_null());
@@ -77,7 +81,14 @@ class DeoptimizationLiteral {
Handle<Object> Reify(Isolate* isolate) const;
- DeoptimizationLiteralKind kind() const { return kind_; }
+ void Validate() const {
+ CHECK_NE(kind_, DeoptimizationLiteralKind::kInvalid);
+ }
+
+ DeoptimizationLiteralKind kind() const {
+ Validate();
+ return kind_;
+ }
private:
DeoptimizationLiteralKind kind_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index d397ba8241..c673458c75 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
@@ -11,12 +9,13 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -493,20 +492,21 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ cmov(zero, dst, tmp); \
} while (false)
-#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- if (HasImmediateInput(instr, 1)) { \
- __ opcode(dst, dst, static_cast<byte>(i.InputInt##width(1))); \
- } else { \
- XMMRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.InputRegister(1); \
- constexpr int mask = (1 << width) - 1; \
- __ and_(shift, Immediate(mask)); \
- __ Movd(tmp, shift); \
- __ opcode(dst, dst, tmp); \
- } \
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ if (HasImmediateInput(instr, 1)) { \
+ __ opcode(dst, dst, byte{i.InputInt##width(1)}); \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register tmp_shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ mov(tmp_shift, i.InputRegister(1)); \
+ __ and_(tmp_shift, Immediate(mask)); \
+ __ Movd(tmp, tmp_shift); \
+ __ opcode(dst, dst, tmp); \
+ } \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1234,6 +1234,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
}
break;
+ case kIA32Rol:
+ if (HasImmediateInput(instr, 1)) {
+ __ rol(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ rol_cl(i.OutputOperand());
+ }
+ break;
case kIA32Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
@@ -2013,6 +2020,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kIA32F64x2Pmin: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Minpd(dst, dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kIA32F64x2Pmax: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Maxpd(dst, dst, i.InputSimd128Register(1));
+ break;
+ }
case kIA32I64x2SplatI32Pair: {
XMMRegister dst = i.OutputSimd128Register();
__ Pinsrd(dst, i.InputRegister(0), 0);
@@ -2224,12 +2243,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEF32x4Sqrt: {
- __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ sqrtps(i.OutputSimd128Register(), i.InputOperand(0));
break;
}
case kAVXF32x4Sqrt: {
CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vsqrtps(i.OutputSimd128Register(), i.InputOperand(0));
break;
}
case kIA32F32x4RecipApprox: {
@@ -2317,11 +2336,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAVXF32x4Min: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src0 = i.InputSimd128Register(0);
Operand src1 = i.InputOperand(1);
// See comment above for correction of minps.
__ movups(kScratchDoubleReg, src1);
__ vminps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vminps(dst, dst, src1);
+ __ vminps(dst, src0, src1);
__ vorps(dst, dst, kScratchDoubleReg);
__ vcmpneqps(kScratchDoubleReg, dst, dst);
__ vorps(dst, dst, kScratchDoubleReg);
@@ -2410,6 +2430,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kIA32F32x4Pmin: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Minps(dst, dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kIA32F32x4Pmax: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Maxps(dst, dst, i.InputSimd128Register(1));
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2759,6 +2791,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kIA32I32x4BitMask: {
+ __ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -3093,6 +3129,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kIA32I16x8BitMask: {
+ Register dst = i.OutputRegister();
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ Packsswb(tmp, i.InputSimd128Register(0));
+ __ Pmovmskb(dst, tmp);
+ __ shr(dst, 8);
+ break;
+ }
case kIA32I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -3155,7 +3199,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, 1)) {
// Perform 16-bit shift, then mask away low bits.
uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, dst, static_cast<byte>(shift));
+ __ Psllw(dst, dst, byte{shift});
uint8_t bmask = static_cast<uint8_t>(0xff << shift);
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
@@ -3164,18 +3208,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufd(tmp_simd, tmp_simd, 0);
__ Pand(dst, tmp_simd);
} else {
- Register shift = i.InputRegister(1);
// Take shift value modulo 8.
- __ and_(shift, 7);
+ __ mov(tmp, i.InputRegister(1));
+ __ and_(tmp, 7);
// Mask off the unwanted bits before word-shifting.
__ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ mov(tmp, shift);
__ add(tmp, Immediate(8));
__ Movd(tmp_simd, tmp);
__ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
__ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
__ Pand(dst, kScratchDoubleReg);
- __ Movd(tmp_simd, shift);
+ // TODO(zhin): sub here to avoid asking for another temporary register;
+ // examine codegen for other i8x16 shifts, which use fewer instructions.
+ __ sub(tmp, Immediate(8));
+ __ Movd(tmp_simd, tmp);
__ Psllw(dst, dst, tmp_simd);
}
break;
@@ -3454,7 +3500,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, 1)) {
// Perform 16-bit shift, then mask away high bits.
uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, dst, static_cast<byte>(shift));
+ __ Psrlw(dst, dst, byte{shift});
uint8_t bmask = 0xff >> shift;
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
@@ -3546,6 +3592,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kIA32I8x16BitMask: {
+ __ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Pxor(dst, dst);
@@ -3696,7 +3746,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32S16x8LoadSplat: {
__ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- static_cast<uint8_t>(0));
+ uint8_t{0});
__ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
break;
}
@@ -4354,7 +4404,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, esi);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
size_t pop_size =
call_descriptor->StackParameterCount() * kSystemPointerSize;
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index b4c90e2711..d347d67202 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -39,6 +39,7 @@ namespace compiler {
V(IA32ShlPair) \
V(IA32ShrPair) \
V(IA32SarPair) \
+ V(IA32Rol) \
V(IA32Ror) \
V(IA32Lzcnt) \
V(IA32Tzcnt) \
@@ -133,6 +134,8 @@ namespace compiler {
V(IA32F64x2Ne) \
V(IA32F64x2Lt) \
V(IA32F64x2Le) \
+ V(IA32F64x2Pmin) \
+ V(IA32F64x2Pmax) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Neg) \
@@ -181,6 +184,8 @@ namespace compiler {
V(AVXF32x4Lt) \
V(SSEF32x4Le) \
V(AVXF32x4Le) \
+ V(IA32F32x4Pmin) \
+ V(IA32F32x4Pmax) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -226,6 +231,7 @@ namespace compiler {
V(SSEI32x4GeU) \
V(AVXI32x4GeU) \
V(IA32I32x4Abs) \
+ V(IA32I32x4BitMask) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneU) \
V(IA32I16x8ExtractLaneS) \
@@ -281,6 +287,7 @@ namespace compiler {
V(AVXI16x8GeU) \
V(IA32I16x8RoundingAverageU) \
V(IA32I16x8Abs) \
+ V(IA32I16x8BitMask) \
V(IA32I8x16Splat) \
V(IA32I8x16ExtractLaneU) \
V(IA32I8x16ExtractLaneS) \
@@ -330,6 +337,7 @@ namespace compiler {
V(AVXI8x16GeU) \
V(IA32I8x16RoundingAverageU) \
V(IA32I8x16Abs) \
+ V(IA32I8x16BitMask) \
V(IA32S128Zero) \
V(SSES128Not) \
V(AVXS128Not) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 020136403a..52f0b0356f 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -38,6 +38,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32ShlPair:
case kIA32ShrPair:
case kIA32SarPair:
+ case kIA32Rol:
case kIA32Ror:
case kIA32Lzcnt:
case kIA32Tzcnt:
@@ -114,6 +115,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Ne:
case kIA32F64x2Lt:
case kIA32F64x2Le:
+ case kIA32F64x2Pmin:
+ case kIA32F64x2Pmax:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
case kIA32I64x2Neg:
@@ -162,6 +165,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Lt:
case kSSEF32x4Le:
case kAVXF32x4Le:
+ case kIA32F32x4Pmin:
+ case kIA32F32x4Pmax:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -207,6 +212,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI32x4GeU:
case kAVXI32x4GeU:
case kIA32I32x4Abs:
+ case kIA32I32x4BitMask:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneU:
case kIA32I16x8ExtractLaneS:
@@ -262,6 +268,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8GeU:
case kIA32I16x8RoundingAverageU:
case kIA32I16x8Abs:
+ case kIA32I16x8BitMask:
case kIA32I8x16Splat:
case kIA32I8x16ExtractLaneU:
case kIA32I8x16ExtractLaneS:
@@ -311,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GeU:
case kIA32I8x16RoundingAverageU:
case kIA32I8x16Abs:
+ case kIA32I8x16BitMask:
case kIA32S128Zero:
case kSSES128Not:
case kAVXS128Not:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 36a70c8fa8..c50464f4b8 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -314,7 +314,7 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
} else {
InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
arraysize(temps), temps);
}
@@ -895,6 +895,10 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
VisitWord32PairShift(this, kIA32SarPair, node);
}
+void InstructionSelector::VisitWord32Rol(Node* node) {
+ VisitShift(this, node, kIA32Rol);
+}
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
@@ -2109,6 +2113,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
V(I32x4Abs) \
+ V(I32x4BitMask) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I16x8Neg) \
@@ -2116,7 +2121,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8UConvertI8x16High) \
V(I16x8Abs) \
V(I8x16Neg) \
- V(I8x16Abs)
+ V(I8x16Abs) \
+ V(I8x16BitMask)
#define SIMD_UNOP_PREFIX_LIST(V) \
V(F32x4Abs) \
@@ -2439,6 +2445,13 @@ void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
}
+void InstructionSelector::VisitI16x8BitMask(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kIA32I16x8BitMask, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
}
@@ -2797,12 +2810,40 @@ void InstructionSelector::VisitS8x16Swizzle(Node* node) {
arraysize(temps), temps);
}
+namespace {
+void VisitPminOrPmax(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ // Due to the way minps/minpd work, we want the dst to be the same as the
+ // second input: b = pmin(a, b) directly maps to minps b, a.
+ IA32OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)));
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitPminOrPmax(this, node, kIA32F32x4Pmin);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitPminOrPmax(this, node, kIA32F32x4Pmax);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitPminOrPmax(this, node, kIA32F64x2Pmin);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitPminOrPmax(this, node, kIA32F64x2Pmax);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord32Rol;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 7d72dbbf2d..c2022b574e 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -12,6 +12,7 @@
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
@@ -1416,6 +1417,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
return MarkAsWord32(node), VisitWord32Sar(node);
+ case IrOpcode::kWord32Rol:
+ return MarkAsWord32(node), VisitWord32Rol(node);
case IrOpcode::kWord32Ror:
return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
@@ -1446,6 +1449,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
return MarkAsWord64(node), VisitWord64Sar(node);
+ case IrOpcode::kWord64Rol:
+ return MarkAsWord64(node), VisitWord64Rol(node);
case IrOpcode::kWord64Ror:
return MarkAsWord64(node), VisitWord64Ror(node);
case IrOpcode::kWord64Clz:
@@ -1879,6 +1884,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Qfma(node);
case IrOpcode::kF64x2Qfms:
return MarkAsSimd128(node), VisitF64x2Qfms(node);
+ case IrOpcode::kF64x2Pmin:
+ return MarkAsSimd128(node), VisitF64x2Pmin(node);
+ case IrOpcode::kF64x2Pmax:
+ return MarkAsSimd128(node), VisitF64x2Pmax(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1925,6 +1934,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Qfma(node);
case IrOpcode::kF32x4Qfms:
return MarkAsSimd128(node), VisitF32x4Qfms(node);
+ case IrOpcode::kF32x4Pmin:
+ return MarkAsSimd128(node), VisitF32x4Pmin(node);
+ case IrOpcode::kF32x4Pmax:
+ return MarkAsSimd128(node), VisitF32x4Pmax(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
@@ -2386,6 +2399,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Rol(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
@@ -2612,9 +2627,11 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
#if !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
@@ -2634,11 +2651,23 @@ void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
+// TODO(v8:10308) Bitmask operations are in prototype now; we can remove these
+// guards when they go into the proposal.
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_X64
void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
+ // && !V8_TARGET_ARCH_X64
+
+// TODO(v8:10501) Prototyping pmin and pmax instructions.
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
+void InstructionSelector::VisitF32x4Pmin(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Pmax(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Pmin(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Pmax(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -3008,7 +3037,8 @@ void InstructionSelector::VisitUnreachable(Node* node) {
}
void InstructionSelector::VisitStaticAssert(Node* node) {
- node->InputAt(0)->Print();
+ Node* asserted = node->InputAt(0);
+ asserted->Print(2);
FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
}
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 8e1ce0f2a3..c83a4e28ee 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -11,7 +11,7 @@
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -3508,7 +3508,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index cd87f36913..dac94fae27 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -714,6 +714,8 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
VisitWord32PairShift(this, kMipsSarPair, node);
}
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
}
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 224b23fffc..197167c01c 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -12,7 +12,7 @@
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -1893,6 +1893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64I16x8Load8x8U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
__ fill_d(dst, kScratchReg);
__ ilvr_b(dst, kSimd128RegZero, dst);
@@ -1911,6 +1912,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64I32x4Load16x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
__ fill_d(dst, kScratchReg);
__ ilvr_h(dst, kSimd128RegZero, dst);
@@ -1929,6 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64I64x2Load32x2U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ Ld(kScratchReg, i.MemoryOperand());
__ fill_d(dst, kScratchReg);
__ ilvr_w(dst, kSimd128RegZero, dst);
@@ -2158,12 +2161,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F64x2Min: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmin_d);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ // MSA follows IEEE 754-2008 comparison rules:
+ // 1. All NaN-related comparisons get false.
+ // 2. +0.0 equals -0.0.
+
+ // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
+ // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
+ __ fseq_d(scratch0, src0, src1);
+ __ bsel_v(scratch0, src1, src0);
+ __ or_v(scratch1, scratch0, src1);
+ // scratch0 = isNaN(src0) ? src0 : scratch1.
+ __ fseq_d(scratch0, src0, src0);
+ __ bsel_v(scratch0, src0, scratch1);
+ // dst = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_d(dst, src0, scratch0);
+ __ bsel_v(dst, scratch0, src0);
break;
}
case kMips64F64x2Max: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmax_d);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ // MSA follows IEEE 754-2008 comparison rules:
+ // 1. All NaN-related comparisons get false.
+ // 2. +0.0 equals -0.0.
+
+ // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
+ // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
+ __ fseq_d(scratch0, src0, src1);
+ __ bsel_v(scratch0, src1, src0);
+ __ and_v(scratch1, scratch0, src1);
+ // scratch0 = isNaN(src0) ? src0 : scratch1.
+ __ fseq_d(scratch0, src0, src0);
+ __ bsel_v(scratch0, src0, scratch1);
+ // dst = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_d(dst, scratch0, src0);
+ __ bsel_v(dst, scratch0, src0);
break;
}
case kMips64F64x2Eq: {
@@ -2174,8 +2215,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F64x2Ne: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ fcne_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kMips64F64x2Lt: {
@@ -2206,13 +2247,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
+ __ Move(kScratchReg, i.InputDoubleRegister(2));
+ if (dst != src) {
__ move_v(dst, src);
}
- __ Move(kScratchReg, i.InputDoubleRegister(2));
__ insert_d(dst, i.InputInt8(1), kScratchReg);
break;
}
+ case kMips64I64x2Splat: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fill_d(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I64x2ExtractLane: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I64x2ReplaceLane: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (src != dst) {
+ __ move_v(dst, src);
+ }
+ __ insert_d(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
case kMips64I64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2240,20 +2302,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I64x2Shl: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt6(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
+ __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ }
break;
}
case kMips64I64x2ShrS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt6(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
+ __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ }
break;
}
case kMips64I64x2ShrU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt6(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
+ __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ }
break;
}
case kMips64F32x4Splat: {
@@ -2272,10 +2352,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- if (src != dst) {
+ __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
+ if (dst != src) {
__ move_v(dst, src);
}
- __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
__ insert_w(dst, i.InputInt8(1), kScratchReg);
break;
}
@@ -2322,20 +2402,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I32x4Shl: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt5(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
+ __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
break;
}
case kMips64I32x4ShrS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt5(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
+ __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
break;
}
case kMips64I32x4ShrU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt5(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
+ __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
break;
}
case kMips64I32x4MaxU: {
@@ -2359,9 +2457,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64S128AndNot: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- Simd128Register dst = i.OutputSimd128Register();
- __ nor_v(dst, i.InputSimd128Register(1), i.InputSimd128Register(1));
- __ and_v(dst, dst, i.InputSimd128Register(0));
+ Simd128Register scratch = kSimd128ScratchReg,
+ dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ __ nor_v(scratch, src1, src1);
+ __ and_v(dst, scratch, src0);
break;
}
case kMips64F32x4Abs: {
@@ -2410,14 +2511,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F32x4Max: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ // MSA follows IEEE 754-2008 comparison rules:
+ // 1. All NaN-related comparisons get false.
+ // 2. +0.0 equals -0.0.
+
+ // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
+ // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
+ __ fseq_w(scratch0, src0, src1);
+ __ bsel_v(scratch0, src1, src0);
+ __ and_v(scratch1, scratch0, src1);
+ // scratch0 = isNaN(src0) ? src0 : scratch1.
+ __ fseq_w(scratch0, src0, src0);
+ __ bsel_v(scratch0, src0, scratch1);
+ // dst = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_w(dst, scratch0, src0);
+ __ bsel_v(dst, scratch0, src0);
break;
}
case kMips64F32x4Min: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ // MSA follows IEEE 754-2008 comparison rules:
+ // 1. All NaN-related comparisons get false.
+ // 2. +0.0 equals -0.0.
+
+ // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
+ // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
+ __ fseq_w(scratch0, src0, src1);
+ __ bsel_v(scratch0, src1, src0);
+ __ or_v(scratch1, scratch0, src1);
+ // scratch0 = isNaN(src0) ? src0 : scratch1.
+ __ fseq_w(scratch0, src0, src0);
+ __ bsel_v(scratch0, src0, scratch1);
+ // dst = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_w(dst, src0, scratch0);
+ __ bsel_v(dst, scratch0, src0);
break;
}
case kMips64F32x4Eq: {
@@ -2428,8 +2565,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F32x4Ne: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kMips64F32x4Lt: {
@@ -2492,6 +2629,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I32x4Abs: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
@@ -2532,20 +2670,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I16x8Shl: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt4(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
+ __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
break;
}
case kMips64I16x8ShrS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt4(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
+ __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
break;
}
case kMips64I16x8ShrU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt4(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
+ __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
break;
}
case kMips64I16x8Add: {
@@ -2659,6 +2815,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I16x8Abs: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
@@ -2699,14 +2856,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I8x16Shl: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt3(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
+ __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
break;
}
case kMips64I8x16ShrS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt3(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
+ __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
break;
}
case kMips64I8x16Add: {
@@ -2778,8 +2947,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I8x16ShrU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt3(1));
+ if (instr->InputAt(1)->IsRegister()) {
+ __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
+ __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ } else {
+ __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
break;
}
case kMips64I8x16AddSaturateU: {
@@ -2826,6 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64I8x16Abs: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
kSimd128RegZero);
break;
@@ -2863,7 +3039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, 0l); // branch delay slot
- __ li(dst, -1);
+ __ li(dst, 1);
__ bind(&all_false);
break;
}
@@ -2873,7 +3049,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
- __ li(dst, -1); // branch delay slot
+ __ li(dst, 1); // branch delay slot
__ li(dst, 0l);
__ bind(&all_true);
break;
@@ -2884,7 +3060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
- __ li(dst, -1); // branch delay slot
+ __ li(dst, 1); // branch delay slot
__ li(dst, 0l);
__ bind(&all_true);
break;
@@ -2895,7 +3071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label all_true;
__ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
- __ li(dst, -1); // branch delay slot
+ __ li(dst, 1); // branch delay slot
__ li(dst, 0l);
__ bind(&all_true);
break;
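
The li(dst, -1) → li(dst, 1) changes in the AnyTrue/AllTrue cases above make the boolean result a canonical 0/1 instead of 0/-1. As a scalar reference for the new contract (illustrative only, not the emitted MSA code):

    #include <array>
    #include <cstdint>

    // AllTrue over four 32-bit lanes: 1 when every lane is non-zero, else 0.
    int32_t I32x4AllTrue(const std::array<int32_t, 4>& v) {
      for (int32_t lane : v) {
        if (lane == 0) return 0;
      }
      return 1;
    }
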
@@ -3196,9 +3372,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ctl = i.InputSimd128Register(1);
DCHECK(dst != ctl && dst != tbl);
Simd128Register zeroReg = i.TempSimd128Register(0);
- __ fill_d(zeroReg, zero_reg);
+ __ xor_v(zeroReg, zeroReg, zeroReg);
__ move_v(dst, ctl);
- __ vshf_b(dst, tbl, zeroReg);
+ __ vshf_b(dst, zeroReg, tbl);
break;
}
case kMips64S8x8Reverse: {
@@ -3290,9 +3466,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
- __ sat_u_w(kSimd128ScratchReg, src0, 15);
- __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
- __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ max_s_w(kSimd128ScratchReg, kSimd128RegZero, src0);
+ __ sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
+ __ max_s_w(dst, kSimd128RegZero, src1);
+ __ sat_u_w(dst, dst, 15);
+ __ pckev_h(dst, dst, kSimd128ScratchReg);
break;
}
case kMips64I16x8UConvertI8x16Low: {
@@ -3324,9 +3503,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
- __ sat_u_h(kSimd128ScratchReg, src0, 7);
- __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
- __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ max_s_h(kSimd128ScratchReg, kSimd128RegZero, src0);
+ __ sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
+ __ max_s_h(dst, kSimd128RegZero, src1);
+ __ sat_u_h(dst, dst, 7);
+ __ pckev_b(dst, dst, kSimd128ScratchReg);
break;
}
case kMips64F32x4AddHoriz: {
@@ -3593,7 +3775,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
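
The MIPS64 SIMD shift cases above now accept either an immediate or a register shift amount: the register path splats the amount into kSimd128ScratchReg (fill_h/fill_b) and uses the vector-by-vector shift (sra_h, srl_h, sll_b, ...), while the immediate path keeps the existing srai/srli/slli forms. As a rough scalar model of the lane semantics — not part of the patch, and assuming the amount is interpreted modulo the lane width as the InputInt4/InputInt3 accessors suggest — an i16x8 arithmetic shift right looks like:

    #include <array>
    #include <cstdint>

    std::array<int16_t, 8> I16x8ShrS(std::array<int16_t, 8> v, int shift) {
      shift &= 15;  // amount taken modulo the 16-bit lane width
      for (int16_t& lane : v) lane = static_cast<int16_t>(lane >> shift);
      return v;
    }
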
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index c752381c8c..9303b4572f 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -203,6 +203,9 @@ namespace compiler {
V(Mips64F64x2Splat) \
V(Mips64F64x2ExtractLane) \
V(Mips64F64x2ReplaceLane) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
V(Mips64I64x2Add) \
V(Mips64I64x2Sub) \
V(Mips64I64x2Mul) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 0261d915fb..81fc3b2ca9 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Ne:
case kMips64F64x2Lt:
case kMips64F64x2Le:
+ case kMips64I64x2Splat:
+ case kMips64I64x2ExtractLane:
+ case kMips64I64x2ReplaceLane:
case kMips64I64x2Add:
case kMips64I64x2Sub:
case kMips64I64x2Mul:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 5a0e41ccbe..719a916b6a 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -132,6 +132,20 @@ static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
@@ -838,6 +852,10 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kMips64Dsar, node);
}
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMips64Ror, node);
}
@@ -2733,7 +2751,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
}
#define SIMD_TYPE_LIST(V) \
+ V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2875,7 +2895,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
void InstructionSelector::VisitS128Zero(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
+ Emit(kMips64S128Zero, g.DefineAsRegister(node));
}
#define SIMD_VISIT_SPLAT(Type) \
@@ -2883,7 +2903,6 @@ void InstructionSelector::VisitS128Zero(Node* node) {
VisitRR(this, kMips64##Type##Splat, node); \
}
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
-SIMD_VISIT_SPLAT(F64x2)
#undef SIMD_VISIT_SPLAT
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
@@ -2892,6 +2911,7 @@ SIMD_VISIT_SPLAT(F64x2)
}
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
@@ -2904,7 +2924,6 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, S)
VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
-SIMD_VISIT_REPLACE_LANE(F64x2)
#undef SIMD_VISIT_REPLACE_LANE
#define SIMD_VISIT_UNOP(Name, instruction) \
@@ -2916,7 +2935,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRRI(this, kMips64##Name, node); \
+ VisitSimdShift(this, kMips64##Name, node); \
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
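
VisitSimdShift above is the selector-side half of the same change: it emits the immediate form only when the shift operand is an integer constant, and otherwise keeps it in a register. A minimal standalone sketch of that decision, using placeholder types rather than the real InstructionSelector/Mips64OperandGenerator API:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct ShiftAmount {
      std::optional<int64_t> constant;  // engaged when the node is an integer constant
    };

    const char* SelectShiftForm(const ShiftAmount& amount) {
      return amount.constant ? "immediate form (e.g. srai_h with InputInt4)"
                             : "register form (fill_h + sra_h)";
    }

    int main() {
      std::cout << SelectShiftForm({int64_t{3}}) << "\n";    // constant shift count
      std::cout << SelectShiftForm({std::nullopt}) << "\n";  // dynamic shift count
    }
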
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index addbd76ffb..b7fece3f72 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/numbers/double.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -89,8 +88,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
- *mode = AddressingModeField::decode(instr_->opcode());
- switch (*mode) {
+ AddressingMode addr_mode = AddressingModeField::decode(instr_->opcode());
+ if (mode) *mode = addr_mode;
+ switch (addr_mode) {
case kMode_None:
break;
case kMode_MRI:
@@ -103,7 +103,8 @@ class PPCOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
- MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+ MemOperand MemoryOperand(AddressingMode* mode = NULL,
+ size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
@@ -166,6 +167,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(value_, value_);
+ }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
@@ -831,7 +835,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ LoadTaggedPointerField(
+ r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
@@ -938,7 +943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// they might need to be patched individually.
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
-#ifdef V8_TARGET_ARCH_S390X
+#ifdef V8_TARGET_ARCH_PPC64
Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -969,13 +974,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ LoadP(kScratchReg,
- FieldMemOperand(func, JSFunction::kContextOffset));
+ __ LoadTaggedPointerField(
+ kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ LoadTaggedPointerField(r5,
+ FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1031,7 +1037,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
- int offset = 20 * kInstrSize;
+ int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
+
#if defined(_AIX)
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
@@ -1041,7 +1048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
- offset = 22 * kInstrSize;
+ offset += 2 * kInstrSize;
}
#endif
if (isWasmCapiFunction) {
@@ -1190,14 +1197,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
- __ StoreP(value, MemOperand(object, offset));
+ __ StoreTaggedField(value, MemOperand(object, offset), r0);
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
- __ StorePX(value, MemOperand(object, offset));
+ __ StoreTaggedFieldX(value, MemOperand(object, offset), r0);
}
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
@@ -1991,6 +1998,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_LoadDouble:
ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
break;
+ case kPPC_LoadSimd128: {
+ Simd128Register result = i.OutputSimd128Register();
+ AddressingMode mode = kMode_None;
+ MemOperand operand = i.MemoryOperand(&mode);
+ bool is_atomic = i.InputInt32(2);
+ // lvx only supports MRR.
+ DCHECK_EQ(mode, kMode_MRR);
+ __ lvx(result, operand);
+ if (is_atomic) __ lwsync();
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
case kPPC_StoreWord8:
ASSEMBLE_STORE_INTEGER(stb, stbx);
break;
@@ -2011,6 +2030,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_FLOAT(stfd, stfdx);
break;
+ case kPPC_StoreSimd128: {
+ size_t index = 0;
+ AddressingMode mode = kMode_None;
+ MemOperand operand = i.MemoryOperand(&mode, &index);
+ Simd128Register value = i.InputSimd128Register(index);
+ bool is_atomic = i.InputInt32(3);
+ if (is_atomic) __ lwsync();
+ // stvx only supports MRR.
+ DCHECK_EQ(mode, kMode_MRR);
+ __ stvx(value, operand);
+ if (is_atomic) __ sync();
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
case kWord32AtomicLoadInt8:
case kPPC_AtomicLoadUint8:
case kWord32AtomicLoadInt16:
@@ -2120,6 +2153,122 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#endif // V8_TARGET_ARCH_PPC64
+ case kPPC_F64x2Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ constexpr int shift_bits = 64;
+ __ MovDoubleToInt64(r0, i.InputDoubleRegister(0));
+ __ mtvsrd(dst, r0);
+ // right shift
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(kScratchDoubleReg, ip);
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
+ __ vsro(dst, dst, kScratchDoubleReg);
+ // reload
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
+ __ mtvsrd(dst, kScratchReg);
+ __ vspltw(dst, dst, Operand(1));
+ break;
+ }
+ case kPPC_I64x2Splat: {
+ Register src = i.InputRegister(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ constexpr int shift_bits = 64;
+ __ mtvsrd(dst, src);
+ // right shift
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(kScratchDoubleReg, ip);
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
+ __ vsro(dst, dst, kScratchDoubleReg);
+ // reload
+ __ mtvsrd(kScratchDoubleReg, src);
+ __ vor(dst, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(dst, i.InputRegister(0));
+ __ vspltw(dst, dst, Operand(1));
+ break;
+ }
+ case kPPC_I16x8Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(dst, i.InputRegister(0));
+ __ vsplth(dst, dst, Operand(3));
+ break;
+ }
+ case kPPC_I8x16Splat: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(dst, i.InputRegister(0));
+ __ vspltb(dst, dst, Operand(7));
+ break;
+ }
+ case kPPC_F64x2ExtractLane: {
+ __ mfvsrd(kScratchReg, i.InputSimd128Register(0));
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
+ break;
+ }
+ case kPPC_F32x4ExtractLane: {
+ __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
+ break;
+ }
+ case kPPC_I64x2ExtractLane: {
+ __ mfvsrd(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I32x4ExtractLane: {
+ __ mfvsrwz(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I16x8ExtractLaneU: {
+ __ mfvsrwz(r0, i.InputSimd128Register(0));
+ __ li(ip, Operand(16));
+ __ srd(i.OutputRegister(), r0, ip);
+ break;
+ }
+ case kPPC_I16x8ExtractLaneS: {
+ __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ __ sradi(i.OutputRegister(), kScratchReg, 16);
+ break;
+ }
+ case kPPC_I8x16ExtractLaneU: {
+ __ mfvsrwz(r0, i.InputSimd128Register(0));
+ __ li(ip, Operand(24));
+ __ srd(i.OutputRegister(), r0, ip);
+ break;
+ }
+ case kPPC_I8x16ExtractLaneS: {
+ __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ __ sradi(i.OutputRegister(), kScratchReg, 24);
+ break;
+ }
+ case kPPC_StoreCompressTagged: {
+ ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
+ break;
+ }
+ case kPPC_LoadDecompressTaggedSigned: {
+ CHECK(instr->HasOutput());
+ ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ break;
+ }
+ case kPPC_LoadDecompressTaggedPointer: {
+ CHECK(instr->HasOutput());
+ ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
+ break;
+ }
+ case kPPC_LoadDecompressAnyTagged: {
+ CHECK(instr->HasOutput());
+ ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ __ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -2198,7 +2347,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
@@ -2391,10 +2540,12 @@ void CodeGenerator::AssembleConstructFrame() {
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
- __ LoadP(kJSFunctionRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
- __ LoadP(kWasmInstanceRegister,
- FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ LoadTaggedPointerField(
+ kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ LoadTaggedPointerField(
+ kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
@@ -2623,8 +2774,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
- case Constant::kCompressedHeapObject:
- UNREACHABLE();
+ case Constant::kCompressedHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ // TODO(v8:7703, jyan@ca.ibm.com): Turn into a
+ // COMPRESSED_EMBEDDED_OBJECT when the constant pool entry size is
+ // tagged size.
+ __ Move(dst, src_object, RelocInfo::FULL_EMBEDDED_OBJECT);
+ }
+ break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
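
Among the new PPC cases, kPPC_I64x2Splat duplicates a single GPR value into both doublewords of the vector: mtvsrd writes one half, the vsro sequence shifts it across, and vor merges a second copy back in. The end result, as a scalar model (illustrative only, not the emitted code):

    #include <array>
    #include <cstdint>

    // 128-bit result as two 64-bit halves, both holding the splatted value.
    std::array<uint64_t, 2> I64x2Splat(uint64_t x) { return {x, x}; }
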
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 82d1d40b5b..4f6aeced6d 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -121,12 +121,14 @@ namespace compiler {
V(PPC_LoadWord64) \
V(PPC_LoadFloat32) \
V(PPC_LoadDouble) \
+ V(PPC_LoadSimd128) \
V(PPC_StoreWord8) \
V(PPC_StoreWord16) \
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
V(PPC_StoreDouble) \
+ V(PPC_StoreSimd128) \
V(PPC_ByteRev32) \
V(PPC_ByteRev64) \
V(PPC_CompressSigned) \
@@ -187,7 +189,25 @@ namespace compiler {
V(PPC_AtomicXorInt8) \
V(PPC_AtomicXorInt16) \
V(PPC_AtomicXorInt32) \
- V(PPC_AtomicXorInt64)
+ V(PPC_AtomicXorInt64) \
+ V(PPC_F64x2Splat) \
+ V(PPC_F64x2ExtractLane) \
+ V(PPC_F32x4Splat) \
+ V(PPC_F32x4ExtractLane) \
+ V(PPC_I64x2Splat) \
+ V(PPC_I64x2ExtractLane) \
+ V(PPC_I32x4Splat) \
+ V(PPC_I32x4ExtractLane) \
+ V(PPC_I16x8Splat) \
+ V(PPC_I16x8ExtractLaneU) \
+ V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I8x16Splat) \
+ V(PPC_I8x16ExtractLaneU) \
+ V(PPC_I8x16ExtractLaneS) \
+ V(PPC_StoreCompressTagged) \
+ V(PPC_LoadDecompressTaggedSigned) \
+ V(PPC_LoadDecompressTaggedPointer) \
+ V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 3062dfb53d..68d0aaedc4 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -113,6 +113,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_CompressSigned:
case kPPC_CompressPointer:
case kPPC_CompressAny:
+ case kPPC_F64x2Splat:
+ case kPPC_F64x2ExtractLane:
+ case kPPC_F32x4Splat:
+ case kPPC_F32x4ExtractLane:
+ case kPPC_I64x2Splat:
+ case kPPC_I64x2ExtractLane:
+ case kPPC_I32x4Splat:
+ case kPPC_I32x4ExtractLane:
+ case kPPC_I16x8Splat:
+ case kPPC_I16x8ExtractLaneU:
+ case kPPC_I16x8ExtractLaneS:
+ case kPPC_I8x16Splat:
+ case kPPC_I8x16ExtractLaneU:
+ case kPPC_I8x16ExtractLaneS:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
@@ -124,11 +138,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadWord64:
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
+ case kPPC_LoadSimd128:
case kPPC_AtomicLoadUint8:
case kPPC_AtomicLoadUint16:
case kPPC_AtomicLoadWord32:
case kPPC_AtomicLoadWord64:
case kPPC_Peek:
+ case kPPC_LoadDecompressTaggedSigned:
+ case kPPC_LoadDecompressTaggedPointer:
+ case kPPC_LoadDecompressAnyTagged:
return kIsLoadOperation;
case kPPC_StoreWord8:
@@ -137,6 +155,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreWord64:
case kPPC_StoreFloat32:
case kPPC_StoreDouble:
+ case kPPC_StoreSimd128:
+ case kPPC_StoreCompressTagged:
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 7e29b00c31..1598fbad04 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -191,16 +191,39 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordU32;
break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kPPC_LoadWordS32;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ UNREACHABLE();
+#endif
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kPPC_LoadDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kPPC_LoadDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kPPC_LoadDecompressAnyTagged;
+ break;
+#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
+#endif
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kPPC_LoadSimd128;
+ // Vectors do not support MRI mode, only MRR is available.
+ mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -257,7 +280,7 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -302,28 +325,33 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kPPC_StoreWord16;
break;
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#endif
case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
break;
-#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
+ opcode = kPPC_StoreCompressTagged;
+ break;
+#else
+ UNREACHABLE();
+ break;
+#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTagged:
+ mode = kInt16Imm_4ByteAligned;
+ opcode = kPPC_StoreCompressTagged;
+ break;
case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
mode = kInt16Imm_4ByteAligned;
break;
-#else
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kPPC_StoreSimd128;
+ // Vectors do not support MRI mode, only MRR is available.
+ mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -881,6 +909,10 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
#endif
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
// TODO(mbrandy): Absorb logical-and into rlwinm?
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
@@ -2088,9 +2120,29 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+#define SIMD_TYPES(V) \
+ V(F64x2) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Type##Splat, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_TYPES(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+#undef SIMD_TYPES
+
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- UNIMPLEMENTED(); \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
@@ -2101,8 +2153,6 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
@@ -2139,8 +2189,6 @@ void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
@@ -2203,8 +2251,6 @@ void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
@@ -2267,8 +2313,6 @@ void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
@@ -2411,8 +2455,6 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
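
SIMD_TYPES/SIMD_VISIT_SPLAT above use the usual X-macro pattern: a list macro is instantiated with a per-type macro that stamps out one visitor per SIMD type. A tiny self-contained illustration of the pattern (placeholder names, not the V8 declarations):

    #include <iostream>

    #define SIMD_TYPES(V) V(F64x2) V(F32x4) V(I32x4) V(I16x8) V(I8x16)

    #define DEFINE_VISIT_SPLAT(Type) \
      void Visit##Type##Splat() { std::cout << "emit kPPC_" #Type "Splat\n"; }
    SIMD_TYPES(DEFINE_VISIT_SPLAT)
    #undef DEFINE_VISIT_SPLAT

    int main() { VisitI32x4Splat(); }
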
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 9420269ca0..8b74ef68b1 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -519,6 +519,16 @@ UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
return prev;
}
+UsePosition* LiveRange::NextUsePositionSpillDetrimental(
+ LifetimePosition start) const {
+ UsePosition* pos = NextUsePosition(start);
+ while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister &&
+ !pos->SpillDetrimental()) {
+ pos = pos->next();
+ }
+ return pos;
+}
+
UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
@@ -2424,6 +2434,15 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
if (from.IsUnallocated()) {
live->Add(UnallocatedOperand::cast(from).virtual_register());
}
+ // When the value is moved to a register to meet input constraints,
+ // we should treat this value use like a register use in the
+ // backward spilling heuristics, even though this value use is not
+ // register beneficial at the AllocateBlockedReg stage.
+ if (to.IsAnyRegister() ||
+ (to.IsUnallocated() &&
+ UnallocatedOperand::cast(&to)->HasRegisterPolicy())) {
+ from_use->set_spill_detrimental();
+ }
// Resolve use position hints just created.
if (to_use != nullptr && from_use != nullptr) {
to_use->ResolveHint(from_use);
@@ -2769,6 +2788,7 @@ void BundleBuilder::BuildBundles() {
}
TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
out_range->TopLevel()->vreg(), out_range->relative_id());
+ bool phi_interferes_with_backedge_input = false;
for (auto input : phi->operands()) {
LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
TRACE("Input value v%d with range %d:%d\n", input,
@@ -2776,16 +2796,32 @@ void BundleBuilder::BuildBundles() {
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
- if (out->TryMerge(input_bundle, data()->is_trace_alloc()))
+ if (out->TryMerge(input_bundle, data()->is_trace_alloc())) {
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
+ } else if (input_range->Start() > out_range->Start()) {
+ // We are only interested in values defined after the phi, because
+ // those are values that will go over a back-edge.
+ phi_interferes_with_backedge_input = true;
+ }
} else {
TRACE("Add\n");
- if (out->TryAddRange(input_range))
+ if (out->TryAddRange(input_range)) {
TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
out->id());
+ } else if (input_range->Start() > out_range->Start()) {
+ // We are only interested in values defined after the phi, because
+ // those are values that will go over a back-edge.
+ phi_interferes_with_backedge_input = true;
+ }
}
}
+ // Spilling the phi at the loop header is not beneficial if there is
+ // a back-edge with an input for the phi that interferes with the phi's
+ // value, because if that input gets spilled it might introduce
+ // a stack-to-stack move at the back-edge.
+ if (phi_interferes_with_backedge_input)
+ out_range->TopLevel()->set_spilling_at_loop_header_not_beneficial();
}
TRACE("Done block B%d\n", block_id);
}
@@ -3006,6 +3042,12 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
// This will reduce number of memory moves on the back edge.
LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
loop_header->first_instruction_index());
+ // Stop if we moved to a loop header before the value is defined, or
+ // to its definition position when spilling there is not beneficial.
+ if (range->TopLevel()->Start() > loop_start ||
+ (range->TopLevel()->Start() == loop_start &&
+ range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
+ return pos;
auto& loop_header_state =
data()->GetSpillState(loop_header->rpo_number());
for (LiveRange* live_at_header : loop_header_state) {
@@ -3016,14 +3058,17 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* check_use = live_at_header;
for (; check_use != nullptr && check_use->Start() < pos;
check_use = check_use->next()) {
- UsePosition* next_use = check_use->NextRegisterPosition(loop_start);
+ // If we find a use for which spilling is detrimental, don't spill
+ // at the loop header.
+ UsePosition* next_use =
+ check_use->NextUsePositionSpillDetrimental(loop_start);
// UsePosition at the end of a UseInterval may
// have the same value as the start of next range.
if (next_use != nullptr && next_use->pos() <= pos) {
return pos;
}
}
- // No register use inside the loop before the pos.
+ // No register-beneficial use inside the loop before the pos.
*begin_spill_out = live_at_header;
pos = loop_start;
break;
@@ -3825,11 +3870,15 @@ void LinearScanAllocator::AllocateRegisters() {
auto& spill_state = data()->GetSpillState(pred);
TRACE("Not a fallthrough. Adding %zu elements...\n",
spill_state.size());
+ LifetimePosition pred_end =
+ LifetimePosition::GapFromInstructionIndex(
+ this->code()->InstructionBlockAt(pred)->code_end());
for (const auto range : spill_state) {
- // Filter out ranges that had their register stolen by backwards
- // working spill heuristics. These have been spilled after the
- // fact, so ignore them.
- if (!range->HasRegisterAssigned()) continue;
+ // Filter out ranges that were split or had their register
+ // stolen by backwards working spill heuristics. These have
+ // been spilled after the fact, so ignore them.
+ if (range->End() < pred_end || !range->HasRegisterAssigned())
+ continue;
to_be_live->emplace(range);
}
}
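
NextUsePositionSpillDetrimental added above walks the use-position chain from start and stops at the first use that either requires a register or carries the new spill-detrimental flag. A standalone sketch of that walk with simplified stand-in types:

    struct UsePos {
      int pos;                 // lifetime position of this use
      bool requires_register;  // UsePositionType::kRequiresRegister
      bool spill_detrimental;  // the new SpillDetrimental flag
      UsePos* next;
    };

    UsePos* NextSpillDetrimental(UsePos* use, int start) {
      while (use != nullptr && use->pos < start) use = use->next;  // NextUsePosition(start)
      while (use != nullptr && !use->requires_register && !use->spill_detrimental) {
        use = use->next;
      }
      return use;
    }

FindOptimalSpillingPos then uses this instead of NextRegisterPosition, so a use that merely has to be moved into a register for an input constraint is enough to keep the value from being spilled at the loop header.
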
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 9e1a7beff9..f890bd868b 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -459,6 +459,10 @@ class V8_EXPORT_PRIVATE UsePosition final
bool RegisterIsBeneficial() const {
return RegisterBeneficialField::decode(flags_);
}
+ bool SpillDetrimental() const {
+ return SpillDetrimentalField::decode(flags_);
+ }
+
UsePositionType type() const { return TypeField::decode(flags_); }
void set_type(UsePositionType type, bool register_beneficial);
@@ -471,6 +475,9 @@ class V8_EXPORT_PRIVATE UsePosition final
void set_assigned_register(int register_code) {
flags_ = AssignedRegisterField::update(flags_, register_code);
}
+ void set_spill_detrimental() {
+ flags_ = SpillDetrimentalField::update(flags_, true);
+ }
UsePositionHintType hint_type() const {
return HintTypeField::decode(flags_);
@@ -489,6 +496,7 @@ class V8_EXPORT_PRIVATE UsePosition final
using HintTypeField = base::BitField<UsePositionHintType, 2, 3>;
using RegisterBeneficialField = base::BitField<bool, 5, 1>;
using AssignedRegisterField = base::BitField<int32_t, 6, 6>;
+ using SpillDetrimentalField = base::BitField<int32_t, 12, 1>;
InstructionOperand* const operand_;
void* hint_;
@@ -584,6 +592,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
UsePosition* PreviousUsePositionRegisterIsBeneficial(
LifetimePosition start) const;
+ // Returns the use position for which spilling is detrimental in this live
+ // range and which follows both start and the last processed use position.
+ UsePosition* NextUsePositionSpillDetrimental(LifetimePosition start) const;
+
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos) const;
@@ -675,7 +687,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
using RepresentationField = base::BitField<MachineRepresentation, 13, 8>;
using RecombineField = base::BitField<bool, 21, 1>;
using ControlFlowRegisterHint = base::BitField<uint8_t, 22, 6>;
- // Bit 28 is used by TopLevelLiveRange.
+ // Bits 28,29 are used by TopLevelLiveRange.
// Unique among children and splinters of the same virtual register.
int relative_id_;
@@ -785,6 +797,12 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void set_is_non_loop_phi(bool value) {
bits_ = IsNonLoopPhiField::update(bits_, value);
}
+ bool SpillAtLoopHeaderNotBeneficial() const {
+ return SpillAtLoopHeaderNotBeneficialField::decode(bits_);
+ }
+ void set_spilling_at_loop_header_not_beneficial() {
+ bits_ = SpillAtLoopHeaderNotBeneficialField::update(bits_, true);
+ }
enum SlotUseKind { kNoSlotUse, kDeferredSlotUse, kGeneralSlotUse };
@@ -991,6 +1009,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
using IsNonLoopPhiField = base::BitField<bool, 4, 1>;
using SpillTypeField = base::BitField<SpillType, 5, 2>;
using DeferredFixedField = base::BitField<bool, 28, 1>;
+ using SpillAtLoopHeaderNotBeneficialField = base::BitField<bool, 29, 1>;
int vreg_;
int last_child_id_;
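
The two new flags reuse the packed words that already exist on these classes: SpillDetrimentalField takes bit 12 of UsePosition::flags_ and SpillAtLoopHeaderNotBeneficialField takes bit 29 of TopLevelLiveRange::bits_. A minimal model of how such a bit-field slice encodes and decodes (a simplified stand-in, not v8::base::BitField itself):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitFieldModel {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr uint32_t update(uint32_t word, T value) {
        return (word & ~kMask) | ((static_cast<uint32_t>(value) << kShift) & kMask);
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    using SpillDetrimentalModel = BitFieldModel<bool, 12, 1>;
    static_assert(SpillDetrimentalModel::decode(SpillDetrimentalModel::update(0, true)),
                  "the single-bit flag round-trips");
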
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 24552cf632..cb79373b42 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -4285,7 +4284,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ PrepareCallCFunction(0, 0, cp);
__ CallCFunction(
ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index fdffb30e00..515e8dd127 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -1156,6 +1156,10 @@ void InstructionSelector::VisitWord32PairSar(Node* node) {
}
#endif
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
#if V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 472ffaa508..4f99ad49ba 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/backend/code-generator.h"
-
#include <limits>
#include "src/base/overflowing-math.h"
@@ -11,10 +9,11 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
#include "src/objects/smi.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -323,7 +322,7 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ PrepareCallCFunction(0);
__ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
size_t pop_size =
call_descriptor->StackParameterCount() * kSystemPointerSize;
@@ -588,7 +587,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
do { \
- CpuFeatureScope sse_scope(tasm(), SSE4_1); \
Register dst = i.OutputRegister(); \
XMMRegister tmp = i.TempSimd128Register(0); \
__ xorq(dst, dst); \
@@ -601,20 +599,21 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
// This macro will directly emit the opcode if the shift is an immediate - the
// shift value will be taken modulo 2^width. Otherwise, it will emit code to
// perform the modulus operation.
-#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- if (HasImmediateInput(instr, 1)) { \
- __ opcode(dst, static_cast<byte>(i.InputInt##width(1))); \
- } else { \
- XMMRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.InputRegister(1); \
- constexpr int mask = (1 << width) - 1; \
- __ andq(shift, Immediate(mask)); \
- __ Movq(tmp, shift); \
- __ opcode(dst, tmp); \
- } \
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ if (HasImmediateInput(instr, 1)) { \
+ __ opcode(dst, byte{i.InputInt##width(1)}); \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register tmp_shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ movq(tmp_shift, i.InputRegister(1)); \
+ __ andq(tmp_shift, Immediate(mask)); \
+ __ Movq(tmp, tmp_shift); \
+ __ opcode(dst, tmp); \
+ } \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -670,13 +669,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* assembler,
}
}
-void SetupShuffleMaskOnStack(TurboAssembler* assembler, uint32_t* mask) {
- int64_t shuffle_mask = (mask[2]) | (static_cast<uint64_t>(mask[3]) << 32);
- assembler->movq(kScratchRegister, shuffle_mask);
- assembler->Push(kScratchRegister);
- shuffle_mask = (mask[0]) | (static_cast<uint64_t>(mask[1]) << 32);
+void SetupShuffleMaskInTempRegister(TurboAssembler* assembler, uint32_t* mask,
+ XMMRegister tmp) {
+ uint64_t shuffle_mask = (mask[0]) | (uint64_t{mask[1]} << 32);
+ assembler->Move(tmp, shuffle_mask);
+ shuffle_mask = (mask[2]) | (uint64_t{mask[3]} << 32);
assembler->movq(kScratchRegister, shuffle_mask);
- assembler->Push(kScratchRegister);
+ assembler->Pinsrq(tmp, kScratchRegister, int8_t{1});
}
} // namespace
@@ -1292,6 +1291,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Sar:
ASSEMBLE_SHIFT(sarq, 6);
break;
+ case kX64Rol32:
+ ASSEMBLE_SHIFT(roll, 5);
+ break;
+ case kX64Rol:
+ ASSEMBLE_SHIFT(rolq, 6);
+ break;
case kX64Ror32:
ASSEMBLE_SHIFT(rorl, 5);
break;
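
The new kX64Rol32/kX64Rol cases emit rotate-left through roll/rolq via ASSEMBLE_SHIFT, so the count arrives either as an immediate or in cl (presumably serving the Word32Rol/Word64Rol operators that the other back ends above mark UNREACHABLE). A scalar reference for the 32-bit rotate, with the count masked the way the hardware masks it:

    #include <cstdint>

    uint32_t Rol32(uint32_t x, unsigned n) {
      n &= 31;  // rotate count is taken modulo 32
      return (x << n) | (x >> ((32 - n) & 31));
    }
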
@@ -2277,7 +2282,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Splat: {
- CpuFeatureScope sse_scope(tasm(), SSE3);
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsFPRegister()) {
__ Movddup(dst, i.InputDoubleRegister(0));
@@ -2287,7 +2291,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2ReplaceLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsFPRegister()) {
__ Movq(kScratchRegister, i.InputDoubleRegister(2));
__ Pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
@@ -2297,7 +2300,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2ExtractLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
__ Movq(i.OutputDoubleRegister(), kScratchRegister);
break;
@@ -2334,7 +2336,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// propagate -0's and NaNs, which may be non-canonical.
__ Orpd(kScratchDoubleReg, dst);
// Canonicalize NaNs by quieting and clearing the payload.
- __ Cmppd(dst, kScratchDoubleReg, static_cast<int8_t>(3));
+ __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
__ Orpd(kScratchDoubleReg, dst);
__ Psrlq(dst, 13);
__ Andnpd(dst, kScratchDoubleReg);
@@ -2356,7 +2358,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Propagate sign discrepancy and (subtle) quiet NaNs.
__ Subpd(kScratchDoubleReg, dst);
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmppd(dst, kScratchDoubleReg, static_cast<int8_t>(3));
+ __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
__ Psrlq(dst, 13);
__ Andnpd(dst, kScratchDoubleReg);
break;
@@ -2415,7 +2417,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Movss(dst, i.InputOperand(0));
}
- __ Shufps(dst, dst, static_cast<byte>(0x0));
+ __ Shufps(dst, dst, byte{0x0});
break;
}
case kX64F32x4ExtractLane: {
@@ -2442,15 +2444,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64F32x4UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
DCHECK_NE(i.OutputSimd128Register(), kScratchDoubleReg);
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
__ Pxor(kScratchDoubleReg, kScratchDoubleReg); // zeros
- __ Pblendw(kScratchDoubleReg, dst,
- static_cast<uint8_t>(0x55)); // get lo 16 bits
+ __ Pblendw(kScratchDoubleReg, dst, uint8_t{0x55}); // get lo 16 bits
__ Psubd(dst, kScratchDoubleReg); // get hi 16 bits
__ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // convert lo exactly
- __ Psrld(dst,
- static_cast<byte>(1)); // divide by 2 to get in unsigned range
+ __ Psrld(dst, byte{1}); // divide by 2 to get in unsigned range
__ Cvtdq2ps(dst, dst); // convert hi exactly
__ Addps(dst, dst); // double hi, exactly
__ Addps(dst, kScratchDoubleReg); // add hi and lo, may round.
@@ -2461,11 +2460,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, static_cast<byte>(1));
+ __ Psrld(kScratchDoubleReg, byte{1});
__ Andps(i.OutputSimd128Register(), kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
- __ Psrld(dst, static_cast<byte>(1));
+ __ Psrld(dst, byte{1});
__ Andps(dst, i.InputSimd128Register(0));
}
break;
@@ -2475,11 +2474,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, static_cast<byte>(31));
+ __ Pslld(kScratchDoubleReg, byte{31});
__ Xorps(i.OutputSimd128Register(), kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
- __ Pslld(dst, static_cast<byte>(31));
+ __ Pslld(dst, byte{31});
__ Xorps(dst, i.InputSimd128Register(0));
}
break;
@@ -2503,7 +2502,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F32x4AddHoriz: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE3);
__ Haddps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2534,9 +2532,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// propagate -0's and NaNs, which may be non-canonical.
__ Orps(kScratchDoubleReg, dst);
// Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpps(dst, kScratchDoubleReg, static_cast<int8_t>(3));
+ __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
__ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, static_cast<byte>(10));
+ __ Psrld(dst, byte{10});
__ Andnps(dst, kScratchDoubleReg);
break;
}
@@ -2556,21 +2554,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Propagate sign discrepancy and (subtle) quiet NaNs.
__ Subps(kScratchDoubleReg, dst);
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpps(dst, kScratchDoubleReg, static_cast<int8_t>(3));
- __ Psrld(dst, static_cast<byte>(10));
+ __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
+ __ Psrld(dst, byte{10});
__ Andnps(dst, kScratchDoubleReg);
break;
}
case kX64F32x4Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- static_cast<int8_t>(0x0));
+ int8_t{0x0});
break;
}
case kX64F32x4Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- static_cast<int8_t>(0x4));
+ int8_t{0x4});
break;
}
case kX64F32x4Lt: {
@@ -2609,8 +2607,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F32x4Pmin: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Minps(dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Pmax: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Maxps(dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Pmin: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Minpd(dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Pmax: {
+ XMMRegister dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ Maxpd(dst, i.InputSimd128Register(1));
+ break;
+ }
case kX64I64x2Splat: {
- CpuFeatureScope sse_scope(tasm(), SSE3);
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
__ Movq(dst, i.InputRegister(0));
@@ -2621,12 +2642,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2ExtractLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I64x2ReplaceLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
__ Pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2653,7 +2672,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I64x2ShrS: {
// TODO(zhin): there is vpsraq but requires AVX512
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
// ShrS on each quadword one at a time
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2661,14 +2679,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Modulo 64 not required as sarq_cl will mask cl to 6 bits.
// lower quadword
- __ Pextrq(tmp, src, static_cast<int8_t>(0x0));
+ __ Pextrq(tmp, src, int8_t{0x0});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, static_cast<int8_t>(0x0));
+ __ Pinsrq(dst, tmp, int8_t{0x0});
// upper quadword
- __ Pextrq(tmp, src, static_cast<int8_t>(0x1));
+ __ Pextrq(tmp, src, int8_t{0x1});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, static_cast<int8_t>(0x1));
+ __ Pinsrq(dst, tmp, int8_t{0x1});
break;
}
case kX64I64x2Add: {
@@ -2886,16 +2904,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Movd(dst, i.InputOperand(0));
}
- __ Pshufd(dst, dst, static_cast<uint8_t>(0x0));
+ __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I32x4ExtractLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I32x4ReplaceLane: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (HasRegisterInput(instr, 2)) {
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2918,24 +2934,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttps2dq(dst, dst);
// Set top bit if >=0 is now < 0
__ Pand(tmp, dst);
- __ Psrad(tmp, static_cast<byte>(31));
+ __ Psrad(tmp, byte{31});
// Set positive overflow lanes to 0x7FFFFFFF
__ Pxor(dst, tmp);
break;
}
case kX64I32x4SConvertI16x8Low: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pmovsxwd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kX64I32x4SConvertI16x8High: {
XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputSimd128Register(0), static_cast<uint8_t>(8));
+ __ Palignr(dst, i.InputSimd128Register(0), uint8_t{8});
__ Pmovsxwd(dst, dst);
break;
}
case kX64I32x4Neg: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
@@ -2962,7 +2976,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4AddHoriz: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
__ Phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2971,17 +2984,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Mul: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MinS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -3001,7 +3011,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4GeS: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
__ Pminsd(dst, src);
@@ -3010,7 +3019,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4UConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp = i.TempSimd128Register(0);
XMMRegister tmp2 = i.TempSimd128Register(1);
@@ -3019,7 +3027,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, tmp2);
// scratch: float representation of max_signed
__ Pcmpeqd(tmp2, tmp2);
- __ Psrld(tmp2, static_cast<uint8_t>(1)); // 0x7fffffff
+ __ Psrld(tmp2, uint8_t{1}); // 0x7fffffff
__ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
// tmp: convert (src-max_signed).
// Positive overflow lanes -> 0x7FFFFFFF
@@ -3043,7 +3051,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4UConvertI16x8High: {
XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputSimd128Register(0), static_cast<uint8_t>(8));
+ __ Palignr(dst, i.InputSimd128Register(0), uint8_t{8});
__ Pmovzxwd(dst, dst);
break;
}
@@ -3053,17 +3061,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4MinU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4GtU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
XMMRegister tmp = i.TempSimd128Register(0);
@@ -3074,7 +3079,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4GeU: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(1);
__ Pminud(dst, src);
@@ -3085,6 +3089,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kX64I32x4BitMask: {
+ __ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Xorps(dst, dst);
@@ -3097,8 +3105,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Movd(dst, i.InputOperand(0));
}
- __ Pshuflw(dst, dst, static_cast<uint8_t>(0x0));
- __ Pshufd(dst, dst, static_cast<uint8_t>(0x0));
+ __ Pshuflw(dst, dst, uint8_t{0x0});
+ __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I16x8ExtractLaneU: {
@@ -3127,7 +3135,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8SConvertI8x16High: {
XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputSimd128Register(0), static_cast<uint8_t>(8));
+ __ Palignr(dst, i.InputSimd128Register(0), uint8_t{8});
__ Pmovsxbw(dst, dst);
break;
}
@@ -3218,7 +3226,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8UConvertI8x16High: {
XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputSimd128Register(0), static_cast<uint8_t>(8));
+ __ Palignr(dst, i.InputSimd128Register(0), uint8_t{8});
__ Pmovzxbw(dst, dst);
break;
}
@@ -3273,6 +3281,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kX64I16x8BitMask: {
+ Register dst = i.OutputRegister();
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ Packsswb(tmp, i.InputSimd128Register(0));
+ __ Pmovmskb(dst, tmp);
+ __ shrq(dst, Immediate(8));
+ break;
+ }
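Of the three new BitMask lowerings, this one is the least direct, since there is no 16-bit movemask instruction: Packsswb saturates the eight int16 lanes of the input into the upper eight bytes of the temp (saturation preserves each lane's sign), Pmovmskb collects all sixteen byte sign bits, and the final shrq drops the eight bits contributed by the temp's stale low half. A scalar sketch of the semantics being implemented, with an illustrative helper name:

#include <cstdint>

// bit i of the result is the sign bit of 16-bit lane i.
uint32_t I16x8BitMaskScalar(const int16_t lanes[8]) {
  uint32_t mask = 0;
  for (int i = 0; i < 8; ++i) {
    if (lanes[i] < 0) mask |= 1u << i;  // collect the sign bit of lane i
  }
  return mask;
}

kX64I32x4BitMask and kX64I8x16BitMask map directly onto Movmskps and Pmovmskb, so they need no such massaging.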
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
@@ -3330,27 +3346,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, 1)) {
// Perform 16-bit shift, then mask away low bits.
uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, static_cast<byte>(shift));
+ __ Psllw(dst, byte{shift});
uint8_t bmask = static_cast<uint8_t>(0xff << shift);
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
__ movl(tmp, Immediate(mask));
__ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, static_cast<uint8_t>(0));
+ __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
__ Pand(dst, tmp_simd);
} else {
- Register shift = i.InputRegister(1);
// Mask off the unwanted bits before word-shifting.
__ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
// Take shift value modulo 8.
- __ andq(shift, Immediate(7));
- __ movq(tmp, shift);
+ __ movq(tmp, i.InputRegister(1));
+ __ andq(tmp, Immediate(7));
__ addq(tmp, Immediate(8));
__ Movq(tmp_simd, tmp);
__ Psrlw(kScratchDoubleReg, tmp_simd);
__ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
__ Pand(dst, kScratchDoubleReg);
- __ Movq(tmp_simd, shift);
+      // TODO(zhin): subq here to avoid asking for another temporary register;
+      // examine codegen for other i8x16 shifts, which use fewer instructions.
+ __ subq(tmp, Immediate(8));
+ __ Movq(tmp_simd, tmp);
__ Psllw(dst, tmp_simd);
}
break;
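Since x64 has no packed 8-bit shift, both i8x16.shl paths above widen to 16-bit shifts and then clean up the bits that cross lane boundaries: the immediate path shifts first and masks each byte with 0xff << shift, while the register path masks each byte with 0xff >> shift before shifting. Either way the per-lane result is (b << shift) & 0xff. A scalar sketch of the immediate variant, with an invented helper name:

#include <cstdint>

// Shift every byte lane left by `shift` (mod 8) using 16-bit word shifts,
// then mask away the bits that bled in from the lower byte of each word.
void I8x16ShlScalar(uint8_t lanes[16], uint32_t shift) {
  shift &= 7;
  const uint8_t bmask = static_cast<uint8_t>(0xff << shift);
  for (int i = 0; i < 16; i += 2) {
    uint16_t word = static_cast<uint16_t>(lanes[i] | (lanes[i + 1] << 8));
    word = static_cast<uint16_t>(word << shift);              // the Psllw step
    lanes[i] = static_cast<uint8_t>(word) & bmask;            // low byte
    lanes[i + 1] = static_cast<uint8_t>(word >> 8) & bmask;   // high byte
  }
}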
@@ -3412,10 +3430,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// s = 00BB 00BB ... 00BB 00BB
__ Movaps(tmp, dst);
__ Movaps(kScratchDoubleReg, right);
- __ Psrlw(tmp, static_cast<byte>(8));
- __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psrlw(tmp, byte{8});
+ __ Psrlw(kScratchDoubleReg, byte{8});
// dst = left * 256
- __ Psllw(dst, static_cast<byte>(8));
+ __ Psllw(dst, byte{8});
// t = I16x8Mul(t, s)
// => __PP __PP ... __PP __PP
__ Pmullw(tmp, kScratchDoubleReg);
@@ -3424,10 +3442,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmullw(dst, right);
// t = I16x8Shl(t, 8)
// => PP00 PP00 ... PP00 PP00
- __ Psllw(tmp, static_cast<byte>(8));
+ __ Psllw(tmp, byte{8});
// dst = I16x8Shr(dst, 8)
// => 00pp 00pp ... 00pp 00pp
- __ Psrlw(dst, static_cast<byte>(8));
+ __ Psrlw(dst, byte{8});
// dst = I16x8Or(dst, t)
// => PPpp PPpp ... PPpp PPpp
__ Por(dst, tmp);
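This hunk touches the byte-wise multiply emulation (x64 has no packed 8-bit multiply): odd bytes are multiplied after shifting both inputs down by 8, even bytes by pre-shifting the left input up by 8 so the unwanted cross terms overflow out of each 16-bit lane. A per-word scalar sketch of that trick, with an illustrative helper name:

#include <cstdint>

// One 16-bit lane holding two byte lanes: returns the wrapping products of
// the corresponding bytes, low byte in the low half, high byte in the high
// half, mirroring the Psrlw/Psllw/Pmullw/Por sequence above.
uint16_t MulBytePairs(uint16_t left, uint16_t right) {
  uint32_t l = left, r = right;
  // Even byte: ((lo(left) << 8) * right) keeps lo(left) * lo(right) in the
  // lane's high byte (all other partial products overflow out); shift back.
  uint16_t even = static_cast<uint16_t>(((l << 8) & 0xffff) * r) >> 8;
  // Odd byte: multiply the high bytes directly, then move the low 8 bits of
  // the product up into the high byte of the lane.
  uint16_t odd = static_cast<uint16_t>(((l >> 8) * (r >> 8)) << 8);
  return static_cast<uint16_t>(even | odd);
}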
@@ -3478,13 +3496,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasImmediateInput(instr, 1)) {
// Perform 16-bit shift, then mask away high bits.
uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, static_cast<byte>(shift));
+ __ Psrlw(dst, byte{shift});
uint8_t bmask = 0xff >> shift;
uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
__ movl(tmp, Immediate(mask));
__ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, static_cast<byte>(0));
+ __ Pshufd(tmp_simd, tmp_simd, byte{0});
__ Pand(dst, tmp_simd);
} else {
__ Punpckhbw(kScratchDoubleReg, dst);
@@ -3542,6 +3560,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pabsb(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kX64I8x16BitMask: {
+ __ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kX64S128And: {
__ Pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -3592,18 +3614,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Out-of-range indices should return 0, add 112 so that any value > 15
// saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, static_cast<uint32_t>(0x70707070));
- __ Pshufd(mask, mask, static_cast<uint8_t>(0x0));
+ __ Move(mask, uint32_t{0x70707070});
+ __ Pshufd(mask, mask, uint8_t{0x0});
__ Paddusb(mask, i.InputSimd128Register(1));
__ Pshufb(dst, mask);
break;
}
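The 0x70707070 constant implements the swizzle's out-of-range rule with one saturating add: valid indices stay below 0x80 (and pshufb only looks at their low four bits), while anything >= 16 lands at 0x80 or above (saturating at 0xff), which is exactly the range where pshufb zeroes the lane. A per-byte sketch, with an illustrative helper name:

#include <cstdint>

// paddusb with 0x70, then apply pshufb's rule: the lane is zeroed when the
// top bit of its index byte is set, otherwise the low nibble selects a byte.
uint8_t SwizzleIndexAfterBias(uint8_t idx) {
  unsigned sum = idx + 0x70u;
  return static_cast<uint8_t>(sum > 0xff ? 0xff : sum);  // saturating add
}
// idx 0..15  -> 0x70..0x7f : top bit clear, low nibble still equals idx
// idx >= 16  -> >= 0x80    : top bit set, pshufb writes 0 for this lane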
case kX64S8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
- Register tmp = i.TempRegister(0);
- // Prepare 16 byte aligned buffer for shuffle control mask
- __ movq(tmp, rsp);
- __ andq(rsp, Immediate(-16));
+ XMMRegister tmp_simd = i.TempSimd128Register(0);
if (instr->InputCount() == 5) { // only one input operand
uint32_t mask[4] = {};
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -3611,22 +3630,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
mask[j - 1] = i.InputUint32(j);
}
- SetupShuffleMaskOnStack(tasm(), mask);
- __ Pshufb(dst, Operand(rsp, 0));
+ SetupShuffleMaskInTempRegister(tasm(), mask, tmp_simd);
+ __ Pshufb(dst, tmp_simd);
} else { // two input operands
DCHECK_EQ(6, instr->InputCount());
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 0);
- uint32_t mask[4] = {};
+ uint32_t mask1[4] = {};
for (int j = 5; j > 1; j--) {
uint32_t lanes = i.InputUint32(j);
for (int k = 0; k < 32; k += 8) {
uint8_t lane = lanes >> k;
- mask[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
+ mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
}
}
- SetupShuffleMaskOnStack(tasm(), mask);
- __ Pshufb(kScratchDoubleReg, Operand(rsp, 0));
- uint32_t mask1[4] = {};
+ SetupShuffleMaskInTempRegister(tasm(), mask1, tmp_simd);
+ __ Pshufb(kScratchDoubleReg, tmp_simd);
+ uint32_t mask2[4] = {};
if (instr->InputAt(1)->IsSimd128Register()) {
XMMRegister src1 = i.InputSimd128Register(1);
if (src1 != dst) __ movups(dst, src1);
@@ -3637,14 +3656,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint32_t lanes = i.InputUint32(j);
for (int k = 0; k < 32; k += 8) {
uint8_t lane = lanes >> k;
- mask1[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
+ mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
}
}
- SetupShuffleMaskOnStack(tasm(), mask1);
- __ Pshufb(dst, Operand(rsp, 0));
+ SetupShuffleMaskInTempRegister(tasm(), mask2, tmp_simd);
+ __ Pshufb(dst, tmp_simd);
__ Por(dst, kScratchDoubleReg);
}
- __ movq(rsp, tmp);
break;
}
case kX64S8x16LoadSplat: {
@@ -3658,7 +3676,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- static_cast<uint8_t>(0));
+ uint8_t{0});
__ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
break;
}
@@ -3670,7 +3688,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Movss(i.OutputSimd128Register(), i.MemoryOperand());
__ Shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
- static_cast<byte>(0));
+ byte{0});
}
break;
}
@@ -3680,37 +3698,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8Load8x8S: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64I16x8Load8x8U: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64I32x4Load16x4S: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64I32x4Load16x4U: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64I64x2Load32x2S: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64I64x2Load32x2U: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
@@ -3760,10 +3772,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
- __ Pshufd(dst, dst, static_cast<uint8_t>(0));
+ __ Pshufd(dst, dst, uint8_t{0});
} else {
ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
- __ Pshufd(dst, dst, static_cast<uint8_t>(0xaa));
+ __ Pshufd(dst, dst, uint8_t{0xaa});
}
break;
}
@@ -3781,10 +3793,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, static_cast<uint8_t>(0));
+ __ Pshufd(dst, dst, uint8_t{0});
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, static_cast<uint8_t>(0xaa));
+ __ Pshufd(dst, dst, uint8_t{0xaa});
}
break;
}
@@ -3818,10 +3830,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
- __ Psrld(kScratchDoubleReg, static_cast<byte>(16));
+ __ Psrld(kScratchDoubleReg, byte{16});
src2 = kScratchDoubleReg;
}
- __ Psrld(dst, static_cast<byte>(16));
+ __ Psrld(dst, byte{16});
__ Packusdw(dst, src2);
break;
}
@@ -3831,11 +3843,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(dst, i.InputSimd128Register(0));
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
if (instr->InputCount() == 2) {
- ASSEMBLE_SIMD_IMM_INSTR(Pblendw, kScratchDoubleReg, 1,
- static_cast<uint8_t>(0x55));
+ ASSEMBLE_SIMD_IMM_INSTR(Pblendw, kScratchDoubleReg, 1, uint8_t{0x55});
src2 = kScratchDoubleReg;
}
- __ Pblendw(dst, kScratchDoubleReg, static_cast<uint8_t>(0xaa));
+ __ Pblendw(dst, kScratchDoubleReg, uint8_t{0xaa});
__ Packusdw(dst, src2);
break;
}
@@ -3845,10 +3856,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
- __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psrlw(kScratchDoubleReg, byte{8});
src2 = kScratchDoubleReg;
}
- __ Psrlw(dst, static_cast<byte>(8));
+ __ Psrlw(dst, byte{8});
__ Packuswb(dst, src2);
break;
}
@@ -3858,42 +3869,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(dst, i.InputSimd128Register(0));
if (instr->InputCount() == 2) {
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
- __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
- __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psllw(kScratchDoubleReg, byte{8});
+ __ Psrlw(kScratchDoubleReg, byte{8});
src2 = kScratchDoubleReg;
}
- __ Psllw(dst, static_cast<byte>(8));
- __ Psrlw(dst, static_cast<byte>(8));
+ __ Psllw(dst, byte{8});
+ __ Psrlw(dst, byte{8});
__ Packuswb(dst, src2);
break;
}
case kX64S8x16TransposeLow: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Psllw(dst, static_cast<byte>(8));
+ __ Psllw(dst, byte{8});
if (instr->InputCount() == 1) {
__ Movups(kScratchDoubleReg, dst);
} else {
DCHECK_EQ(2, instr->InputCount());
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
- __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psllw(kScratchDoubleReg, byte{8});
}
- __ Psrlw(dst, static_cast<byte>(8));
+ __ Psrlw(dst, byte{8});
__ Por(dst, kScratchDoubleReg);
break;
}
case kX64S8x16TransposeHigh: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ Psrlw(dst, static_cast<byte>(8));
+ __ Psrlw(dst, byte{8});
if (instr->InputCount() == 1) {
__ Movups(kScratchDoubleReg, dst);
} else {
DCHECK_EQ(2, instr->InputCount());
ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 1);
- __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psrlw(kScratchDoubleReg, byte{8});
}
- __ Psllw(kScratchDoubleReg, static_cast<byte>(8));
+ __ Psllw(kScratchDoubleReg, byte{8});
__ Por(dst, kScratchDoubleReg);
break;
}
@@ -3910,8 +3921,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufhw(dst, dst, shuffle_mask);
}
__ Movaps(kScratchDoubleReg, dst);
- __ Psrlw(kScratchDoubleReg, static_cast<byte>(8));
- __ Psllw(dst, static_cast<byte>(8));
+ __ Psrlw(kScratchDoubleReg, byte{8});
+ __ Psllw(dst, byte{8});
__ Por(dst, kScratchDoubleReg);
break;
}
@@ -3919,7 +3930,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64S1x4AnyTrue:
case kX64S1x8AnyTrue:
case kX64S1x16AnyTrue: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
@@ -3933,7 +3943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
case kX64S1x2AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq);
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
break;
}
case kX64S1x4AllTrue: {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index be5ac0d6c3..745f5c6cb2 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -48,6 +48,8 @@ namespace compiler {
V(X64Shr32) \
V(X64Sar) \
V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
V(X64Ror) \
V(X64Ror32) \
V(X64Lzcnt) \
@@ -170,6 +172,8 @@ namespace compiler {
V(X64F64x2Le) \
V(X64F64x2Qfma) \
V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -193,6 +197,8 @@ namespace compiler {
V(X64F32x4Le) \
V(X64F32x4Qfma) \
V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2ReplaceLane) \
@@ -241,6 +247,7 @@ namespace compiler {
V(X64I32x4GtU) \
V(X64I32x4GeU) \
V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
@@ -275,6 +282,7 @@ namespace compiler {
V(X64I16x8GeU) \
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLaneU) \
V(X64I8x16ExtractLaneS) \
@@ -304,6 +312,7 @@ namespace compiler {
V(X64I8x16GeU) \
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
V(X64S128Zero) \
V(X64S128Not) \
V(X64S128And) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index f4a74a4050..d2c1d14855 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -45,6 +45,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Shr32:
case kX64Sar:
case kX64Sar32:
+ case kX64Rol:
+ case kX64Rol32:
case kX64Ror:
case kX64Ror32:
case kX64Lzcnt:
@@ -142,6 +144,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Le:
case kX64F64x2Qfma:
case kX64F64x2Qfms:
+ case kX64F64x2Pmin:
+ case kX64F64x2Pmax:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -165,6 +169,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Le:
case kX64F32x4Qfma:
case kX64F32x4Qfms:
+ case kX64F32x4Pmin:
+ case kX64F32x4Pmax:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
case kX64I64x2ReplaceLane:
@@ -213,6 +219,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4GtU:
case kX64I32x4GeU:
case kX64I32x4Abs:
+ case kX64I32x4BitMask:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
@@ -247,6 +254,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8GeU:
case kX64I16x8RoundingAverageU:
case kX64I16x8Abs:
+ case kX64I16x8BitMask:
case kX64I8x16Splat:
case kX64I8x16ExtractLaneU:
case kX64I8x16ExtractLaneS:
@@ -276,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16GeU:
case kX64I8x16RoundingAverageU:
case kX64I8x16Abs:
+ case kX64I8x16BitMask:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 392b207c8e..dd3f556937 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -911,6 +911,14 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
VisitWord64Shift(this, node, kX64Sar);
}
+void InstructionSelector::VisitWord32Rol(Node* node) {
+ VisitWord32Shift(this, node, kX64Rol32);
+}
+
+void InstructionSelector::VisitWord64Rol(Node* node) {
+ VisitWord64Shift(this, node, kX64Rol);
+}
+
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitWord32Shift(this, node, kX64Ror32);
}
@@ -1290,6 +1298,7 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord32Rol:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kInt32Add:
@@ -2730,6 +2739,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
V(I32x4Abs) \
+ V(I32x4BitMask) \
V(I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High) \
V(I16x8Neg) \
@@ -2738,6 +2748,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8Abs) \
V(I8x16Neg) \
V(I8x16Abs) \
+ V(I8x16BitMask) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -2815,7 +2826,8 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1))); \
} else { \
- InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempRegister()}; \
Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
g.UseUniqueRegister(node->InputAt(0)), \
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
@@ -3033,6 +3045,13 @@ void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
+void InstructionSelector::VisitI16x8BitMask(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I16x8BitMask, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
X64OperandGenerator g(this);
Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
@@ -3328,7 +3347,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
imms[imm_count++] = Pack4Lanes(shuffle + 4);
imms[imm_count++] = Pack4Lanes(shuffle + 8);
imms[imm_count++] = Pack4Lanes(shuffle + 12);
- temps[temp_count++] = g.TempRegister();
+ temps[temp_count++] = g.TempSimd128Register();
}
// Use DefineAsRegister(node) and Use(src0) if we can without forcing an extra
@@ -3337,7 +3356,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
InstructionOperand dst =
no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
InstructionOperand src0 =
- src0_needs_reg ? g.UseRegister(input0) : g.Use(input0);
+ src0_needs_reg ? g.UseUniqueRegister(input0) : g.UseUnique(input0);
int input_count = 0;
InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
@@ -3345,7 +3364,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
if (!is_swizzle) {
Node* input1 = node->InputAt(1);
inputs[input_count++] =
- src1_needs_reg ? g.UseRegister(input1) : g.Use(input1);
+ src1_needs_reg ? g.UseUniqueRegister(input1) : g.UseUnique(input1);
}
for (int i = 0; i < imm_count; ++i) {
inputs[input_count++] = g.UseImmediate(imms[i]);
@@ -3361,12 +3380,41 @@ void InstructionSelector::VisitS8x16Swizzle(Node* node) {
arraysize(temps), temps);
}
+namespace {
+void VisitPminOrPmax(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+  // Due to the way minps/minpd work, we want the dst to be the same as the
+  // second input: b = pmin(a, b) directly maps to minps b, a.
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)));
+}
+} // namespace
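A scalar illustration of the operand swap in VisitPminOrPmax, assuming the standard minps behavior of returning the second source on ties and NaNs; the function names are made up:

// Wasm's pseudo-min and x64's minps are the same expression once the
// operands are swapped, which is why the selector defines the output
// same-as-first and passes input 1 before input 0.
float WasmF32x4PminLane(float a, float b) { return b < a ? b : a; }
float X64MinpsLane(float dst, float src) { return dst < src ? dst : src; }
// WasmF32x4PminLane(a, b) == X64MinpsLane(b, a) for every input, including
// NaNs and signed zeros, because both expressions fall through to the same
// operand whenever the comparison is false.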
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitPminOrPmax(this, node, kX64F32x4Pmin);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitPminOrPmax(this, node, kX64F32x4Pmax);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitPminOrPmax(this, node, kX64F64x2Pmin);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitPminOrPmax(this, node, kX64F64x2Pmax);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kWord32Rol | MachineOperatorBuilder::kWord64Rol;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 302c429f0f..035d64144f 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -308,6 +308,10 @@ TNode<ExternalReference> CodeAssembler::ExternalConstant(
raw_assembler()->ExternalConstant(address));
}
+TNode<Float32T> CodeAssembler::Float32Constant(double value) {
+ return UncheckedCast<Float32T>(jsgraph()->Float32Constant(value));
+}
+
TNode<Float64T> CodeAssembler::Float64Constant(double value) {
return UncheckedCast<Float64T>(jsgraph()->Float64Constant(value));
}
@@ -435,6 +439,20 @@ void CodeAssembler::Return(TNode<WordT> value) {
return raw_assembler()->Return(value);
}
+void CodeAssembler::Return(TNode<Float32T> value) {
+ DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
+ DCHECK_EQ(MachineType::Float32(),
+ raw_assembler()->call_descriptor()->GetReturnType(0));
+ return raw_assembler()->Return(value);
+}
+
+void CodeAssembler::Return(TNode<Float64T> value) {
+ DCHECK_EQ(1, raw_assembler()->call_descriptor()->ReturnCount());
+ DCHECK_EQ(MachineType::Float64(),
+ raw_assembler()->call_descriptor()->GetReturnType(0));
+ return raw_assembler()->Return(value);
+}
+
void CodeAssembler::Return(TNode<WordT> value1, TNode<WordT> value2) {
DCHECK_EQ(2, raw_assembler()->call_descriptor()->ReturnCount());
DCHECK_EQ(
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 3a137fdee2..d9d81cfe30 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -65,7 +65,6 @@ class JSSegmenter;
class JSV8BreakIterator;
class JSWeakCollection;
class JSFinalizationRegistry;
-class JSFinalizationRegistryCleanupIterator;
class JSWeakMap;
class JSWeakRef;
class JSWeakSet;
@@ -76,8 +75,8 @@ class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
class Zone;
-#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name;
-TORQUE_INTERNAL_CLASS_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
+#define MAKE_FORWARD_DECLARATION(Name) class Name;
+TORQUE_INTERNAL_CLASS_LIST(MAKE_FORWARD_DECLARATION)
#undef MAKE_FORWARD_DECLARATION
template <typename T>
@@ -274,6 +273,7 @@ class CodeAssemblerParameterizedLabel;
V(WordShl, WordT, WordT, IntegralT) \
V(WordShr, WordT, WordT, IntegralT) \
V(WordSar, WordT, WordT, IntegralT) \
+ V(WordSarShiftOutZeros, WordT, WordT, IntegralT) \
V(Word32Or, Word32T, Word32T, Word32T) \
V(Word32And, Word32T, Word32T, Word32T) \
V(Word32Xor, Word32T, Word32T, Word32T) \
@@ -281,6 +281,7 @@ class CodeAssemblerParameterizedLabel;
V(Word32Shl, Word32T, Word32T, Word32T) \
V(Word32Shr, Word32T, Word32T, Word32T) \
V(Word32Sar, Word32T, Word32T, Word32T) \
+ V(Word32SarShiftOutZeros, Word32T, Word32T, Word32T) \
V(Word64And, Word64T, Word64T, Word64T) \
V(Word64Or, Word64T, Word64T, Word64T) \
V(Word64Xor, Word64T, Word64T, Word64T) \
@@ -321,6 +322,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
+ V(TruncateFloat32ToInt32, Int32T, Float32T) \
V(TruncateFloat64ToFloat32, Float32T, Float64T) \
V(TruncateFloat64ToWord32, Uint32T, Float64T) \
V(TruncateInt64ToInt32, Int32T, Int64T) \
@@ -533,6 +535,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<String> StringConstant(const char* str);
TNode<Oddball> BooleanConstant(bool value);
TNode<ExternalReference> ExternalConstant(ExternalReference address);
+ TNode<Float32T> Float32Constant(double value);
TNode<Float64T> Float64Constant(double value);
TNode<BoolT> Int32TrueConstant() {
return ReinterpretCast<BoolT>(Int32Constant(1));
@@ -572,6 +575,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Return(TNode<Int32T> value);
void Return(TNode<Uint32T> value);
void Return(TNode<WordT> value);
+ void Return(TNode<Float32T> value);
+ void Return(TNode<Float64T> value);
void Return(TNode<WordT> value1, TNode<WordT> value2);
void PopAndReturn(Node* pop, Node* value);
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 95ecb0f820..c04617c244 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -307,8 +307,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// hence checkpoints can be cut out of the effect chain flowing into it.
effect = NodeProperties::GetEffectInput(effect);
NodeProperties::ReplaceEffectInput(node, effect);
- Reduction const reduction = ReduceReturn(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceReturn(node));
}
// TODO(ahaas): Extend the reduction below to multiple return values.
if (ValueInputCountOfReturn(node->op()) != 1) {
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 33990dfa48..b9ed54256a 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -176,7 +176,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
- DependentCode::kFieldOwnerGroup);
+ DependentCode::kFieldRepresentationGroup);
}
private:
@@ -206,7 +206,7 @@ class FieldTypeDependency final : public CompilationDependency {
void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
- DependentCode::kFieldOwnerGroup);
+ DependentCode::kFieldTypeGroup);
}
private:
@@ -234,7 +234,7 @@ class FieldConstnessDependency final : public CompilationDependency {
void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
- DependentCode::kFieldOwnerGroup);
+ DependentCode::kFieldConstGroup);
}
private:
@@ -534,8 +534,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// that triggers its deoptimization.
if (FLAG_stress_gc_during_compilation) {
broker_->isolate()->heap()->PreciseCollectAllGarbage(
- Heap::kNoGCFlags, GarbageCollectionReason::kTesting,
- kGCCallbackFlagForced);
+ Heap::kForcedGC, GarbageCollectionReason::kTesting, kNoGCCallbackFlags);
}
#ifdef DEBUG
for (auto dep : dependencies_) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d3344b9545..20391eacce 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -564,6 +564,12 @@ void EffectControlLinearizer::Run() {
// TODO(rmcilroy) We should not depend on having rpo_order on schedule, and
// instead just do our own RPO walk here.
for (BasicBlock* block : *(schedule()->rpo_order())) {
+ if (block != schedule()->start() && block->PredecessorCount() == 0) {
+      // Block has been removed from the schedule by a preceding unreachable
+      // node; just skip it.
+ continue;
+ }
+
gasm()->Reset(block);
BasicBlock::iterator instr = block->begin();
@@ -2370,25 +2376,21 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());
Node* check = __ Uint32LessThan(index, limit);
- switch (params.mode()) {
- case CheckBoundsParameters::kDeoptOnOutOfBounds:
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
- break;
- case CheckBoundsParameters::kAbortOnOutOfBounds: {
- auto if_abort = __ MakeDeferredLabel();
- auto done = __ MakeLabel();
+ if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
+ params.check_parameters().feedback(), check, frame_state,
+ IsSafetyCheck::kCriticalSafetyCheck);
+ } else {
+ auto if_abort = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
- __ Branch(check, &done, &if_abort);
+ __ Branch(check, &done, &if_abort);
- __ Bind(&if_abort);
- __ Unreachable();
- __ Goto(&done);
+ __ Bind(&if_abort);
+ __ Unreachable();
+ __ Goto(&done);
- __ Bind(&done);
- break;
- }
+ __ Bind(&done);
}
return index;
@@ -2421,25 +2423,21 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
const CheckBoundsParameters& params = CheckBoundsParametersOf(node->op());
Node* check = __ Uint64LessThan(index, limit);
- switch (params.mode()) {
- case CheckBoundsParameters::kDeoptOnOutOfBounds:
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
- break;
- case CheckBoundsParameters::kAbortOnOutOfBounds: {
- auto if_abort = __ MakeDeferredLabel();
- auto done = __ MakeLabel();
+ if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
+ params.check_parameters().feedback(), check, frame_state,
+ IsSafetyCheck::kCriticalSafetyCheck);
+ } else {
+ auto if_abort = __ MakeDeferredLabel();
+ auto done = __ MakeLabel();
- __ Branch(check, &done, &if_abort);
+ __ Branch(check, &done, &if_abort);
- __ Bind(&if_abort);
- __ Unreachable();
- __ Goto(&done);
+ __ Bind(&if_abort);
+ __ Unreachable();
+ __ Goto(&done);
- __ Bind(&done);
- break;
- }
+ __ Bind(&done);
}
return index;
}
@@ -4552,18 +4550,20 @@ Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
}
Node* EffectControlLinearizer::ChangeSmiToIntPtr(Node* value) {
- // Do shift on 32bit values if Smis are stored in the lower word.
if (machine()->Is64() && SmiValuesAre31Bits()) {
- return __ ChangeInt32ToInt64(
- __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant()));
+ // First sign-extend the upper half, then shift away the Smi tag.
+ return __ WordSarShiftOutZeros(
+ __ ChangeInt32ToInt64(__ TruncateInt64ToInt32(value)),
+ SmiShiftBitsConstant());
}
- return __ WordSar(value, SmiShiftBitsConstant());
+ return __ WordSarShiftOutZeros(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
// Do shift on 32bit values if Smis are stored in the lower word.
if (machine()->Is64() && SmiValuesAre31Bits()) {
- return __ Word32Sar(__ TruncateInt64ToInt32(value), SmiShiftBitsConstant());
+ return __ Word32SarShiftOutZeros(__ TruncateInt64ToInt32(value),
+ SmiShiftBitsConstant());
}
if (machine()->Is64()) {
return __ TruncateInt64ToInt32(ChangeSmiToIntPtr(value));
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 246bf1e229..6057f1ce64 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -19,7 +19,8 @@ namespace compiler {
class GraphAssembler::BasicBlockUpdater {
public:
- BasicBlockUpdater(Schedule* schedule, Graph* graph, Zone* temp_zone);
+ BasicBlockUpdater(Schedule* schedule, Graph* graph,
+ CommonOperatorBuilder* common, Zone* temp_zone);
Node* AddNode(Node* node);
Node* AddNode(Node* node, BasicBlock* to);
@@ -48,6 +49,7 @@ class GraphAssembler::BasicBlockUpdater {
bool IsOriginalNode(Node* node);
void UpdateSuccessors(BasicBlock* block);
void SetBlockDeferredFromPredecessors();
+ void RemoveSuccessorsFromSchedule();
void CopyForChange();
Zone* temp_zone_;
@@ -64,6 +66,7 @@ class GraphAssembler::BasicBlockUpdater {
Schedule* schedule_;
Graph* graph_;
+ CommonOperatorBuilder* common_;
// The nodes in the original block if we are in 'changed' state. Retained to
// avoid invalidating iterators that are iterating over the original nodes of
@@ -85,14 +88,15 @@ class GraphAssembler::BasicBlockUpdater {
State state_;
};
-GraphAssembler::BasicBlockUpdater::BasicBlockUpdater(Schedule* schedule,
- Graph* graph,
- Zone* temp_zone)
+GraphAssembler::BasicBlockUpdater::BasicBlockUpdater(
+ Schedule* schedule, Graph* graph, CommonOperatorBuilder* common,
+ Zone* temp_zone)
: temp_zone_(temp_zone),
current_block_(nullptr),
original_block_(nullptr),
schedule_(schedule),
graph_(graph),
+ common_(common),
saved_nodes_(schedule->zone()),
saved_successors_(schedule->zone()),
original_control_(BasicBlock::kNone),
@@ -264,11 +268,66 @@ void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* from,
current_block_ = nullptr;
}
+void GraphAssembler::BasicBlockUpdater::RemoveSuccessorsFromSchedule() {
+ ZoneSet<BasicBlock*> blocks(temp_zone());
+ ZoneQueue<BasicBlock*> worklist(temp_zone());
+
+ for (SuccessorInfo succ : saved_successors_) {
+ BasicBlock* block = succ.block;
+ block->predecessors().erase(block->predecessors().begin() + succ.index);
+ blocks.insert(block);
+ worklist.push(block);
+ }
+ saved_successors_.clear();
+
+  // Walk through the unreachable blocks until we reach the end node, clearing
+  // their successors and predecessors along the way.
+  // This works because the unreachable paths form self-contained control flow
+  // that doesn't re-merge with reachable control flow (checked below), and
+  // because DeadCodeElimination::ReduceEffectPhi prevents Unreachable from
+  // going into an effect-phi. We would need to extend this if we ever need
+  // the ability to mark control flow as unreachable later in the pipeline.
+ while (!worklist.empty()) {
+ BasicBlock* current = worklist.front();
+ worklist.pop();
+
+ for (BasicBlock* successor : current->successors()) {
+      // Remove the block from the successor's predecessors.
+ ZoneVector<BasicBlock*>& predecessors = successor->predecessors();
+ auto it = std::find(predecessors.begin(), predecessors.end(), current);
+ DCHECK_EQ(*it, current);
+ predecessors.erase(it);
+
+ if (successor == schedule_->end()) {
+ // If we have reached the end block, remove this block's control input
+ // from the end node's control inputs.
+ DCHECK_EQ(current->SuccessorCount(), 1);
+ NodeProperties::RemoveControlFromEnd(graph_, common_,
+ current->control_input());
+ } else {
+        // Otherwise, add the successor to the worklist if it hasn't been seen yet.
+ if (blocks.insert(successor).second) {
+ worklist.push(successor);
+ }
+ }
+ }
+ current->ClearSuccessors();
+ }
+
+#ifdef DEBUG
+ // Ensure that the set of blocks being removed from the schedule are self
+ // contained, i.e., all predecessors have been removed from these blocks.
+ for (BasicBlock* block : blocks) {
+ CHECK_EQ(block->PredecessorCount(), 0);
+ CHECK_EQ(block->SuccessorCount(), 0);
+ }
+#endif
+}
+
void GraphAssembler::BasicBlockUpdater::AddThrow(Node* node) {
if (state_ == kUnchanged) {
CopyForChange();
}
- schedule_->AddThrow(current_block_, node);
// Clear original successors and replace the block's original control and
// control input to the throw, since this block is now connected directly to
@@ -280,10 +339,19 @@ void GraphAssembler::BasicBlockUpdater::AddThrow(Node* node) {
original_control_input_ = node;
original_control_ = BasicBlock::kThrow;
- for (SuccessorInfo succ : saved_successors_) {
- succ.block->RemovePredecessor(succ.index);
+ bool already_connected_to_end =
+ saved_successors_.size() == 1 &&
+ saved_successors_[0].block == schedule_->end();
+ if (!already_connected_to_end) {
+ // Remove all successor blocks from the schedule.
+ RemoveSuccessorsFromSchedule();
+
+    // Update the current block's successor to be the end block.
+ DCHECK(saved_successors_.empty());
+ size_t index = schedule_->end()->predecessors().size();
+ schedule_->end()->AddPredecessor(current_block_);
+ saved_successors_.push_back({schedule_->end(), index});
}
- saved_successors_.clear();
}
void GraphAssembler::BasicBlockUpdater::UpdateSuccessors(BasicBlock* block) {
@@ -341,9 +409,10 @@ GraphAssembler::GraphAssembler(MachineGraph* mcgraph, Zone* zone,
mcgraph_(mcgraph),
effect_(nullptr),
control_(nullptr),
- block_updater_(schedule != nullptr ? new BasicBlockUpdater(
- schedule, mcgraph->graph(), zone)
- : nullptr),
+ block_updater_(schedule != nullptr
+ ? new BasicBlockUpdater(schedule, mcgraph->graph(),
+ mcgraph->common(), zone)
+ : nullptr),
loop_headers_(zone),
mark_loop_exits_(mark_loop_exits) {}
@@ -590,6 +659,11 @@ TNode<Boolean> JSGraphAssembler::ObjectIsCallable(TNode<Object> value) {
graph()->NewNode(simplified()->ObjectIsCallable(), value));
}
+TNode<Boolean> JSGraphAssembler::ObjectIsUndetectable(TNode<Object> value) {
+ return AddNode<Boolean>(
+ graph()->NewNode(simplified()->ObjectIsUndetectable(), value));
+}
+
Node* JSGraphAssembler::CheckIf(Node* cond, DeoptimizeReason reason) {
return AddNode(graph()->NewNode(simplified()->CheckIf(reason), cond, effect(),
control()));
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index b9f605ae6e..f57c732912 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -82,6 +82,7 @@ class BasicBlock;
V(Word32Equal) \
V(Word32Or) \
V(Word32Sar) \
+ V(Word32SarShiftOutZeros) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Xor) \
@@ -91,7 +92,9 @@ class BasicBlock;
V(WordAnd) \
V(WordEqual) \
V(WordSar) \
- V(WordShl)
+ V(WordSarShiftOutZeros) \
+ V(WordShl) \
+ V(WordXor)
#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
V(Int32AddWithOverflow) \
@@ -579,6 +582,7 @@ void GraphAssembler::MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label,
label->effect_->ReplaceInput(1, effect());
for (size_t i = 0; i < kVarCount; i++) {
label->bindings_[i]->ReplaceInput(1, var_array[i]);
+ CHECK(!NodeProperties::IsTyped(var_array[i])); // Unsupported.
}
}
} else {
@@ -622,6 +626,13 @@ void GraphAssembler::MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label,
NodeProperties::ChangeOp(
label->bindings_[i],
common()->Phi(label->representations_[i], merged_count + 1));
+ if (NodeProperties::IsTyped(label->bindings_[i])) {
+ CHECK(NodeProperties::IsTyped(var_array[i]));
+ Type old_type = NodeProperties::GetType(label->bindings_[i]);
+ Type new_type = Type::Union(
+ old_type, NodeProperties::GetType(var_array[i]), graph()->zone());
+ NodeProperties::SetType(label->bindings_[i], new_type);
+ }
}
}
}
@@ -822,6 +833,7 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
TNode<String> StringSubstring(TNode<String> string, TNode<Number> from,
TNode<Number> to);
TNode<Boolean> ObjectIsCallable(TNode<Object> value);
+ TNode<Boolean> ObjectIsUndetectable(TNode<Object> value);
Node* CheckIf(Node* cond, DeoptimizeReason reason);
TNode<Boolean> NumberIsFloat64Hole(TNode<Number> value);
TNode<Boolean> ToBoolean(TNode<Object> value);
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 91b4b51c91..d9bc9d6c22 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -161,6 +161,11 @@ void GraphReducer::ReduceTop() {
// Check if the reduction is an in-place update of the {node}.
Node* const replacement = reduction.replacement();
if (replacement == node) {
+ for (Node* const user : node->uses()) {
+ DCHECK_IMPLIES(user == node, state_.Get(node) != State::kVisited);
+ Revisit(user);
+ }
+
// In-place update of {node}, may need to recurse on an input.
Node::Inputs node_inputs = node->inputs();
for (int i = 0; i < node_inputs.count(); ++i) {
@@ -178,12 +183,6 @@ void GraphReducer::ReduceTop() {
// Check if we have a new replacement.
if (replacement != node) {
Replace(node, replacement, max_id);
- } else {
- // Revisit all uses of the node.
- for (Node* const user : node->uses()) {
- // Don't revisit this node if it refers to itself.
- if (user != node) Revisit(user);
- }
}
}
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index bbcc67b074..3c15214d93 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -35,6 +35,10 @@ class Reduction final {
Node* replacement() const { return replacement_; }
bool Changed() const { return replacement() != nullptr; }
+ Reduction FollowedBy(Reduction next) const {
+ if (next.Changed()) return next;
+ return *this;
+ }
private:
Node* replacement_;
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index deeaa89c1e..ef0da29a05 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -319,6 +319,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
bool has_feedback_vector() const;
bool has_initial_map() const;
bool has_prototype() const;
+ bool IsOptimized() const;
bool PrototypeRequiresRuntimeLookup() const;
void Serialize();
@@ -331,6 +332,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
FeedbackVectorRef feedback_vector() const;
+ CodeRef code() const;
int InitialMapInstanceSizeWithMinSlack() const;
};
@@ -383,45 +385,41 @@ class ContextRef : public HeapObjectRef {
base::Optional<ScopeInfoRef> scope_info() const;
};
-#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, boolean_function) \
- V(JSFunction, bigint_function) \
- V(JSFunction, number_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(JSFunction, promise_then) \
- V(JSFunction, regexp_function) \
- V(JSFunction, string_function) \
- V(JSFunction, symbol_function) \
- V(JSGlobalObject, global_object) \
- V(JSGlobalProxy, global_proxy_object) \
- V(JSObject, promise_prototype) \
- V(Map, block_context_map) \
- V(Map, bound_function_with_constructor_map) \
- V(Map, bound_function_without_constructor_map) \
- V(Map, catch_context_map) \
- V(Map, eval_context_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, function_context_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(Map, with_context_map) \
- V(ScriptContextTable, script_context_table) \
- V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
- V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
- V(SharedFunctionInfo, promise_then_finally_shared_fun) \
- V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, boolean_function) \
+ V(JSFunction, bigint_function) \
+ V(JSFunction, number_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(JSFunction, promise_then) \
+ V(JSFunction, regexp_function) \
+ V(JSFunction, string_function) \
+ V(JSFunction, symbol_function) \
+ V(JSGlobalObject, global_object) \
+ V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, promise_prototype) \
+ V(Map, block_context_map) \
+ V(Map, bound_function_with_constructor_map) \
+ V(Map, bound_function_without_constructor_map) \
+ V(Map, catch_context_map) \
+ V(Map, eval_context_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, function_context_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(Map, with_context_map) \
+ V(ScriptContextTable, script_context_table)
// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
// happened when Turbofan is invoked via --always-opt.
@@ -470,14 +468,6 @@ class ScriptContextTableRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(ScriptContextTable, HeapObjectRef)
Handle<ScriptContextTable> object() const;
-
- struct LookupResult {
- ContextRef context;
- bool immutable;
- int index;
- };
-
- base::Optional<LookupResult> lookup(const NameRef& name) const;
};
class DescriptorArrayRef : public HeapObjectRef {
@@ -920,6 +910,8 @@ class CodeRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(Code, HeapObjectRef)
Handle<Code> object() const;
+
+ unsigned inlined_bytecode_size() const;
};
class InternalizedStringRef : public StringRef {
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index ad68d34d03..a6bbd563a0 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -670,6 +670,9 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
+ case IrOpcode::kWord64Rol:
+ DCHECK(machine()->Word32Rol().IsSupported());
+ V8_FALLTHROUGH;
case IrOpcode::kWord64Ror: {
DCHECK_EQ(2, node->InputCount());
Node* input = node->InputAt(0);
@@ -702,14 +705,19 @@ void Int64Lowering::LowerNode(Node* node) {
Node* inv_shift = graph()->NewNode(
common()->Int32Constant(32 - masked_shift_value));
- Node* low_node = graph()->NewNode(
- machine()->Word32Or(),
- graph()->NewNode(machine()->Word32Shr(), low_input, masked_shift),
- graph()->NewNode(machine()->Word32Shl(), high_input, inv_shift));
- Node* high_node = graph()->NewNode(
- machine()->Word32Or(), graph()->NewNode(machine()->Word32Shr(),
- high_input, masked_shift),
- graph()->NewNode(machine()->Word32Shl(), low_input, inv_shift));
+ auto* op1 = machine()->Word32Shr();
+ auto* op2 = machine()->Word32Shl();
+ bool is_ror = node->opcode() == IrOpcode::kWord64Ror;
+ if (!is_ror) std::swap(op1, op2);
+
+ Node* low_node =
+ graph()->NewNode(machine()->Word32Or(),
+ graph()->NewNode(op1, low_input, masked_shift),
+ graph()->NewNode(op2, high_input, inv_shift));
+ Node* high_node =
+ graph()->NewNode(machine()->Word32Or(),
+ graph()->NewNode(op1, high_input, masked_shift),
+ graph()->NewNode(op2, low_input, inv_shift));
ReplaceNode(node, low_node, high_node);
}
} else {
@@ -720,15 +728,19 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(common()->Int32Constant(0x1F)));
}
- // By creating this bit-mask with SAR and SHL we do not have to deal
- // with shift == 0 as a special case.
- Node* inv_mask = graph()->NewNode(
- machine()->Word32Shl(),
- graph()->NewNode(machine()->Word32Sar(),
- graph()->NewNode(common()->Int32Constant(
- std::numeric_limits<int32_t>::min())),
+ bool is_ror = node->opcode() == IrOpcode::kWord64Ror;
+ Node* inv_mask =
+ is_ror ? graph()->NewNode(
+ machine()->Word32Xor(),
+ graph()->NewNode(
+ machine()->Word32Shr(),
+ graph()->NewNode(common()->Int32Constant(-1)),
safe_shift),
- graph()->NewNode(common()->Int32Constant(1)));
+ graph()->NewNode(common()->Int32Constant(-1)))
+ : graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(common()->Int32Constant(-1)),
+ safe_shift);
Node* bit_mask =
graph()->NewNode(machine()->Word32Xor(), inv_mask,
@@ -759,21 +771,24 @@ void Int64Lowering::LowerNode(Node* node) {
lt32.Phi(MachineRepresentation::kWord32, GetReplacementHigh(input),
GetReplacementLow(input));
- Node* rotate_low =
- graph()->NewNode(machine()->Word32Ror(), input_low, safe_shift);
- Node* rotate_high =
- graph()->NewNode(machine()->Word32Ror(), input_high, safe_shift);
+ const Operator* oper =
+ is_ror ? machine()->Word32Ror() : machine()->Word32Rol().op();
+
+ Node* rotate_low = graph()->NewNode(oper, input_low, safe_shift);
+ Node* rotate_high = graph()->NewNode(oper, input_high, safe_shift);
+
+ auto* mask1 = bit_mask;
+ auto* mask2 = inv_mask;
+ if (!is_ror) std::swap(mask1, mask2);
Node* low_node = graph()->NewNode(
machine()->Word32Or(),
- graph()->NewNode(machine()->Word32And(), rotate_low, bit_mask),
- graph()->NewNode(machine()->Word32And(), rotate_high, inv_mask));
-
+ graph()->NewNode(machine()->Word32And(), rotate_low, mask1),
+ graph()->NewNode(machine()->Word32And(), rotate_high, mask2));
Node* high_node = graph()->NewNode(
machine()->Word32Or(),
- graph()->NewNode(machine()->Word32And(), rotate_high, bit_mask),
- graph()->NewNode(machine()->Word32And(), rotate_low, inv_mask));
-
+ graph()->NewNode(machine()->Word32And(), rotate_high, mask1),
+ graph()->NewNode(machine()->Word32And(), rotate_low, mask2));
ReplaceNode(node, low_node, high_node);
}
break;
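A scalar sketch of what the register-shift path is aiming for, under the assumption that the shift count is taken modulo 64 (names invented for illustration): rotate both 32-bit halves by the shift modulo 32, swap the halves first when the shift is 32 or more, and use a mask to decide which rotated half contributes each bit. The Rol case reuses the same structure with the shift operators and the two masks swapped, as the code above does.

#include <cstdint>
#include <utility>

uint64_t Ror64ViaWord32(uint64_t value, unsigned shift) {
  unsigned k = shift & 63;
  uint32_t lo = static_cast<uint32_t>(value);
  uint32_t hi = static_cast<uint32_t>(value >> 32);
  if (k >= 32) std::swap(lo, hi);  // the lt32 selection in the lowering
  unsigned safe = k & 31;
  // The low `32 - safe` bits of each output word come from the same half,
  // the top `safe` bits from the other half; both halves are rotated first.
  uint32_t inv_mask = ~(0xffffffffu >> safe);   // high `safe` bits set
  uint32_t bit_mask = ~inv_mask;                // low `32 - safe` bits set
  uint32_t rot_lo = (lo >> safe) | (lo << ((32 - safe) & 31));
  uint32_t rot_hi = (hi >> safe) | (hi << ((32 - safe) & 31));
  uint32_t new_lo = (rot_lo & bit_mask) | (rot_hi & inv_mask);
  uint32_t new_hi = (rot_hi & bit_mask) | (rot_lo & inv_mask);
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}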
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 3fb41a8809..947f54c410 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -781,8 +781,9 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
public:
- PromiseBuiltinReducerAssembler(JSGraph* jsgraph, Zone* zone, Node* node)
- : JSCallReducerAssembler(jsgraph, zone, node) {
+ PromiseBuiltinReducerAssembler(JSGraph* jsgraph, Zone* zone, Node* node,
+ JSHeapBroker* broker)
+ : JSCallReducerAssembler(jsgraph, zone, node), broker_(broker) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
}
@@ -864,6 +865,8 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
effect(), control()));
});
}
+
+ JSHeapBroker* const broker_;
};
class FastApiCallReducerAssembler : public JSCallReducerAssembler {
@@ -921,9 +924,10 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
Internals::kJSObjectHeaderSize +
(Internals::kEmbedderDataSlotSize * wrapper_object_index);
- FieldAccess access(kTaggedBase, offset, MaybeHandle<Name>(),
- MaybeHandle<Map>(), Type::Any(), MachineType::Pointer(),
- WriteBarrierKind::kNoWriteBarrier);
+ FieldAccess access(
+ kTaggedBase, offset, MaybeHandle<Name>(), MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer() : Type::Any(),
+ MachineType::Pointer(), WriteBarrierKind::kNoWriteBarrier);
TNode<RawPtrT> load = AddNode<RawPtrT>(graph()->NewNode(
simplified()->LoadField(access), node, effect(), control()));
return load;
@@ -2100,12 +2104,19 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
TrueConstant());
// Allocate closures for the resolve and reject cases.
- TNode<JSFunction> resolve = CreateClosureFromBuiltinSharedFunctionInfo(
- native_context.promise_capability_default_resolve_shared_fun(),
- promise_context);
- TNode<JSFunction> reject = CreateClosureFromBuiltinSharedFunctionInfo(
- native_context.promise_capability_default_reject_shared_fun(),
- promise_context);
+ SharedFunctionInfoRef resolve_sfi(
+ broker_, broker_->isolate()
+ ->factory()
+ ->promise_capability_default_resolve_shared_fun());
+ TNode<JSFunction> resolve =
+ CreateClosureFromBuiltinSharedFunctionInfo(resolve_sfi, promise_context);
+
+ SharedFunctionInfoRef reject_sfi(
+ broker_, broker_->isolate()
+ ->factory()
+ ->promise_capability_default_reject_shared_fun());
+ TNode<JSFunction> reject =
+ CreateClosureFromBuiltinSharedFunctionInfo(reject_sfi, promise_context);
FrameState lazy_with_catch_frame_state =
PromiseConstructorLazyWithCatchFrameState(
@@ -2419,8 +2430,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
node, javascript()->CallWithArrayLike(
p.frequency(), p.feedback(), p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
- Reduction const reduction = ReduceJSCallWithArrayLike(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCallWithArrayLike(node));
} else {
// Check whether {arguments_list} is null.
Node* check_null =
@@ -2498,8 +2508,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
}
// ES section #sec-function.prototype.bind
@@ -2685,8 +2694,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
}
// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
@@ -2918,8 +2926,7 @@ Reduction JSCallReducer::ReduceReflectApply(Node* node) {
node, javascript()->CallWithArrayLike(p.frequency(), p.feedback(),
p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
- Reduction const reduction = ReduceJSCallWithArrayLike(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCallWithArrayLike(node));
}
// ES6 section 26.1.2 Reflect.construct ( target, argumentsList [, newTarget] )
@@ -2942,8 +2949,7 @@ Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
}
NodeProperties::ChangeOp(node,
javascript()->ConstructWithArrayLike(p.frequency()));
- Reduction const reduction = ReduceJSConstructWithArrayLike(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSConstructWithArrayLike(node));
}
// ES6 section 26.1.7 Reflect.getPrototypeOf ( target )
@@ -3766,8 +3772,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
node, javascript()->Call(arity + 1, frequency, feedback,
ConvertReceiverMode::kAny, speculation_mode,
CallFeedbackRelation::kUnrelated));
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
} else {
NodeProperties::ChangeOp(
node, javascript()->Construct(arity + 2, frequency, feedback));
@@ -3821,8 +3826,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- Reduction const reduction = ReduceJSConstruct(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSConstruct(node));
}
}
@@ -3852,6 +3856,19 @@ bool ShouldUseCallICFeedback(Node* node) {
} // namespace
+bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
+ if (should_disallow_heap_access() && !function.serialized()) {
+ TRACE_BROKER_MISSING(broker(), "data for function " << function);
+ return false;
+ }
+
+ // TODO(neis): Add a way to check if function template info isn't serialized
+ // and add a warning in such cases. Currently we can't tell if function
+ // template info doesn't exist or wasn't serialized.
+ return function.shared().HasBuiltinId() ||
+ function.shared().function_template_info().has_value();
+}
+
Reduction JSCallReducer::ReduceJSCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -3911,8 +3928,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
}
// Don't mess with other {node}s that have a constant {target}.
@@ -3965,8 +3981,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
}
if (!ShouldUseCallICFeedback(target) ||
@@ -3978,7 +3993,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
ProcessedFeedback const& feedback =
broker()->GetFeedbackForCall(p.feedback());
if (feedback.IsInsufficient()) {
- return ReduceSoftDeoptimize(
+ return ReduceForInsufficientFeedback(
node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
}
@@ -3986,6 +4001,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (feedback_target.has_value() && feedback_target->map().is_callable()) {
Node* target_function = jsgraph()->Constant(*feedback_target);
+ if (FLAG_turboprop) {
+ if (!feedback_target->IsJSFunction()) return NoChange();
+ if (!IsBuiltinOrApiFunction(feedback_target->AsJSFunction())) {
+ return NoChange();
+ }
+ }
+
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
target_function);
@@ -3998,8 +4020,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
NodeProperties::ReplaceEffectInput(node, effect);
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell(
broker(), feedback_target.value().AsFeedbackCell().object());
@@ -4013,6 +4034,12 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
broker(), "feedback vector, not serialized: " << feedback_vector);
return NoChange();
}
+
+ if (FLAG_turboprop &&
+ !feedback_vector.shared_function_info().HasBuiltinId()) {
+ return NoChange();
+ }
+
Node* target_closure = effect =
graph()->NewNode(simplified()->CheckClosure(feedback_cell.object()),
target, effect, control);
@@ -4022,8 +4049,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
NodeProperties::ReplaceEffectInput(node, effect);
// Try to further reduce the JSCall {node}.
- Reduction const reduction = ReduceJSCall(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSCall(node));
}
}
return NoChange();
@@ -4118,11 +4144,14 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kArrayPrototypeSlice:
return ReduceArrayPrototypeSlice(node);
case Builtins::kArrayPrototypeEntries:
- return ReduceArrayIterator(node, IterationKind::kEntries);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kArrayLike,
+ IterationKind::kEntries);
case Builtins::kArrayPrototypeKeys:
- return ReduceArrayIterator(node, IterationKind::kKeys);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kArrayLike,
+ IterationKind::kKeys);
case Builtins::kArrayPrototypeValues:
- return ReduceArrayIterator(node, IterationKind::kValues);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kArrayLike,
+ IterationKind::kValues);
case Builtins::kArrayIteratorPrototypeNext:
return ReduceArrayIteratorPrototypeNext(node);
case Builtins::kArrayIsArray:
@@ -4323,11 +4352,14 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kStringPrototypeConcat:
return ReduceStringPrototypeConcat(node);
case Builtins::kTypedArrayPrototypeEntries:
- return ReduceArrayIterator(node, IterationKind::kEntries);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kTypedArray,
+ IterationKind::kEntries);
case Builtins::kTypedArrayPrototypeKeys:
- return ReduceArrayIterator(node, IterationKind::kKeys);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kTypedArray,
+ IterationKind::kKeys);
case Builtins::kTypedArrayPrototypeValues:
- return ReduceArrayIterator(node, IterationKind::kValues);
+ return ReduceArrayIterator(node, ArrayIteratorKind::kTypedArray,
+ IterationKind::kValues);
case Builtins::kPromisePrototypeCatch:
return ReducePromisePrototypeCatch(node);
case Builtins::kPromisePrototypeFinally:
@@ -4417,7 +4449,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
ProcessedFeedback const& feedback =
broker()->GetFeedbackForCall(p.feedback());
if (feedback.IsInsufficient()) {
- return ReduceSoftDeoptimize(
+ return ReduceForInsufficientFeedback(
node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
}
@@ -4469,8 +4501,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
// Try to further reduce the JSConstruct {node}.
- Reduction const reduction = ReduceJSConstruct(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSConstruct(node));
}
}
@@ -4588,8 +4619,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
- Reduction const reduction = ReduceJSConstruct(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSConstruct(node));
}
// TODO(bmeurer): Also support optimizing proxies here.
@@ -4629,8 +4659,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
- Reduction const reduction = ReduceJSConstruct(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSConstruct(node));
}
return NoChange();
@@ -4831,9 +4860,15 @@ Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
return Replace(receiver);
}
-Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
- DeoptimizeReason reason) {
+Reduction JSCallReducer::ReduceForInsufficientFeedback(
+ Node* node, DeoptimizeReason reason) {
+ DCHECK(node->opcode() == IrOpcode::kJSCall ||
+ node->opcode() == IrOpcode::kJSConstruct);
if (!(flags() & kBailoutOnUninitialized)) return NoChange();
+ // TODO(mythria): Maybe add additional flags to specify if we need to deopt
+ // on calls / construct rather than checking for TurboProp here. We may need
+ // it for NativeContextIndependent code too.
+ if (FLAG_turboprop) return NoChange();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -5490,7 +5525,9 @@ Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
return Changed(node);
}
-Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
+Reduction JSCallReducer::ReduceArrayIterator(Node* node,
+ ArrayIteratorKind array_kind,
+ IterationKind iteration_kind) {
DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5505,6 +5542,39 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
return NoChange();
}
+ // TypedArray iteration is stricter: it throws if the receiver is not a typed
+ // array. So don't bother optimizing in that case.
+ if (array_kind == ArrayIteratorKind::kTypedArray &&
+ !inference.AllOfInstanceTypesAre(InstanceType::JS_TYPED_ARRAY_TYPE)) {
+ return NoChange();
+ }
+
+ if (array_kind == ArrayIteratorKind::kTypedArray) {
+ // Make sure we deopt when the JSArrayBuffer is detached.
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ jsgraph()->ZeroConstant());
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached,
+ p.feedback()),
+ check, effect, control);
+ }
+ }
+
// Morph the {node} into a JSCreateArrayIterator with the given {kind}.
RelaxControls(node);
node->ReplaceInput(0, receiver);
@@ -5512,7 +5582,8 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
node->ReplaceInput(2, effect);
node->ReplaceInput(3, control);
node->TrimInputCount(4);
- NodeProperties::ChangeOp(node, javascript()->CreateArrayIterator(kind));
+ NodeProperties::ChangeOp(node,
+ javascript()->CreateArrayIterator(iteration_kind));
return Changed(node);
}
@@ -5810,19 +5881,24 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
if (node->op()->ValueInputCount() < 3) {
+ effect = graph()->NewNode(simplified()->CheckString(p.feedback()), receiver,
+ effect, control);
+
Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
- Node* string = NodeProperties::GetValueInput(node, 1);
Node* search_string = NodeProperties::GetValueInput(node, 2);
Node* position = node->op()->ValueInputCount() >= 4
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->ZeroConstant();
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
HeapObjectMatcher m(search_string);
if (m.HasValue()) {
@@ -5830,13 +5906,14 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
if (target_ref.IsString()) {
StringRef str = target_ref.AsString();
if (str.length() == 1) {
- string = effect = graph()->NewNode(
- simplified()->CheckString(p.feedback()), string, effect, control);
+ receiver = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), receiver, effect, control);
+
position = effect = graph()->NewNode(
simplified()->CheckSmi(p.feedback()), position, effect, control);
Node* string_length =
- graph()->NewNode(simplified()->StringLength(), string);
+ graph()->NewNode(simplified()->StringLength(), receiver);
Node* unsigned_position = graph()->NewNode(
simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
@@ -5856,7 +5933,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* masked_position =
graph()->NewNode(simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), string,
+ graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
masked_position, etrue, if_true);
Node* search_first = jsgraph()->Constant(str.GetFirstChar());
@@ -6003,9 +6080,10 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* input = NodeProperties::GetValueInput(node, 2);
- input = effect =
- graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
- jsgraph()->Constant(0x10FFFF + 1), effect, control);
+ input = effect = graph()->NewNode(
+ simplified()->CheckBounds(p.feedback(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ input, jsgraph()->Constant(0x10FFFF + 1), effect, control);
Node* value =
graph()->NewNode(simplified()->StringFromSingleCodePoint(), input);
@@ -6142,7 +6220,7 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
- PromiseBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ PromiseBuiltinReducerAssembler a(jsgraph(), temp_zone(), node, broker());
// We only inline when we have the executor.
if (a.ConstructArity() < 1) return NoChange();
@@ -6211,8 +6289,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
ConvertReceiverMode::kNotNullOrUndefined,
p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
- Reduction const reduction = ReducePromisePrototypeThen(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
@@ -6288,14 +6365,16 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
+ SharedFunctionInfoRef promise_catch_finally(
+ broker(), factory()->promise_catch_finally_shared_fun());
catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
- native_context().promise_catch_finally_shared_fun(), context, etrue,
- if_true);
+ promise_catch_finally, context, etrue, if_true);
// Allocate the closure for the fulfill case.
+ SharedFunctionInfoRef promise_then_finally(
+ broker(), factory()->promise_then_finally_shared_fun());
then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
- native_context().promise_then_finally_shared_fun(), context, etrue,
- if_true);
+ promise_then_finally, context, etrue, if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -6339,8 +6418,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
ConvertReceiverMode::kNotNullOrUndefined,
p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
- Reduction const reduction = ReducePromisePrototypeThen(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
@@ -7113,6 +7191,7 @@ uint32_t ExternalArrayElementSize(const ExternalArrayType element_type) {
Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
ExternalArrayType element_type) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kJSCall);
size_t const element_size = ExternalArrayElementSize(element_type);
CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
@@ -7121,18 +7200,17 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
: jsgraph()->ZeroConstant();
- Node* value = (access == DataViewAccess::kGet)
- ? nullptr
- : (node->op()->ValueInputCount() > 3
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->ZeroConstant());
- Node* is_little_endian = (access == DataViewAccess::kGet)
- ? (node->op()->ValueInputCount() > 3
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->FalseConstant())
- : (node->op()->ValueInputCount() > 4
- ? NodeProperties::GetValueInput(node, 4)
- : jsgraph()->FalseConstant());
+ Node* value = nullptr;
+ if (access == DataViewAccess::kSet) {
+ value = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ }
+ const int endian_index = (access == DataViewAccess::kGet ? 3 : 4);
+ Node* is_little_endian =
+ (node->op()->ValueInputCount() > endian_index
+ ? NodeProperties::GetValueInput(node, endian_index)
+ : jsgraph()->FalseConstant());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 09e8159cfe..142e42789d 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -106,8 +106,9 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
const SharedFunctionInfoRef& shared);
Reduction ReduceArraySome(Node* node, const SharedFunctionInfoRef& shared);
- enum class ArrayIteratorKind { kArray, kTypedArray };
- Reduction ReduceArrayIterator(Node* node, IterationKind kind);
+ enum class ArrayIteratorKind { kArrayLike, kTypedArray };
+ Reduction ReduceArrayIterator(Node* node, ArrayIteratorKind array_kind,
+ IterationKind iteration_kind);
Reduction ReduceArrayIteratorPrototypeNext(Node* node);
Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
@@ -158,7 +159,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
const SharedFunctionInfoRef& shared);
Reduction ReduceTypedArrayPrototypeToStringTag(Node* node);
- Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ Reduction ReduceForInsufficientFeedback(Node* node, DeoptimizeReason reason);
Reduction ReduceMathUnary(Node* node, const Operator* op);
Reduction ReduceMathBinary(Node* node, const Operator* op);
@@ -217,6 +218,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Node* control, Node** if_true, Node** if_false);
Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control);
+ bool IsBuiltinOrApiFunction(JSFunctionRef target_ref) const;
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 7a3610fff2..cedb5bc42d 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -412,7 +412,7 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
CreateArgumentsType const type = CreateArgumentsTypeOf(node->op());
switch (type) {
case CreateArgumentsType::kMappedArguments:
- ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
+ ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments);
break;
case CreateArgumentsType::kUnmappedArguments:
ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments);
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 2c46b34608..8ff520921f 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/js-heap-broker.h"
+#include "src/common/globals.h"
#include "src/compiler/heap-refs.h"
#ifdef ENABLE_SLOW_DCHECKS
@@ -609,6 +610,7 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; }
+ bool IsOptimized() const { return is_optimized_; }
bool PrototypeRequiresRuntimeLookup() const {
return PrototypeRequiresRuntimeLookup_;
}
@@ -622,6 +624,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* prototype() const { return prototype_; }
SharedFunctionInfoData* shared() const { return shared_; }
FeedbackVectorData* feedback_vector() const { return feedback_vector_; }
+ CodeData* code() const { return code_; }
int initial_map_instance_size_with_min_slack() const {
CHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -631,6 +634,7 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector_;
bool has_initial_map_;
bool has_prototype_;
+ bool is_optimized_;
bool PrototypeRequiresRuntimeLookup_;
bool serialized_ = false;
@@ -641,6 +645,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* prototype_ = nullptr;
SharedFunctionInfoData* shared_ = nullptr;
FeedbackVectorData* feedback_vector_ = nullptr;
+ CodeData* code_ = nullptr;
int initial_map_instance_size_with_min_slack_;
};
@@ -1260,6 +1265,7 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()),
+ is_optimized_(object->IsOptimized()),
PrototypeRequiresRuntimeLookup_(
object->PrototypeRequiresRuntimeLookup()) {}
@@ -1276,6 +1282,7 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
DCHECK_NULL(feedback_vector_);
+ DCHECK_NULL(code_);
context_ = broker->GetOrCreateData(function->context())->AsContext();
native_context_ =
@@ -1285,6 +1292,7 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
? broker->GetOrCreateData(function->feedback_vector())
->AsFeedbackVector()
: nullptr;
+ code_ = broker->GetOrCreateData(function->code())->AsCode();
initial_map_ = has_initial_map()
? broker->GetOrCreateData(function->initial_map())->AsMap()
: nullptr;
@@ -2023,7 +2031,13 @@ class TemplateObjectDescriptionData : public HeapObjectData {
class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object),
+ inlined_bytecode_size_(object->inlined_bytecode_size()) {}
+
+ unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
+
+ private:
+ unsigned const inlined_bytecode_size_;
};
#define DEFINE_IS_AND_AS(Name) \
@@ -2845,28 +2859,6 @@ int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
}
-// Not needed for TypedLowering.
-base::Optional<ScriptContextTableRef::LookupResult>
-ScriptContextTableRef::lookup(const NameRef& name) const {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- if (!name.IsString()) return {};
- ScriptContextTable::LookupResult lookup_result;
- auto table = object();
- if (!ScriptContextTable::Lookup(broker()->isolate(), *table,
- *name.AsString().object(), &lookup_result)) {
- return {};
- }
- Handle<Context> script_context = ScriptContextTable::GetContext(
- broker()->isolate(), table, lookup_result.context_index);
- LookupResult result{ContextRef(broker(), script_context),
- lookup_result.mode == VariableMode::kConst,
- lookup_result.slot_index};
- return result;
-}
-
OddballType MapRef::oddball_type() const {
if (instance_type() != ODDBALL_TYPE) {
return OddballType::kNone;
@@ -3370,6 +3362,7 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_offset)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, IsOptimized)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
@@ -3377,6 +3370,7 @@ BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
+BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
@@ -3412,6 +3406,8 @@ BIMODAL_ACCESSOR(Map, Object, GetConstructor)
BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
+BIMODAL_ACCESSOR_C(Code, unsigned, inlined_bytecode_size)
+
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
BIMODAL_ACCESSOR(NativeContext, type, name)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
@@ -4490,7 +4486,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4669,17 +4664,25 @@ bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
}
namespace {
-MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapHandles const& maps) {
- MapHandles result;
- for (Handle<Map> map : maps) {
+// Remove unupdatable and abandoned prototype maps in-place.
+void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
+ auto in = maps->begin();
+ auto out = in;
+ auto end = maps->end();
+
+ for (; in != end; ++in) {
+ Handle<Map> map = *in;
if (Map::TryUpdate(isolate, map).ToHandle(&map) &&
!map->is_abandoned_prototype_map()) {
DCHECK(!map->is_deprecated());
- result.push_back(map);
+ *out = *in;
+ ++out;
}
}
- return result;
-} // namespace
+
+ // Remove everything between the last valid map and the end of the vector.
+ maps->erase(out, end);
+}
} // namespace
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
@@ -4691,14 +4694,19 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
MapHandles maps;
nexus.ExtractMaps(&maps);
- if (!maps.empty()) {
- maps = GetRelevantReceiverMaps(isolate(), maps);
- if (maps.empty()) return *new (zone()) InsufficientFeedback(kind);
+ FilterRelevantReceiverMaps(isolate(), &maps);
+
+ // If no maps were found for a non-megamorphic access, then our maps died and
+ // we should soft-deopt.
+ if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) {
+ return *new (zone()) InsufficientFeedback(kind);
}
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
if (name.has_value()) {
+ // We rely on this invariant in JSGenericLowering.
+ DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC);
return *new (zone()) NamedAccessFeedback(
*name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind);
} else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
@@ -4707,8 +4715,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
} else {
// No actionable feedback.
DCHECK(maps.empty());
- // TODO(neis): Investigate if we really want to treat cleared the same as
- // megamorphic (also for global accesses).
+ DCHECK_EQ(nexus.ic_state(), MEGAMORPHIC);
// TODO(neis): Using ElementAccessFeedback here is kind of an abuse.
return *new (zone())
ElementAccessFeedback(zone(), KeyedAccessMode::FromNexus(nexus), kind);
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index f38cb51c1f..1c2bf5bc0e 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -22,8 +22,8 @@ namespace compiler {
} while (false)
namespace {
-bool IsSmall(BytecodeArrayRef const& bytecode) {
- return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
+bool IsSmall(int const size) {
+ return size <= FLAG_max_inlined_bytecode_size_small;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
@@ -200,7 +200,16 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
can_inline_candidate = true;
BytecodeArrayRef bytecode = candidate.bytecode[i].value();
candidate.total_size += bytecode.length();
- candidate_is_small = candidate_is_small && IsSmall(bytecode);
+ unsigned inlined_bytecode_size = 0;
+ if (candidate.functions[i].has_value()) {
+ JSFunctionRef function = candidate.functions[i].value();
+ if (function.IsOptimized()) {
+ inlined_bytecode_size = function.code().inlined_bytecode_size();
+ candidate.total_size += inlined_bytecode_size;
+ }
+ }
+ candidate_is_small = candidate_is_small &&
+ IsSmall(bytecode.length() + inlined_bytecode_size);
}
}
if (!can_inline_candidate) return NoChange();
@@ -775,6 +784,13 @@ void JSInliningHeuristic::PrintCandidates() {
os << " - target: " << shared;
if (candidate.bytecode[i].has_value()) {
os << ", bytecode size: " << candidate.bytecode[i]->length();
+ if (candidate.functions[i].has_value()) {
+ JSFunctionRef function = candidate.functions[i].value();
+ if (function.IsOptimized()) {
+ os << ", existing opt code's inlined bytecode size: "
+ << function.code().inlined_bytecode_size();
+ }
+ }
} else {
os << ", no bytecode";
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 3830be4445..a613dacaaa 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -33,6 +33,10 @@ class JSInliningHeuristic final : public AdvancedReducer {
// and inlines call sites that the heuristic determines to be important.
void Finalize() final;
+ int total_inlined_bytecode_size() const {
+ return total_inlined_bytecode_size_;
+ }
+
private:
// This limit currently matches what the old compiler did. We may want to
// re-evaluate and come up with a proper limit for TurboFan.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 3963edcbbd..3283ebd0ef 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -442,8 +442,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
NodeProperties::ReplaceValueInput(node, object, 1);
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSOrdinaryHasInstance(node));
}
if (access_info.IsDataConstant()) {
@@ -623,8 +622,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
NodeProperties::ReplaceValueInput(
node, jsgraph()->Constant(bound_target_function), 1);
NodeProperties::ChangeOp(node, javascript()->InstanceOf(FeedbackSource()));
- Reduction const reduction = ReduceJSInstanceOf(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSInstanceOf(node));
}
if (m.Ref(broker()).IsJSFunction()) {
@@ -650,8 +648,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
NodeProperties::ReplaceValueInput(node, object, 0);
NodeProperties::ReplaceValueInput(node, prototype_constant, 1);
NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
- Reduction const reduction = ReduceJSHasInPrototypeChain(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSHasInPrototypeChain(node));
}
return NoChange();
@@ -2455,8 +2452,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
FeedbackParameter const& p = FeedbackParameterOf(node->op());
Node* const key = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
+ Node* const flags = NodeProperties::GetValueInput(node, 3);
if (!p.feedback().IsValid()) return NoChange();
+
+ NumberMatcher mflags(flags);
+ CHECK(mflags.HasValue());
+ DataPropertyInLiteralFlags cflags(mflags.Value());
+ DCHECK(!(cflags & DataPropertyInLiteralFlag::kDontEnum));
+ if (cflags & DataPropertyInLiteralFlag::kSetFunctionName) return NoChange();
+
return ReducePropertyAccess(node, key, base::nullopt, value,
FeedbackSource(p.feedback()),
AccessMode::kStoreInLiteral);
@@ -2612,9 +2617,10 @@ JSNativeContextSpecialization::BuildElementAccess(
situation = kHandleOOB_SmiCheckDone;
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect =
- graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
- length, effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
situation = kBoundsCheckDone;
}
@@ -2642,7 +2648,8 @@ JSNativeContextSpecialization::BuildElementAccess(
index = etrue = graph()->NewNode(
simplified()->CheckBounds(
FeedbackSource(),
- CheckBoundsParameters::kAbortOnOutOfBounds),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, etrue, if_true);
// Perform the actual load
@@ -2712,7 +2719,8 @@ JSNativeContextSpecialization::BuildElementAccess(
index = etrue = graph()->NewNode(
simplified()->CheckBounds(
FeedbackSource(),
- CheckBoundsParameters::kAbortOnOutOfBounds),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, etrue, if_true);
// Perform the actual store.
@@ -2796,13 +2804,15 @@ JSNativeContextSpecialization::BuildElementAccess(
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
index = effect = graph()->NewNode(
- simplified()->CheckBounds(FeedbackSource()), index,
- jsgraph()->Constant(Smi::kMaxValue), effect, control);
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect =
- graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
- length, effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, effect, control);
}
// Compute the element access.
@@ -2850,10 +2860,12 @@ JSNativeContextSpecialization::BuildElementAccess(
// Do a real bounds check against {length}. This is in order to
// protect against a potential typer bug leading to the elimination of
// the NumberLessThan above.
- index = etrue = graph()->NewNode(
- simplified()->CheckBounds(
- FeedbackSource(), CheckBoundsParameters::kAbortOnOutOfBounds),
- index, length, etrue, if_true);
+ index = etrue =
+ graph()->NewNode(simplified()->CheckBounds(
+ FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
// Perform the actual load
vtrue = etrue =
@@ -2952,9 +2964,10 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
- Node* checked = etrue =
- graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
- length, etrue, if_true);
+ Node* checked = etrue = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, etrue, if_true);
Node* element = etrue =
graph()->NewNode(simplified()->LoadElement(element_access),
@@ -3041,9 +3054,10 @@ JSNativeContextSpecialization::BuildElementAccess(
jsgraph()->Constant(JSObject::kMaxGap))
: graph()->NewNode(simplified()->NumberAdd(), length,
jsgraph()->OneConstant());
- index = effect =
- graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
- limit, effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, limit, effect, control);
// Grow {elements} backing store if necessary.
GrowFastElementsMode mode =
@@ -3111,8 +3125,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
dependencies()->DependOnNoElementsProtector()) {
// Ensure that the {index} is a valid String length.
index = *effect = graph()->NewNode(
- simplified()->CheckBounds(FeedbackSource()), index,
- jsgraph()->Constant(String::kMaxLength), *effect, *control);
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, jsgraph()->Constant(String::kMaxLength), *effect, *control);
// Load the single character string from {receiver} or yield
// undefined if the {index} is not within the valid bounds.
@@ -3129,7 +3144,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
// NumberLessThan above.
Node* etrue = index = graph()->NewNode(
simplified()->CheckBounds(FeedbackSource(),
- CheckBoundsParameters::kAbortOnOutOfBounds),
+ CheckBoundsFlag::kConvertStringAndMinusZero |
+ CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* vtrue = etrue =
@@ -3147,9 +3163,10 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
vtrue, vfalse, *control);
} else {
// Ensure that {index} is less than {receiver} length.
- index = *effect =
- graph()->NewNode(simplified()->CheckBounds(FeedbackSource()), index,
- length, *effect, *control);
+ index = *effect = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource(),
+ CheckBoundsFlag::kConvertStringAndMinusZero),
+ index, length, *effect, *control);
Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 5be5f2148a..5c9a287bcc 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -568,6 +568,13 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect,
if (!(flags() & kBailoutOnUninitialized)) return nullptr;
FeedbackSource source(feedback_vector(), slot);
+ // TODO(mythria): Think of adding flags to specify if we need a soft deopt for
+ // calls instead of using FLAG_turboprop here.
+ if (FLAG_turboprop &&
+ broker()->GetFeedbackSlotKind(source) == FeedbackSlotKind::kCall) {
+ return nullptr;
+ }
+
if (!broker()->FeedbackIsInsufficient(source)) return nullptr;
Node* deoptimize = jsgraph()->graph()->NewNode(
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 47f931317e..69ca3e62e7 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -9,6 +9,7 @@
#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
@@ -540,16 +541,14 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToString());
NodeProperties::SetType(
node, Type::Intersect(r.type(), Type::String(), graph()->zone()));
- Reduction const reduction = ReduceJSToString(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSToString(node));
} else if (r.RightInputIs(empty_string_type_)) {
// JSAdd(x:primitive, "") => JSToString(x)
NodeProperties::ReplaceValueInputs(node, r.left());
NodeProperties::ChangeOp(node, javascript()->ToString());
NodeProperties::SetType(
node, Type::Intersect(r.type(), Type::String(), graph()->zone()));
- Reduction const reduction = ReduceJSToString(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSToString(node));
}
}
@@ -780,9 +779,9 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
if (r.BothInputsAre(Type::Receiver())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
- if (r.OneInputIs(Type::Undetectable())) {
+ if (r.OneInputIs(Type::NullOrUndefined())) {
RelaxEffectsAndControls(node);
- node->RemoveInput(r.LeftInputIs(Type::Undetectable()) ? 0 : 1);
+ node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->ObjectIsUndetectable());
return Changed(node);
@@ -810,32 +809,40 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
// Known that both sides are Receiver, Null or Undefined, the
// abstract equality operation can be performed like this:
//
- // if ObjectIsUndetectable(left)
- // then ObjectIsUndetectable(right)
- // else ReferenceEqual(left, right)
- //
- Node* left = r.left();
- Node* right = r.right();
- Node* effect = r.effect();
- Node* control = r.control();
-
- Node* check = graph()->NewNode(simplified()->ObjectIsUndetectable(), left);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = graph()->NewNode(simplified()->ObjectIsUndetectable(), right);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse =
- graph()->NewNode(simplified()->ReferenceEqual(), left, right);
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- ReplaceWithValue(node, value, effect, control);
+ // if left == undefined || left == null
+ // then ObjectIsUndetectable(right)
+ // else if right == undefined || right == null
+ // then ObjectIsUndetectable(left)
+ // else ReferenceEqual(left, right)
+#define __ gasm.
+ JSGraphAssembler gasm(jsgraph(), jsgraph()->zone());
+ gasm.InitializeEffectControl(r.effect(), r.control());
+
+ auto lhs = TNode<Object>::UncheckedCast(r.left());
+ auto rhs = TNode<Object>::UncheckedCast(r.right());
+
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+ auto check_undetectable = __ MakeLabel(MachineRepresentation::kTagged);
+
+ __ GotoIf(__ ReferenceEqual(lhs, __ UndefinedConstant()),
+ &check_undetectable, rhs);
+ __ GotoIf(__ ReferenceEqual(lhs, __ NullConstant()), &check_undetectable,
+ rhs);
+ __ GotoIf(__ ReferenceEqual(rhs, __ UndefinedConstant()),
+ &check_undetectable, lhs);
+ __ GotoIf(__ ReferenceEqual(rhs, __ NullConstant()), &check_undetectable,
+ lhs);
+ __ Goto(&done, __ ReferenceEqual(lhs, rhs));
+
+ __ Bind(&check_undetectable);
+ __ Goto(&done,
+ __ ObjectIsUndetectable(check_undetectable.PhiAt<Object>(0)));
+
+ __ Bind(&done);
+ Node* value = done.PhiAt(0);
+ ReplaceWithValue(node, value, gasm.effect(), gasm.control());
return Replace(value);
+#undef __
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
@@ -1009,8 +1016,7 @@ Reduction JSTypedLowering::ReduceJSToNumeric(Node* node) {
if (input_type.Is(Type::NonBigIntPrimitive())) {
// ToNumeric(x:primitive\bigint) => ToNumber(x)
NodeProperties::ChangeOp(node, javascript()->ToNumber());
- Reduction const reduction = ReduceJSToNumber(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceJSToNumber(node));
}
return NoChange();
}
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index e97ee820f3..e16290f2a1 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -21,6 +21,10 @@ inline LinkageLocation regloc(Register reg, MachineType type) {
return LinkageLocation::ForRegister(reg.code(), type);
}
+inline LinkageLocation regloc(DoubleRegister reg, MachineType type) {
+ return LinkageLocation::ForRegister(reg.code(), type);
+}
+
} // namespace
@@ -149,7 +153,7 @@ int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const {
return TypedFrameConstants::kFixedSlotCount;
case kCallWasmFunction:
case kCallWasmImportWrapper:
- return WasmCompiledFrameConstants::kFixedSlotCount;
+ return WasmFrameConstants::kFixedSlotCount;
case kCallWasmCapiFunction:
return WasmExitFrameConstants::kFixedSlotCount;
}
@@ -380,20 +384,33 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
LocationSignature::Builder locations(zone, return_count, parameter_count);
// Add returns.
- if (locations.return_count_ > 0) {
- locations.AddReturn(regloc(kReturnRegister0, descriptor.GetReturnType(0)));
- }
- if (locations.return_count_ > 1) {
- locations.AddReturn(regloc(kReturnRegister1, descriptor.GetReturnType(1)));
- }
- if (locations.return_count_ > 2) {
- locations.AddReturn(regloc(kReturnRegister2, descriptor.GetReturnType(2)));
+ static constexpr Register return_registers[] = {
+ kReturnRegister0, kReturnRegister1, kReturnRegister2};
+ size_t num_returns = 0;
+ size_t num_fp_returns = 0;
+ for (size_t i = 0; i < locations.return_count_; i++) {
+ MachineType type = descriptor.GetReturnType(static_cast<int>(i));
+ if (IsFloatingPoint(type.representation())) {
+ DCHECK_LT(num_fp_returns, 1); // Only 1 FP return is supported.
+ locations.AddReturn(regloc(kFPReturnRegister0, type));
+ num_fp_returns++;
+ } else {
+ DCHECK_LT(num_returns, arraysize(return_registers));
+ locations.AddReturn(regloc(return_registers[num_returns], type));
+ num_returns++;
+ }
}
// Add parameters in registers and on the stack.
for (int i = 0; i < js_parameter_count; i++) {
if (i < register_parameter_count) {
// The first parameters go in registers.
+ // TODO(bbudge) Add floating point registers to the InterfaceDescriptor
+ // and use them for FP types. Currently, this works because on most
+ // platforms, all FP registers are available for use. On ia32, xmm0 is
+ // not allocatable and so we must work around that with platform-specific
+ // descriptors, adjusting the GP register set to avoid eax, which has
+ // register code 0.
Register reg = descriptor.GetRegisterParameter(i);
MachineType type = descriptor.GetParameterType(i);
locations.AddParam(regloc(reg, type));
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 9527074825..1b60029169 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/machine-operator-reducer.h"
#include <cmath>
+#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
@@ -14,6 +15,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
#include "src/numbers/conversions-inl.h"
namespace v8 {
@@ -43,6 +45,14 @@ class Word32Adapter {
return x.IsWord32Shl();
}
template <typename T>
+ static bool IsWordNShr(const T& x) {
+ return x.IsWord32Shr();
+ }
+ template <typename T>
+ static bool IsWordNSar(const T& x) {
+ return x.IsWord32Sar();
+ }
+ template <typename T>
static bool IsWordNXor(const T& x) {
return x.IsWord32Xor();
}
@@ -65,6 +75,7 @@ class Word32Adapter {
Reduction TryMatchWordNRor(Node* node) { return r_->TryMatchWord32Ror(node); }
Node* IntNConstant(int32_t value) { return r_->Int32Constant(value); }
+ Node* UintNConstant(uint32_t value) { return r_->Uint32Constant(value); }
Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word32And(lhs, rhs); }
private:
@@ -94,6 +105,14 @@ class Word64Adapter {
return x.IsWord64Shl();
}
template <typename T>
+ static bool IsWordNShr(const T& x) {
+ return x.IsWord64Shr();
+ }
+ template <typename T>
+ static bool IsWordNSar(const T& x) {
+ return x.IsWord64Sar();
+ }
+ template <typename T>
static bool IsWordNXor(const T& x) {
return x.IsWord64Xor();
}
@@ -119,6 +138,7 @@ class Word64Adapter {
}
Node* IntNConstant(int64_t value) { return r_->Int64Constant(value); }
+ Node* UintNConstant(uint64_t value) { return r_->Uint64Constant(value); }
Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word64And(lhs, rhs); }
private:
@@ -246,6 +266,12 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
return quotient;
}
+Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
+ Node* const node = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+ Reduction const reduction = ReduceTruncateInt64ToInt32(node);
+ return reduction.Changed() ? reduction.replacement() : node;
+}
+
// Perform constant folding and strength reduction on machine operators.
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
@@ -297,25 +323,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
- if (m.left().IsWord32And() && m.right().HasValue()) {
- Uint32BinopMatcher mand(m.left().node());
- if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
- mand.right().HasValue()) {
- Uint32BinopMatcher mshift(mand.left().node());
- // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
- if (mshift.right().HasValue()) {
- auto shift_bits = mshift.right().Value();
- auto mask = mand.right().Value();
- auto rhs = static_cast<uint32_t>(m.right().Value());
- // Make sure that we won't shift data off the end.
- if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
- shift_bits <= base::bits::CountLeadingZeros(rhs)) {
- node->ReplaceInput(
- 0, Word32And(mshift.left().node(), mask << shift_bits));
- node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
- return Changed(node);
- }
- }
+ if (m.right().HasValue()) {
+ base::Optional<std::pair<Node*, uint32_t>> replacements;
+ if (m.left().IsTruncateInt64ToInt32()) {
+ replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
+ NodeProperties::GetValueInput(m.left().node(), 0),
+ static_cast<uint32_t>(m.right().Value()));
+ } else {
+ replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
+ m.left().node(), static_cast<uint32_t>(m.right().Value()));
+ }
+ if (replacements) {
+ node->ReplaceInput(0, replacements->first);
+ node->ReplaceInput(1, Uint32Constant(replacements->second));
+ return Changed(node);
}
}
break;
@@ -361,8 +382,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
node->ReplaceInput(
1, Int32Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Word32Shl());
- Reduction reduction = ReduceWord32Shl(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceWord32Shl(node));
}
break;
}
@@ -405,7 +425,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceBool(true);
}
}
- break;
+ return ReduceWord32Comparisons(node);
}
case IrOpcode::kInt32LessThanOrEqual: {
Int32BinopMatcher m(node);
@@ -413,7 +433,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceBool(m.left().Value() <= m.right().Value());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
- break;
+ return ReduceWord32Comparisons(node);
}
case IrOpcode::kUint32LessThan: {
Uint32BinopMatcher m(node);
@@ -438,7 +458,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// TODO(turbofan): else the comparison is always true.
}
}
- break;
+ return ReduceWord32Comparisons(node);
}
case IrOpcode::kUint32LessThanOrEqual: {
Uint32BinopMatcher m(node);
@@ -448,7 +468,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceBool(m.left().Value() <= m.right().Value());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
- break;
+ return ReduceWord32Comparisons(node);
}
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
@@ -804,12 +824,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
return NoChange();
}
- case IrOpcode::kTruncateInt64ToInt32: {
- Int64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
- if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
- break;
- }
+ case IrOpcode::kTruncateInt64ToInt32:
+ return ReduceTruncateInt64ToInt32(node);
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) {
@@ -859,12 +875,24 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTrapIf:
case IrOpcode::kTrapUnless:
return ReduceConditional(node);
+ case IrOpcode::kInt64LessThan:
+ case IrOpcode::kInt64LessThanOrEqual:
+ case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
+ return ReduceWord64Comparisons(node);
default:
break;
}
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
+ Int64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
Int32BinopMatcher m(node);
@@ -879,8 +907,7 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
node->ReplaceInput(0, m.right().node());
node->ReplaceInput(1, mleft.right().node());
NodeProperties::ChangeOp(node, machine()->Int32Sub());
- Reduction const reduction = ReduceInt32Sub(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceInt32Sub(node));
}
}
if (m.right().IsInt32Sub()) {
@@ -888,8 +915,7 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
if (mright.left().Is(0)) { // y + (0 - x) => y - x
node->ReplaceInput(1, mright.right().node());
NodeProperties::ChangeOp(node, machine()->Int32Sub());
- Reduction const reduction = ReduceInt32Sub(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceInt32Sub(node));
}
}
// (x + Int32Constant(a)) + Int32Constant(b)) => x + Int32Constant(a + b)
@@ -940,8 +966,7 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
node->ReplaceInput(
1, Int32Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int32Add());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceInt32Add(node));
}
return NoChange();
}
@@ -959,8 +984,7 @@ Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
node->ReplaceInput(
1, Int64Constant(base::NegateWithWraparound(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Int64Add());
- Reduction const reduction = ReduceInt64Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceInt64Add(node));
}
return NoChange();
}
@@ -984,8 +1008,7 @@ Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
node->ReplaceInput(
1, Int64Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
NodeProperties::ChangeOp(node, machine()->Word64Shl());
- Reduction reduction = ReduceWord64Shl(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceWord64Shl(node));
}
return NoChange();
}
@@ -1231,6 +1254,78 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kInt32LessThan ||
+ node->opcode() == IrOpcode::kInt32LessThanOrEqual ||
+ node->opcode() == IrOpcode::kUint32LessThan ||
+ node->opcode() == IrOpcode::kUint32LessThanOrEqual);
+ Int32BinopMatcher m(node);
+ // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
+ if (m.left().op() == machine()->Word32SarShiftOutZeros() &&
+ m.right().op() == machine()->Word32SarShiftOutZeros()) {
+ Int32BinopMatcher mleft(m.left().node());
+ Int32BinopMatcher mright(m.right().node());
+ if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, mright.left().node());
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+const Operator* MachineOperatorReducer::Map64To32Comparison(
+ const Operator* op, bool sign_extended) {
+ switch (op->opcode()) {
+ case IrOpcode::kInt64LessThan:
+ return sign_extended ? machine()->Int32LessThan()
+ : machine()->Uint32LessThan();
+ case IrOpcode::kInt64LessThanOrEqual:
+ return sign_extended ? machine()->Int32LessThanOrEqual()
+ : machine()->Uint32LessThanOrEqual();
+ case IrOpcode::kUint64LessThan:
+ return machine()->Uint32LessThan();
+ case IrOpcode::kUint64LessThanOrEqual:
+ return machine()->Uint32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ }
+}
+
+Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kInt64LessThan ||
+ node->opcode() == IrOpcode::kInt64LessThanOrEqual ||
+ node->opcode() == IrOpcode::kUint64LessThan ||
+ node->opcode() == IrOpcode::kUint64LessThanOrEqual);
+ Int64BinopMatcher m(node);
+
+ bool sign_extended =
+ m.left().IsChangeInt32ToInt64() && m.right().IsChangeInt32ToInt64();
+ if (sign_extended || (m.left().IsChangeUint32ToUint64() &&
+ m.right().IsChangeUint32ToUint64())) {
+ node->ReplaceInput(0, NodeProperties::GetValueInput(m.left().node(), 0));
+ node->ReplaceInput(1, NodeProperties::GetValueInput(m.right().node(), 0));
+ NodeProperties::ChangeOp(node,
+ Map64To32Comparison(node->op(), sign_extended));
+ return Changed(node).FollowedBy(Reduce(node));
+ }
+
+ // (x >>> K) < (y >>> K) => x < y if only zeros shifted out
+ // This is useful for Smi untagging, which results in such a shift.
+ if (m.left().op() == machine()->Word64SarShiftOutZeros() &&
+ m.right().op() == machine()->Word64SarShiftOutZeros()) {
+ Int64BinopMatcher mleft(m.left().node());
+ Int64BinopMatcher mright(m.right().node());
+ if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, mright.left().node());
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
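When both inputs of a 64-bit comparison are widened 32-bit values, the comparison can instead be done on the 32-bit inputs, and the kind of widening decides whether the narrowed comparison is signed (Int32LessThan) or unsigned (Uint32LessThan). A quick numeric check of both cases, illustrative only:

#include <cstdint>

// Sign-extended operands: the signed 64-bit comparison agrees with the
// signed 32-bit comparison, including for negative values.
static_assert((int64_t{-5} < int64_t{3}) == (int32_t{-5} < int32_t{3}),
              "ChangeInt32ToInt64 inputs narrow to Int32LessThan");
// Zero-extended operands: the widened values are non-negative, so the signed
// 64-bit comparison matches the unsigned 32-bit comparison.
static_assert((int64_t{0x80000000LL} < int64_t{1}) ==
                  (uint32_t{0x80000000u} < uint32_t{1u}),
              "ChangeUint32ToUint64 inputs narrow to Uint32LessThan");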
Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
(node->opcode() == IrOpcode::kWord32Shr) ||
@@ -1259,17 +1354,44 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
base::ShlWithWraparound(m.left().Value(), m.right().Value()));
}
if (m.right().IsInRange(1, 31)) {
- // (x >>> K) << K => x & ~(2^K - 1)
- // (x >> K) << K => x & ~(2^K - 1)
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
+
+ // If x >> K only shifted out zeros:
+ // (x >> K) << L => x if K == L
+ // (x >> K) << L => x >> (K-L) if K > L
+ // (x >> K) << L => x << (L-K) if K < L
+ // Since this is used for Smi untagging, we currently only need it for
+ // signed shifts.
+ if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
+ mleft.right().IsInRange(1, 31)) {
+ Node* x = mleft.left().node();
+ int k = mleft.right().Value();
+ int l = m.right().Value();
+ if (k == l) {
+ return Replace(x);
+ } else if (k > l) {
+ node->ReplaceInput(0, x);
+ node->ReplaceInput(1, Uint32Constant(k - l));
+ NodeProperties::ChangeOp(node, machine()->Word32Sar());
+ return Changed(node).FollowedBy(ReduceWord32Sar(node));
+ } else {
+ DCHECK(k < l);
+ node->ReplaceInput(0, x);
+ node->ReplaceInput(1, Uint32Constant(l - k));
+ return Changed(node);
+ }
+ }
+
+ // (x >>> K) << K => x & ~(2^K - 1)
+ // (x >> K) << K => x & ~(2^K - 1)
if (mleft.right().Is(m.right().Value())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1,
- Uint32Constant(~((1U << m.right().Value()) - 1U)));
+ Uint32Constant(std::numeric_limits<uint32_t>::max()
+ << m.right().Value()));
NodeProperties::ChangeOp(node, machine()->Word32And());
- Reduction reduction = ReduceWord32And(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceWord32And(node));
}
}
}
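A concrete instance of each branch of the new shift-combining rule, illustrative only, assuming x has its low K bits clear so the right shift discards only zeros:

#include <cstdint>

constexpr int32_t x = 0b10100000;  // low 3 bits are zero
static_assert((x >> 3) << 3 == x, "K == L: the shifts cancel");
static_assert((x >> 3) << 1 == x >> 2, "K > L: a single right shift by K - L");
static_assert((x >> 3) << 5 == x << 2, "K < L: a single left shift by L - K");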
@@ -1284,6 +1406,46 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
return ReplaceInt64(
base::ShlWithWraparound(m.left().Value(), m.right().Value()));
}
+ if (m.right().IsInRange(1, 63) &&
+ (m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
+ Int64BinopMatcher mleft(m.left().node());
+
+ // If x >> K only shifted out zeros:
+ // (x >> K) << L => x if K == L
+ // (x >> K) << L => x >> (K-L) if K > L
+ // (x >> K) << L => x << (L-K) if K < L
+ // Since this is used for Smi untagging, we currently only need it for
+ // signed shifts.
+ if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
+ mleft.right().IsInRange(1, 63)) {
+ Node* x = mleft.left().node();
+ int64_t k = mleft.right().Value();
+ int64_t l = m.right().Value();
+ if (k == l) {
+ return Replace(x);
+ } else if (k > l) {
+ node->ReplaceInput(0, x);
+ node->ReplaceInput(1, Uint64Constant(k - l));
+ NodeProperties::ChangeOp(node, machine()->Word64Sar());
+ return Changed(node).FollowedBy(ReduceWord64Sar(node));
+ } else {
+ DCHECK(k < l);
+ node->ReplaceInput(0, x);
+ node->ReplaceInput(1, Uint64Constant(l - k));
+ return Changed(node);
+ }
+ }
+
+ // (x >>> K) << K => x & ~(2^K - 1)
+ // (x >> K) << K => x & ~(2^K - 1)
+ if (mleft.right().Is(m.right().Value())) {
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
+ << m.right().Value()));
+ NodeProperties::ChangeOp(node, machine()->Word64And());
+ return Changed(node).FollowedBy(ReduceWord64And(node));
+ }
+ }
return NoChange();
}
@@ -1331,8 +1493,7 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, mleft.left().node());
NodeProperties::ChangeOp(node, machine()->Int32Sub());
- Reduction const reduction = ReduceInt32Sub(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(ReduceInt32Sub(node));
}
} else if (mleft.left().IsLoad()) {
LoadRepresentation const rep =
@@ -1382,8 +1543,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(
1, a.IntNConstant(m.right().Value() & mleft.right().Value()));
- Reduction const reduction = a.ReduceWordNAnd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceWordNAnd(node));
}
}
if (m.right().IsNegativePowerOf2()) {
@@ -1406,8 +1566,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
a.WordNAnd(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleft.right().node());
NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
- Reduction const reduction = a.ReduceIntNAdd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
}
if (A::IsIntNMul(mleft.left())) {
typename A::IntNBinopMatcher mleftleft(mleft.left().node());
@@ -1417,8 +1576,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
0, a.WordNAnd(mleft.right().node(), m.right().node()));
node->ReplaceInput(1, mleftleft.node());
NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
- Reduction const reduction = a.ReduceIntNAdd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
}
}
if (A::IsIntNMul(mleft.right())) {
@@ -1429,8 +1587,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
a.WordNAnd(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleftright.node());
NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
- Reduction const reduction = a.ReduceIntNAdd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
}
}
if (A::IsWordNShl(mleft.left())) {
@@ -1441,8 +1598,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
0, a.WordNAnd(mleft.right().node(), m.right().node()));
node->ReplaceInput(1, mleftleft.node());
NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
- Reduction const reduction = a.ReduceIntNAdd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
}
}
if (A::IsWordNShl(mleft.right())) {
@@ -1453,8 +1609,7 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
a.WordNAnd(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleftright.node());
NodeProperties::ChangeOp(node, a.IntNAdd(machine()));
- Reduction const reduction = a.ReduceIntNAdd(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(a.ReduceIntNAdd(node));
}
}
} else if (A::IsIntNMul(m.left())) {
@@ -1543,6 +1698,20 @@ Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
+ // (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
+ // This case can be constructed by UpdateWord and UpdateWord32 in CSA.
+ if (m.right().HasValue()) {
+ if (A::IsWordNAnd(m.left())) {
+ typename A::IntNBinopMatcher mand(m.left().node());
+ if (mand.right().HasValue()) {
+ if ((m.right().Value() | mand.right().Value()) == -1) {
+ node->ReplaceInput(0, mand.left().node());
+ return Changed(node);
+ }
+ }
+ }
+ }
+
return a.TryMatchWordNRor(node);
}
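The new WordNOr case drops an And whose mask is fully covered by the Or constant: when K2 has a one in every bit where K1 has a zero (so K1 | K2 == -1), the And cannot influence the result. A 32-bit example, illustrative only:

#include <cstdint>

constexpr uint32_t k1 = 0xFFFFFF00u;  // And mask
constexpr uint32_t k2 = 0x000000FFu;  // Or constant covering every zero bit of k1
static_assert((k1 | k2) == 0xFFFFFFFFu, "precondition: K1 | K2 == -1");
constexpr uint32_t x = 0x12345678u;
static_assert(((x & k1) | k2) == (x | k2), "the And is redundant");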
@@ -1696,25 +1865,64 @@ Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
// Reductions involving control flow happen elsewhere. Non-zero inputs are
// considered true in all conditional ops.
NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
- if (condition.IsWord32And()) {
- Uint32BinopMatcher mand(condition.node());
- if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+ if (condition.IsTruncateInt64ToInt32()) {
+ if (auto replacement =
+ ReduceConditionalN<Word64Adapter>(condition.node())) {
+ NodeProperties::ReplaceValueInput(node, *replacement, 0);
+ return Changed(node);
+ }
+ } else if (auto replacement = ReduceConditionalN<Word32Adapter>(node)) {
+ NodeProperties::ReplaceValueInput(node, *replacement, 0);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+template <typename WordNAdapter>
+base::Optional<Node*> MachineOperatorReducer::ReduceConditionalN(Node* node) {
+ NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
+ // Branch conditions are 32-bit comparisons against zero, so they are the
+ // opposite of a 32-bit `x == 0` node. To avoid repetition, we can reuse logic
+ // for Word32Equal: if `x == 0` can reduce to `y == 0`, then branch(x) can
+ // reduce to branch(y).
+ auto replacements =
+ ReduceWord32EqualForConstantRhs<WordNAdapter>(condition.node(), 0);
+ if (replacements && replacements->second == 0) return replacements->first;
+ return {};
+}
+
+template <typename WordNAdapter>
+base::Optional<std::pair<Node*, uint32_t>>
+MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
+ uint32_t rhs) {
+ if (WordNAdapter::IsWordNAnd(NodeMatcher(lhs))) {
+ typename WordNAdapter::UintNBinopMatcher mand(lhs);
+ if ((WordNAdapter::IsWordNShr(mand.left()) ||
+ WordNAdapter::IsWordNSar(mand.left())) &&
mand.right().HasValue()) {
- Uint32BinopMatcher mshift(mand.left().node());
- // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+ typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
+ // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
if (mshift.right().HasValue()) {
auto shift_bits = mshift.right().Value();
auto mask = mand.right().Value();
- // Make sure that we won't shift data off the end.
- if (shift_bits <= base::bits::CountLeadingZeros(mask)) {
- NodeProperties::ReplaceValueInput(
- node, Word32And(mshift.left().node(), mask << shift_bits), 0);
- return Changed(node);
+ // Make sure that we won't shift data off the end, and that all of the
+ // data ends up in the lower 32 bits for 64-bit mode.
+ if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
+ shift_bits <= base::bits::CountLeadingZeros(rhs) &&
+ mask << shift_bits <= std::numeric_limits<uint32_t>::max()) {
+ Node* new_input = mshift.left().node();
+ uint32_t new_mask = static_cast<uint32_t>(mask << shift_bits);
+ uint32_t new_rhs = rhs << shift_bits;
+ if (WordNAdapter::WORD_SIZE == 64) {
+ // We can truncate before performing the And.
+ new_input = TruncateInt64ToInt32(new_input);
+ }
+ return std::make_pair(Word32And(new_input, new_mask), new_rhs);
}
}
}
}
- return NoChange();
+ return {};
}
CommonOperatorBuilder* MachineOperatorReducer::common() const {
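ReduceWord32EqualForConstantRhs rewrites ((x >> K1) & K2) == K3 into (x & (K2 << K1)) == (K3 << K1), provided neither the mask nor the right-hand side loses bits when shifted. A worked 32-bit instance testing a single bit, illustrative only:

#include <cstdint>

constexpr uint32_t x = 0b1010u;
// ((x >> 1) & 1) == 1 tests bit 1 of x ...
static_assert(((x >> 1) & 1u) == 1u, "original condition");
// ... and so does (x & (1 << 1)) == (1 << 1), with the shift folded away.
static_assert((x & (1u << 1)) == (1u << 1), "rewritten condition");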
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 53c5d6fa68..7970daefce 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -62,6 +62,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Node* Int32Mul(Node* lhs, Node* rhs);
Node* Int32Div(Node* dividend, int32_t divisor);
Node* Uint32Div(Node* dividend, uint32_t divisor);
+ Node* TruncateInt64ToInt32(Node* value);
Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
Reduction ReplaceFloat32(volatile float value) {
@@ -91,6 +92,9 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceUint32Mod(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
+ const Operator* Map64To32Comparison(const Operator* op, bool sign_extended);
+ Reduction ReduceWord32Comparisons(Node* node);
+ Reduction ReduceWord64Comparisons(Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
Reduction ReduceWord64Shl(Node* node);
@@ -109,6 +113,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
Reduction ReduceFloat64RoundDown(Node* node);
+ Reduction ReduceTruncateInt64ToInt32(Node* node);
Reduction ReduceConditional(Node* node);
Graph* graph() const;
@@ -125,6 +130,18 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
template <typename WordNAdapter>
Reduction ReduceWordNXor(Node* node);
+ // Helper for ReduceConditional. Does not perform the actual reduction; just
+ // returns a new Node that could be used as the input to the condition.
+ template <typename WordNAdapter>
+ base::Optional<Node*> ReduceConditionalN(Node* node);
+
+ // Helper for finding a reduced equality condition. Does not perform the
+ // actual reduction; just returns a new pair that could be compared for the
+ // same outcome.
+ template <typename WordNAdapter>
+ base::Optional<std::pair<Node*, uint32_t>> ReduceWord32EqualForConstantRhs(
+ Node* lhs, uint32_t rhs);
+
MachineGraph* mcgraph_;
bool allow_signalling_nan_;
};
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 898182db31..9a985eb5fa 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator.h"
+#include <type_traits>
-#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
@@ -106,7 +106,6 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
return OpParameter<LoadRepresentation>(op);
}
-
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
@@ -150,6 +149,22 @@ MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
+size_t hash_value(ShiftKind kind) { return static_cast<size_t>(kind); }
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, ShiftKind kind) {
+ switch (kind) {
+ case ShiftKind::kNormal:
+ return os << "Normal";
+ case ShiftKind::kShiftOutZeros:
+ return os << "ShiftOutZeros";
+ }
+}
+
+ShiftKind ShiftKindOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32Sar == op->opcode() ||
+ IrOpcode::kWord64Sar == op->opcode());
+ return OpParameter<ShiftKind>(op);
+}
+
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
#define PURE_BINARY_OP_LIST_32(V) \
@@ -158,7 +173,6 @@ MachineType AtomicOpType(Operator const* op) {
V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -183,7 +197,6 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -324,6 +337,8 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Qfma, Operator::kNoProperties, 3, 0, 1) \
V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \
+ V(F64x2Pmin, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Pmax, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -345,6 +360,8 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Qfma, Operator::kNoProperties, 3, 0, 1) \
V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \
+ V(F32x4Pmin, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Pmax, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
@@ -474,6 +491,8 @@ MachineType AtomicOpType(Operator const* op) {
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32Rol, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Rol, Operator::kNoProperties, 2, 0, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 2) \
@@ -568,14 +587,6 @@ MachineType AtomicOpType(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
-#define ATOMIC_PAIR_BINOP_LIST(V) \
- V(Add) \
- V(Sub) \
- V(And) \
- V(Or) \
- V(Xor) \
- V(Exchange)
-
#define SIMD_LANE_OP_LIST(V) \
V(F64x2, 2) \
V(F32x4, 4) \
@@ -587,443 +598,372 @@ MachineType AtomicOpType(Operator const* op) {
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
+template <IrOpcode::Value op, int value_input_count, int effect_input_count,
+ int control_input_count, int value_output_count,
+ int effect_output_count, int control_output_count>
+struct CachedOperator : public Operator {
+ CachedOperator(Operator::Properties properties, const char* mnemonic)
+ : Operator(op, properties, mnemonic, value_input_count,
+ effect_input_count, control_input_count, value_output_count,
+ effect_output_count, control_output_count) {}
+};
+
+template <IrOpcode::Value op, int value_input_count, int control_input_count,
+ int value_output_count>
+struct CachedPureOperator : public Operator {
+ CachedPureOperator(Operator::Properties properties, const char* mnemonic)
+ : Operator(op, Operator::kPure | properties, mnemonic, value_input_count,
+ 0, control_input_count, value_output_count, 0, 0) {}
+};
+
+template <class Op>
+const Operator* GetCachedOperator() {
+ STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
+ static const Op op;
+ return &op;
+}
+
+template <class Op>
+const Operator* GetCachedOperator(Operator::Properties properties,
+ const char* mnemonic) {
+#ifdef DEBUG
+ static Operator::Properties const initial_properties = properties;
+ static const char* const initial_mnemonic = mnemonic;
+ DCHECK_EQ(properties, initial_properties);
+ DCHECK_EQ(mnemonic, initial_mnemonic);
+#endif
+ STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
+ static const Op op(properties, mnemonic);
+ return &op;
+}
+
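GetCachedOperator replaces the eagerly built MachineOperatorGlobalCache with one lazily constructed function-local static per operator type. A stripped-down sketch of that caching pattern (hypothetical types, not the V8 classes), assuming the cached type is trivially destructible as the STATIC_ASSERT requires:

#include <type_traits>

struct Operator {
  explicit constexpr Operator(int opcode) : opcode(opcode) {}
  int opcode;
};

struct Int32AddOperator : Operator {
  constexpr Int32AddOperator() : Operator(1) {}
};

template <class Op>
const Operator* GetCached() {
  static_assert(std::is_trivially_destructible<Op>::value,
                "no destructor may need to run at process exit");
  static const Op op;  // built on first use, thread-safe since C++11
  return &op;
}

// Every call site shares the same instance, just as the removed
// MachineOperatorGlobalCache fields were shared before.
const Operator* add = GetCached<Int32AddOperator>();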
struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
explicit StackSlotOperator(int size, int alignment)
- : Operator1<StackSlotRepresentation>(
- IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
- "StackSlot", 0, 0, 0, 1, 0, 0,
- StackSlotRepresentation(size, alignment)) {}
+ : Operator1(IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
+ "StackSlot", 0, 0, 0, 1, 0, 0,
+ StackSlotRepresentation(size, alignment)) {}
+};
+
+template <int size, int alignment>
+struct CachedStackSlotOperator : StackSlotOperator {
+ CachedStackSlotOperator() : StackSlotOperator(size, alignment) {}
};
-struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
- value_input_count, 0, control_input_count, output_count, 0, \
- 0) {} \
- }; \
- Name##Operator k##Name;
- MACHINE_PURE_OP_LIST(PURE)
- PURE_OPTIONAL_OP_LIST(PURE)
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator( \
+ flags_ & k##Name, \
+ GetCachedOperator< \
+ CachedPureOperator<IrOpcode::k##Name, value_input_count, \
+ control_input_count, output_count>>(properties, \
+ #Name)); \
+ }
+PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-#define OVERFLOW_OP(Name, properties) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, \
- Operator::kEliminatable | Operator::kNoRead | properties, \
- #Name, 2, 0, 1, 2, 0, 0) {} \
- }; \
- Name##Operator k##Name;
- OVERFLOW_OP_LIST(OVERFLOW_OP)
+#define OVERFLOW_OP(Name, properties) \
+ const Operator* MachineOperatorBuilder::Name() { \
+ return GetCachedOperator< \
+ CachedOperator<IrOpcode::k##Name, 2, 0, 1, 2, 0, 0>>( \
+ Operator::kEliminatable | Operator::kNoRead | properties, #Name); \
+ }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>(IrOpcode::kLoad, \
- Operator::kEliminatable, "Load", 2, 1, \
- 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct UnalignedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- UnalignedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kUnalignedLoad, Operator::kEliminatable, \
- "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct ProtectedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- ProtectedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kProtectedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
- 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
- UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- ProtectedLoad##Type##Operator kProtectedLoad##Type;
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
+template <ShiftKind kind>
+struct Word32SarOperator : Operator1<ShiftKind> {
+ Word32SarOperator()
+ : Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
+ 1, 0, 0, kind) {}
+};
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- struct KIND##LoadTransform##TYPE##Operator final \
- : public Operator1<LoadTransformParameters> { \
- KIND##LoadTransform##TYPE##Operator() \
- : Operator1<LoadTransformParameters>( \
- IrOpcode::kLoadTransform, Operator::kEliminatable, \
- #KIND "LoadTransform", 2, 1, 1, 1, 1, 0, \
- LoadTransformParameters{LoadKind::k##KIND, \
- LoadTransformation::k##TYPE}) {} \
- }; \
- KIND##LoadTransform##TYPE##Operator k##KIND##LoadTransform##TYPE;
+const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
+ switch (kind) {
+ case ShiftKind::kNormal:
+ return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
+ case ShiftKind::kShiftOutZeros:
+ return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
+ }
+}
-#define LOAD_TRANSFORM(TYPE) \
- LOAD_TRANSFORM_KIND(TYPE, Normal) \
- LOAD_TRANSFORM_KIND(TYPE, Unaligned) \
- LOAD_TRANSFORM_KIND(TYPE, Protected)
+template <ShiftKind kind>
+struct Word64SarOperator : Operator1<ShiftKind> {
+ Word64SarOperator()
+ : Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
+ 1, 0, 0, kind) {}
+};
- LOAD_TRANSFORM_LIST(LOAD_TRANSFORM)
-#undef LOAD_TRANSFORM
-#undef LOAD_TRANSFORM_KIND
+const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
+ switch (kind) {
+ case ShiftKind::kNormal:
+ return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
+ case ShiftKind::kShiftOutZeros:
+ return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
+ }
+}
-#define STACKSLOT(Size, Alignment) \
- struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
- : public StackSlotOperator { \
- StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
- : StackSlotOperator(Size, Alignment) {} \
- }; \
- StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
- kStackSlotOfSize##Size##OfAlignment##Alignment;
- STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
-#undef STACKSLOT
-
-#define STORE(Type) \
- struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
- explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
- : Operator1<StoreRepresentation>( \
- IrOpcode::kStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(MachineRepresentation::Type, \
- write_barrier_kind)) {} \
- }; \
- struct Store##Type##NoWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##NoWriteBarrier##Operator() \
- : Store##Type##Operator(kNoWriteBarrier) {} \
- }; \
- struct Store##Type##AssertNoWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##AssertNoWriteBarrier##Operator() \
- : Store##Type##Operator(kAssertNoWriteBarrier) {} \
- }; \
- struct Store##Type##MapWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##MapWriteBarrier##Operator() \
- : Store##Type##Operator(kMapWriteBarrier) {} \
- }; \
- struct Store##Type##PointerWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##PointerWriteBarrier##Operator() \
- : Store##Type##Operator(kPointerWriteBarrier) {} \
- }; \
- struct Store##Type##EphemeronKeyWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##EphemeronKeyWriteBarrier##Operator() \
- : Store##Type##Operator(kEphemeronKeyWriteBarrier) {} \
- }; \
- struct Store##Type##FullWriteBarrier##Operator final \
- : public Store##Type##Operator { \
- Store##Type##FullWriteBarrier##Operator() \
- : Store##Type##Operator(kFullWriteBarrier) {} \
- }; \
- struct UnalignedStore##Type##Operator final \
- : public Operator1<UnalignedStoreRepresentation> { \
- UnalignedStore##Type##Operator() \
- : Operator1<UnalignedStoreRepresentation>( \
- IrOpcode::kUnalignedStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "UnalignedStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
- }; \
- struct ProtectedStore##Type##Operator \
- : public Operator1<StoreRepresentation> { \
- explicit ProtectedStore##Type##Operator() \
- : Operator1<StoreRepresentation>( \
- IrOpcode::kProtectedStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(MachineRepresentation::Type, \
- kNoWriteBarrier)) {} \
- }; \
- Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
- Store##Type##AssertNoWriteBarrier##Operator \
- kStore##Type##AssertNoWriteBarrier; \
- Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
- Store##Type##PointerWriteBarrier##Operator \
- kStore##Type##PointerWriteBarrier; \
- Store##Type##EphemeronKeyWriteBarrier##Operator \
- kStore##Type##EphemeronKeyWriteBarrier; \
- Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
- UnalignedStore##Type##Operator kUnalignedStore##Type; \
- ProtectedStore##Type##Operator kProtectedStore##Type;
- MACHINE_REPRESENTATION_LIST(STORE)
-#undef STORE
+template <MachineRepresentation rep, MachineSemantic sem>
+struct LoadOperator : public Operator1<LoadRepresentation> {
+ LoadOperator()
+ : Operator1(IrOpcode::kLoad, Operator::kEliminatable, "Load", 2, 1, 1, 1,
+ 1, 0, LoadRepresentation(rep, sem)) {}
+};
-#define ATOMIC_LOAD(Type) \
- struct Word32AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word32AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
- ATOMIC_TYPE_LIST(ATOMIC_LOAD)
-#undef ATOMIC_LOAD
-
-#define ATOMIC_LOAD(Type) \
- struct Word64AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word64AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
- ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
-#undef ATOMIC_LOAD
-
-#define ATOMIC_STORE(Type) \
- struct Word32AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word32AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kWord32AtomicStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
- }; \
- Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
- ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
-#undef ATOMIC_STORE
-
-#define ATOMIC_STORE(Type) \
- struct Word64AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word64AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kWord64AtomicStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
- }; \
- Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
- ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
-#undef ATOMIC_STORE
-
-#define ATOMIC_OP(op, type) \
- struct op##type##Operator : public Operator1<MachineType> { \
- op##type##Operator() \
- : Operator1<MachineType>(IrOpcode::k##op, \
- Operator::kNoDeopt | Operator::kNoThrow, #op, \
- 3, 1, 1, 1, 1, 0, MachineType::type()) {} \
- }; \
- op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type) \
- ATOMIC_OP(Word32AtomicAdd, type) \
- ATOMIC_OP(Word32AtomicSub, type) \
- ATOMIC_OP(Word32AtomicAnd, type) \
- ATOMIC_OP(Word32AtomicOr, type) \
- ATOMIC_OP(Word32AtomicXor, type) \
- ATOMIC_OP(Word32AtomicExchange, type)
- ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
-#undef ATOMIC_OP_LIST
-#define ATOMIC64_OP_LIST(type) \
- ATOMIC_OP(Word64AtomicAdd, type) \
- ATOMIC_OP(Word64AtomicSub, type) \
- ATOMIC_OP(Word64AtomicAnd, type) \
- ATOMIC_OP(Word64AtomicOr, type) \
- ATOMIC_OP(Word64AtomicXor, type) \
- ATOMIC_OP(Word64AtomicExchange, type)
- ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
-#undef ATOMIC64_OP_LIST
-#undef ATOMIC_OP
+template <MachineRepresentation rep, MachineSemantic sem>
+struct PoisonedLoadOperator : public Operator1<LoadRepresentation> {
+ PoisonedLoadOperator()
+ : Operator1(IrOpcode::kPoisonedLoad, Operator::kEliminatable,
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0,
+ LoadRepresentation(rep, sem)) {}
+};
-#define ATOMIC_COMPARE_EXCHANGE(Type) \
- struct Word32AtomicCompareExchange##Type##Operator \
- : public Operator1<MachineType> { \
- Word32AtomicCompareExchange##Type##Operator() \
- : Operator1<MachineType>(IrOpcode::kWord32AtomicCompareExchange, \
- Operator::kNoDeopt | Operator::kNoThrow, \
- "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, \
- 0, MachineType::Type()) {} \
- }; \
- Word32AtomicCompareExchange##Type##Operator \
- kWord32AtomicCompareExchange##Type;
- ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
-#undef ATOMIC_COMPARE_EXCHANGE
-
-#define ATOMIC_COMPARE_EXCHANGE(Type) \
- struct Word64AtomicCompareExchange##Type##Operator \
- : public Operator1<MachineType> { \
- Word64AtomicCompareExchange##Type##Operator() \
- : Operator1<MachineType>(IrOpcode::kWord64AtomicCompareExchange, \
- Operator::kNoDeopt | Operator::kNoThrow, \
- "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, \
- 0, MachineType::Type()) {} \
- }; \
- Word64AtomicCompareExchange##Type##Operator \
- kWord64AtomicCompareExchange##Type;
- ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
-#undef ATOMIC_COMPARE_EXCHANGE
-
- struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
- };
- Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
+template <MachineRepresentation rep, MachineSemantic sem>
+struct UnalignedLoadOperator : public Operator1<LoadRepresentation> {
+ UnalignedLoadOperator()
+ : Operator1(IrOpcode::kUnalignedLoad, Operator::kEliminatable,
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0,
+ LoadRepresentation(rep, sem)) {}
+};
+
+template <MachineRepresentation rep, MachineSemantic sem>
+struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
+ ProtectedLoadOperator()
+ : Operator1(IrOpcode::kProtectedLoad,
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2,
+ 1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
+};
+
+template <LoadKind kind, LoadTransformation type>
+struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
+ LoadTransformOperator()
+ : Operator1(IrOpcode::kLoadTransform, Operator::kEliminatable,
+ "LoadTransform", 2, 1, 1, 1, 1, 0,
+ LoadTransformParameters{kind, type}) {}
+};
+
+template <MachineRepresentation rep, WriteBarrierKind write_barrier_kind>
+struct StoreOperator : public Operator1<StoreRepresentation> {
+ StoreOperator()
+ : Operator1(IrOpcode::kStore,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "Store", 3, 1, 1, 0, 1, 0,
+ StoreRepresentation(rep, write_barrier_kind)) {}
+};
+
+template <MachineRepresentation rep>
+struct UnalignedStoreOperator : public Operator1<UnalignedStoreRepresentation> {
+ UnalignedStoreOperator()
+ : Operator1(IrOpcode::kUnalignedStore,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "UnalignedStore", 3, 1, 1, 0, 1, 0, rep) {}
+};
+
+template <MachineRepresentation rep>
+struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
+ ProtectedStoreOperator()
+ : Operator1(IrOpcode::kProtectedStore,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "Store", 3, 1, 1, 0, 1, 0,
+ StoreRepresentation(rep, kNoWriteBarrier)) {}
+};
- struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+template <MachineRepresentation rep, MachineSemantic sem>
+struct Word32AtomicLoadOperator : public Operator1<LoadRepresentation> {
+ Word32AtomicLoadOperator()
+ : Operator1(IrOpcode::kWord32AtomicLoad, Operator::kEliminatable,
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
+ }
+};
+
+template <MachineRepresentation rep, MachineSemantic sem>
+struct Word64AtomicLoadOperator : public Operator1<LoadRepresentation> {
+ Word64AtomicLoadOperator()
+ : Operator1(IrOpcode::kWord64AtomicLoad, Operator::kEliminatable,
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
+ }
+};
+
+template <MachineRepresentation rep>
+struct Word32AtomicStoreOperator : public Operator1<MachineRepresentation> {
+ Word32AtomicStoreOperator()
+ : Operator1(IrOpcode::kWord32AtomicStore,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
+};
+
+template <MachineRepresentation rep>
+struct Word64AtomicStoreOperator : public Operator1<MachineRepresentation> {
+ Word64AtomicStoreOperator()
+ : Operator1(IrOpcode::kWord64AtomicStore,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
+};
+
+#define ATOMIC_OP(op) \
+ template <MachineRepresentation rep, MachineSemantic sem> \
+ struct op##Operator : public Operator1<MachineType> { \
+ op##Operator() \
+ : Operator1(IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, \
+ #op, 3, 1, 1, 1, 1, 0, MachineType(rep, sem)) {} \
};
- Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+ATOMIC_OP(Word32AtomicAdd)
+ATOMIC_OP(Word32AtomicSub)
+ATOMIC_OP(Word32AtomicAnd)
+ATOMIC_OP(Word32AtomicOr)
+ATOMIC_OP(Word32AtomicXor)
+ATOMIC_OP(Word32AtomicExchange)
+ATOMIC_OP(Word64AtomicAdd)
+ATOMIC_OP(Word64AtomicSub)
+ATOMIC_OP(Word64AtomicAnd)
+ATOMIC_OP(Word64AtomicOr)
+ATOMIC_OP(Word64AtomicXor)
+ATOMIC_OP(Word64AtomicExchange)
+#undef ATOMIC_OP
+
+template <MachineRepresentation rep, MachineSemantic sem>
+struct Word32AtomicCompareExchangeOperator : public Operator1<MachineType> {
+ Word32AtomicCompareExchangeOperator()
+ : Operator1(IrOpcode::kWord32AtomicCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
+ MachineType(rep, sem)) {}
+};
+
+template <MachineRepresentation rep, MachineSemantic sem>
+struct Word64AtomicCompareExchangeOperator : public Operator1<MachineType> {
+ Word64AtomicCompareExchangeOperator()
+ : Operator1(IrOpcode::kWord64AtomicCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
+ MachineType(rep, sem)) {}
+};
+
+struct Word32AtomicPairLoadOperator : public Operator {
+ Word32AtomicPairLoadOperator()
+ : Operator(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+};
+
+struct Word32AtomicPairStoreOperator : public Operator {
+ Word32AtomicPairStoreOperator()
+ : Operator(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+};
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
Word32AtomicPair##op##Operator() \
: Operator(IrOpcode::kWord32AtomicPair##op, \
Operator::kNoDeopt | Operator::kNoThrow, \
- "Word32AtomicPair##op", 4, 1, 1, 2, 1, 0) {} \
- }; \
- Word32AtomicPair##op##Operator kWord32AtomicPair##op;
- ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
+ "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {} \
+ };
+ATOMIC_PAIR_OP(Add)
+ATOMIC_PAIR_OP(Sub)
+ATOMIC_PAIR_OP(And)
+ATOMIC_PAIR_OP(Or)
+ATOMIC_PAIR_OP(Xor)
+ATOMIC_PAIR_OP(Exchange)
#undef ATOMIC_PAIR_OP
-#undef ATOMIC_PAIR_BINOP_LIST
- struct Word32AtomicPairCompareExchangeOperator : public Operator {
- Word32AtomicPairCompareExchangeOperator()
- : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
- };
- Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+struct Word32AtomicPairCompareExchangeOperator : public Operator {
+ Word32AtomicPairCompareExchangeOperator()
+ : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
+};
- struct MemoryBarrierOperator : public Operator {
- MemoryBarrierOperator()
- : Operator(IrOpcode::kMemoryBarrier,
- Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
- 1, 1, 0, 1, 0) {}
- };
- MemoryBarrierOperator kMemoryBarrier;
-
- // The {BitcastWordToTagged} operator must not be marked as pure (especially
- // not idempotent), because otherwise the splitting logic in the Scheduler
- // might decide to split these operators, thus potentially creating live
- // ranges of allocation top across calls or other things that might allocate.
- // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
- struct BitcastWordToTaggedOperator : public Operator {
- BitcastWordToTaggedOperator()
- : Operator(IrOpcode::kBitcastWordToTagged,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
- };
- BitcastWordToTaggedOperator kBitcastWordToTagged;
+struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0, 1,
+ 1, 0, 1, 0) {}
+};
- struct BitcastTaggedToWordOperator : public Operator {
- BitcastTaggedToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
- };
- BitcastTaggedToWordOperator kBitcastTaggedToWord;
+// The {BitcastWordToTagged} operator must not be marked as pure (especially
+// not idempotent), because otherwise the splitting logic in the Scheduler
+// might decide to split these operators, thus potentially creating live
+// ranges of allocation top across calls or other things that might allocate.
+// See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+struct BitcastWordToTaggedOperator : public Operator {
+ BitcastWordToTaggedOperator()
+ : Operator(IrOpcode::kBitcastWordToTagged,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
+};
- struct BitcastMaybeObjectToWordOperator : public Operator {
- BitcastMaybeObjectToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
- };
- BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
+struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+};
- struct TaggedPoisonOnSpeculation : public Operator {
- TaggedPoisonOnSpeculation()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
+struct BitcastMaybeObjectToWordOperator : public Operator {
+ BitcastMaybeObjectToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
+};
- struct Word32PoisonOnSpeculation : public Operator {
- Word32PoisonOnSpeculation()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
+struct TaggedPoisonOnSpeculationOperator : public Operator {
+ TaggedPoisonOnSpeculationOperator()
+ : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+};
- struct Word64PoisonOnSpeculation : public Operator {
- Word64PoisonOnSpeculation()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
+struct Word32PoisonOnSpeculationOperator : public Operator {
+ Word32PoisonOnSpeculationOperator()
+ : Operator(IrOpcode::kWord32PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+};
- struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
- };
- AbortCSAAssertOperator kAbortCSAAssert;
+struct Word64PoisonOnSpeculationOperator : public Operator {
+ Word64PoisonOnSpeculationOperator()
+ : Operator(IrOpcode::kWord64PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+};
- struct DebugBreakOperator : public Operator {
- DebugBreakOperator()
- : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
- 1, 1, 0, 1, 0) {}
- };
- DebugBreakOperator kDebugBreak;
+struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
+};
- struct UnsafePointerAddOperator final : public Operator {
- UnsafePointerAddOperator()
- : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
- "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
- };
- UnsafePointerAddOperator kUnsafePointerAdd;
+struct DebugBreakOperator : public Operator {
+ DebugBreakOperator()
+ : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0, 1,
+ 1, 0, 1, 0) {}
+};
- struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
- explicit StackPointerGreaterThanOperator(StackCheckKind kind)
- : Operator1<StackCheckKind>(
- IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
- "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
- };
-#define STACK_POINTER_GREATER_THAN(Kind) \
- struct StackPointerGreaterThan##Kind##Operator final \
- : public StackPointerGreaterThanOperator { \
- StackPointerGreaterThan##Kind##Operator() \
- : StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \
- }; \
- StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
-
- STACK_POINTER_GREATER_THAN(JSFunctionEntry)
- STACK_POINTER_GREATER_THAN(JSIterationBody)
- STACK_POINTER_GREATER_THAN(CodeStubAssembler)
- STACK_POINTER_GREATER_THAN(Wasm)
-#undef STACK_POINTER_GREATER_THAN
+struct UnsafePointerAddOperator : public Operator {
+ UnsafePointerAddOperator()
+ : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
+ "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
+};
+
+template <StackCheckKind kind>
+struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
+ StackPointerGreaterThanOperator()
+ : Operator1(IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
+ "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
};
struct CommentOperator : public Operator1<const char*> {
explicit CommentOperator(const char* msg)
- : Operator1<const char*>(IrOpcode::kComment, Operator::kNoThrow,
- "Comment", 0, 1, 1, 0, 1, 0, msg) {}
+ : Operator1(IrOpcode::kComment, Operator::kNoThrow, "Comment", 0, 1, 1, 0,
+ 1, 0, msg) {}
};
-namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
- GetMachineOperatorGlobalCache)
-}
-
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: zone_(zone),
- cache_(*GetMachineOperatorGlobalCache()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
@@ -1032,9 +972,11 @@ MachineOperatorBuilder::MachineOperatorBuilder(
}
const Operator* MachineOperatorBuilder::UnalignedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kUnalignedLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ UnalignedLoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1046,7 +988,8 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
switch (rep) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
- return &cache_.kUnalignedStore##kRep;
+ return GetCachedOperator< \
+ UnalignedStoreOperator<MachineRepresentation::kRep>>();
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
@@ -1056,39 +999,35 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
UNREACHABLE();
}
-#define PURE(Name, properties, value_input_count, control_input_count, \
- output_count) \
- const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
-MACHINE_PURE_OP_LIST(PURE)
-#undef PURE
-
-#define PURE(Name, properties, value_input_count, control_input_count, \
- output_count) \
- const OptionalOperator MachineOperatorBuilder::Name() { \
- return OptionalOperator(flags_ & k##Name, &cache_.k##Name); \
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const Operator* MachineOperatorBuilder::Name() { \
+ return GetCachedOperator< \
+ CachedPureOperator<IrOpcode::k##Name, value_input_count, \
+ control_input_count, output_count>>(properties, \
+ #Name); \
}
-PURE_OPTIONAL_OP_LIST(PURE)
+MACHINE_PURE_OP_LIST(PURE)
#undef PURE
-#define OVERFLOW_OP(Name, properties) \
- const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
-OVERFLOW_OP_LIST(OVERFLOW_OP)
-#undef OVERFLOW_OP
-
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ LoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
- MACHINE_TYPE_LIST(LOAD)
+ MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kPoisonedLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ PoisonedLoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1096,9 +1035,11 @@ const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kProtectedLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ ProtectedLoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1109,7 +1050,8 @@ const Operator* MachineOperatorBuilder::LoadTransform(
LoadKind kind, LoadTransformation transform) {
#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
if (kind == LoadKind::k##KIND && transform == LoadTransformation::k##TYPE) { \
- return &cache_.k##KIND##LoadTransform##TYPE; \
+ return GetCachedOperator<LoadTransformOperator< \
+ LoadKind::k##KIND, LoadTransformation::k##TYPE>>(); \
}
#define LOAD_TRANSFORM(TYPE) \
LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1125,9 +1067,9 @@ const Operator* MachineOperatorBuilder::LoadTransform(
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
-#define CASE_CACHED_SIZE(Size, Alignment) \
- if (size == Size && alignment == Alignment) { \
- return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return GetCachedOperator<CachedStackSlotOperator<Size, Alignment>>(); \
}
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
@@ -1143,22 +1085,28 @@ const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- switch (store_rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return &cache_.k##Store##kRep##NoWriteBarrier; \
- case kAssertNoWriteBarrier: \
- return &cache_.k##Store##kRep##AssertNoWriteBarrier; \
- case kMapWriteBarrier: \
- return &cache_.k##Store##kRep##MapWriteBarrier; \
- case kPointerWriteBarrier: \
- return &cache_.k##Store##kRep##PointerWriteBarrier; \
- case kEphemeronKeyWriteBarrier: \
- return &cache_.k##Store##kRep##EphemeronKeyWriteBarrier; \
- case kFullWriteBarrier: \
- return &cache_.k##Store##kRep##FullWriteBarrier; \
- } \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return GetCachedOperator< \
+ StoreOperator<MachineRepresentation::kRep, kNoWriteBarrier>>(); \
+ case kAssertNoWriteBarrier: \
+ return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
+ kAssertNoWriteBarrier>>(); \
+ case kMapWriteBarrier: \
+ return GetCachedOperator< \
+ StoreOperator<MachineRepresentation::kRep, kMapWriteBarrier>>(); \
+ case kPointerWriteBarrier: \
+ return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
+ kPointerWriteBarrier>>(); \
+ case kEphemeronKeyWriteBarrier: \
+ return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
+ kEphemeronKeyWriteBarrier>>(); \
+ case kFullWriteBarrier: \
+ return GetCachedOperator< \
+ StoreOperator<MachineRepresentation::kRep, kFullWriteBarrier>>(); \
+ } \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1172,9 +1120,10 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
const Operator* MachineOperatorBuilder::ProtectedStore(
MachineRepresentation rep) {
switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return &cache_.kProtectedStore##kRep; \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return GetCachedOperator< \
+ ProtectedStoreOperator<MachineRepresentation::kRep>>(); \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1186,42 +1135,46 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
- return &cache_.kUnsafePointerAdd;
+ return GetCachedOperator<UnsafePointerAddOperator>();
}
const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
StackCheckKind kind) {
switch (kind) {
case StackCheckKind::kJSFunctionEntry:
- return &cache_.kStackPointerGreaterThanJSFunctionEntry;
+ return GetCachedOperator<
+ StackPointerGreaterThanOperator<StackCheckKind::kJSFunctionEntry>>();
case StackCheckKind::kJSIterationBody:
- return &cache_.kStackPointerGreaterThanJSIterationBody;
+ return GetCachedOperator<
+ StackPointerGreaterThanOperator<StackCheckKind::kJSIterationBody>>();
case StackCheckKind::kCodeStubAssembler:
- return &cache_.kStackPointerGreaterThanCodeStubAssembler;
+ return GetCachedOperator<StackPointerGreaterThanOperator<
+ StackCheckKind::kCodeStubAssembler>>();
case StackCheckKind::kWasm:
- return &cache_.kStackPointerGreaterThanWasm;
+ return GetCachedOperator<
+ StackPointerGreaterThanOperator<StackCheckKind::kWasm>>();
}
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
- return &cache_.kBitcastWordToTagged;
+ return GetCachedOperator<BitcastWordToTaggedOperator>();
}
const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
- return &cache_.kBitcastTaggedToWord;
+ return GetCachedOperator<BitcastTaggedToWordOperator>();
}
const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
- return &cache_.kBitcastMaybeObjectToWord;
+ return GetCachedOperator<BitcastMaybeObjectToWordOperator>();
}
const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return &cache_.kAbortCSAAssert;
+ return GetCachedOperator<AbortCSAAssertOperator>();
}
const Operator* MachineOperatorBuilder::DebugBreak() {
- return &cache_.kDebugBreak;
+ return GetCachedOperator<DebugBreakOperator>();
}
const Operator* MachineOperatorBuilder::Comment(const char* msg) {
@@ -1229,14 +1182,16 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
}
const Operator* MachineOperatorBuilder::MemBarrier() {
- return &cache_.kMemoryBarrier;
+ return GetCachedOperator<MemoryBarrierOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord32AtomicLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicLoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
@@ -1245,9 +1200,10 @@ const Operator* MachineOperatorBuilder::Word32AtomicLoad(
const Operator* MachineOperatorBuilder::Word32AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord32AtomicStore##kRep; \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return GetCachedOperator< \
+ Word32AtomicStoreOperator<MachineRepresentation::kRep>>(); \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1255,9 +1211,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicStore(
}
const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
-#define EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicExchange##kType; \
+#define EXCHANGE(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicExchangeOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1266,9 +1224,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicCompareExchange##kType; \
+#define COMPARE_EXCHANGE(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator<Word32AtomicCompareExchangeOperator< \
+ MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1276,9 +1236,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
-#define ADD(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicAdd##kType; \
+#define ADD(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicAddOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
@@ -1286,9 +1248,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
-#define SUB(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicSub##kType; \
+#define SUB(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicSubOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
@@ -1296,9 +1260,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
-#define AND(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicAnd##kType; \
+#define AND(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicAndOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
@@ -1306,9 +1272,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
-#define OR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicOr##kType; \
+#define OR(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicOrOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
@@ -1316,9 +1284,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
-#define XOR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord32AtomicXor##kType; \
+#define XOR(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word32AtomicXorOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
@@ -1327,9 +1297,11 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord64AtomicLoad##Type; \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicLoadOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
@@ -1338,9 +1310,10 @@ const Operator* MachineOperatorBuilder::Word64AtomicLoad(
const Operator* MachineOperatorBuilder::Word64AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord64AtomicStore##kRep; \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return GetCachedOperator< \
+ Word64AtomicStoreOperator<MachineRepresentation::kRep>>(); \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1348,9 +1321,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicStore(
}
const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
-#define ADD(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicAdd##kType; \
+#define ADD(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicAddOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
@@ -1358,9 +1333,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
-#define SUB(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicSub##kType; \
+#define SUB(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicSubOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
@@ -1368,9 +1345,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
-#define AND(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicAnd##kType; \
+#define AND(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicAndOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(AND)
#undef AND
@@ -1378,9 +1357,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
-#define OR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicOr##kType; \
+#define OR(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicOrOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(OR)
#undef OR
@@ -1388,9 +1369,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
-#define XOR(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicXor##kType; \
+#define XOR(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicXorOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
@@ -1398,9 +1381,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
-#define EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicExchange##kType; \
+#define EXCHANGE(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator< \
+ Word64AtomicExchangeOperator<MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1409,9 +1394,11 @@ const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(kType) \
- if (type == MachineType::kType()) { \
- return &cache_.kWord64AtomicCompareExchange##kType; \
+#define COMPARE_EXCHANGE(Type) \
+ if (type == MachineType::Type()) { \
+ return GetCachedOperator<Word64AtomicCompareExchangeOperator< \
+ MachineType::Type().representation(), \
+ MachineType::Type().semantic()>>(); \
}
ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1419,51 +1406,51 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return &cache_.kWord32AtomicPairLoad;
+ return GetCachedOperator<Word32AtomicPairLoadOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return &cache_.kWord32AtomicPairStore;
+ return GetCachedOperator<Word32AtomicPairStoreOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
- return &cache_.kWord32AtomicPairAdd;
+ return GetCachedOperator<Word32AtomicPairAddOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
- return &cache_.kWord32AtomicPairSub;
+ return GetCachedOperator<Word32AtomicPairSubOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
- return &cache_.kWord32AtomicPairAnd;
+ return GetCachedOperator<Word32AtomicPairAndOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
- return &cache_.kWord32AtomicPairOr;
+ return GetCachedOperator<Word32AtomicPairOrOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
- return &cache_.kWord32AtomicPairXor;
+ return GetCachedOperator<Word32AtomicPairXorOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
- return &cache_.kWord32AtomicPairExchange;
+ return GetCachedOperator<Word32AtomicPairExchangeOperator>();
}
const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
- return &cache_.kWord32AtomicPairCompareExchange;
+ return GetCachedOperator<Word32AtomicPairCompareExchangeOperator>();
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
+ return GetCachedOperator<TaggedPoisonOnSpeculationOperator>();
}
const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
+ return GetCachedOperator<Word32PoisonOnSpeculationOperator>();
}
const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
+ return GetCachedOperator<Word64PoisonOnSpeculationOperator>();
}
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index d4f9ffa0fd..aa4f2dcf2c 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -18,7 +18,6 @@ namespace internal {
namespace compiler {
// Forward declarations.
-struct MachineOperatorGlobalCache;
class Operator;
@@ -181,6 +180,16 @@ V8_EXPORT_PRIVATE S8x16ShuffleParameter const& S8x16ShuffleParameterOf(
StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
+// out of the left operand are all zeros. If this is not the case, undefined
+// behavior (i.e., incorrect optimizations) will happen.
+// This is mostly useful for Smi untagging.
+enum class ShiftKind { kNormal, kShiftOutZeros };
+
+size_t hash_value(ShiftKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
+ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
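
For the ShiftKind::kShiftOutZeros comment above, a toy illustration of why Smi untagging fits that guarantee, assuming a single zero tag bit (the helpers below are made up for the sketch, not V8's):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;   // assumed: one tag bit
constexpr intptr_t kSmiTag = 0;  // Smi tag bits are all zeros

intptr_t TagSmi(intptr_t value) { return (value << kSmiTagSize) | kSmiTag; }

intptr_t UntagSmi(intptr_t tagged) {
  // The bits shifted out here are exactly the zero tag bits, so this shift
  // satisfies the kShiftOutZeros contract and may be optimized more
  // aggressively (e.g. folded with adjacent shifts).
  return tagged >> kSmiTagSize;
}

int main() {
  assert(UntagSmi(TagSmi(42)) == 42);
  assert(UntagSmi(TagSmi(0)) == 0);
  return 0;
}
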
@@ -211,13 +220,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64ReverseBits = 1u << 17,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
- kAllOptionalOps = kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
- kFloat64RoundUp | kFloat32RoundTruncate |
- kFloat64RoundTruncate | kFloat64RoundTiesAway |
- kFloat32RoundTiesEven | kFloat64RoundTiesEven |
- kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits |
- kInt32AbsWithOverflow | kInt64AbsWithOverflow
+ kWord32Rol = 1u << 22,
+ kWord64Rol = 1u << 23,
+ kAllOptionalOps =
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
+ kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+ kWord32ReverseBits | kWord64ReverseBits | kInt32AbsWithOverflow |
+ kInt64AbsWithOverflow | kWord32Rol | kWord64Rol
};
using Flags = base::Flags<Flag, unsigned>;
@@ -294,7 +305,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word32Xor();
const Operator* Word32Shl();
const Operator* Word32Shr();
- const Operator* Word32Sar();
+ const Operator* Word32Sar(ShiftKind kind);
+ const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
+ const Operator* Word32SarShiftOutZeros() {
+ return Word32Sar(ShiftKind::kShiftOutZeros);
+ }
+ const OptionalOperator Word32Rol();
const Operator* Word32Ror();
const Operator* Word32Equal();
const Operator* Word32Clz();
@@ -319,7 +335,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word64Xor();
const Operator* Word64Shl();
const Operator* Word64Shr();
- const Operator* Word64Sar();
+ const Operator* Word64Sar(ShiftKind kind);
+ const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
+ const Operator* Word64SarShiftOutZeros() {
+ return Word64Sar(ShiftKind::kShiftOutZeros);
+ }
+ const OptionalOperator Word64Rol();
const Operator* Word64Ror();
const Operator* Word64Clz();
const OptionalOperator Word64Ctz();
@@ -553,6 +574,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Le();
const Operator* F64x2Qfma();
const Operator* F64x2Qfms();
+ const Operator* F64x2Pmin();
+ const Operator* F64x2Pmax();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -577,6 +600,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Le();
const Operator* F32x4Qfma();
const Operator* F32x4Qfms();
+ const Operator* F32x4Pmin();
+ const Operator* F32x4Pmax();
const Operator* I64x2Splat();
const Operator* I64x2SplatI32Pair();
@@ -842,7 +867,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Xor) \
V(Word, Shl) \
V(Word, Shr) \
- V(Word, Sar) \
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
@@ -865,9 +889,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
#undef PSEUDO_OP
#undef PSEUDO_OP_LIST
+ const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
+ return Is32() ? Word32Sar(kind) : Word64Sar(kind);
+ }
+ const Operator* WordSarShiftOutZeros() {
+ return WordSar(ShiftKind::kShiftOutZeros);
+ }
+
private:
Zone* zone_;
- MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 6012ae62c5..8c230b6efd 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -5,6 +5,7 @@
#include "src/compiler/memory-lowering.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/common/external-pointer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -51,7 +52,7 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
const char* function_debug_name)
: isolate_(jsgraph->isolate()),
zone_(zone),
- graph_zone_(jsgraph->graph()->zone()),
+ graph_(jsgraph->graph()),
common_(jsgraph->common()),
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
@@ -60,6 +61,8 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
+Zone* MemoryLowering::graph_zone() const { return graph()->zone(); }
+
Reduction MemoryLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
@@ -303,6 +306,29 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
return Changed(node);
}
+Node* MemoryLowering::DecodeExternalPointer(Node* node) {
+ DCHECK(V8_HEAP_SANDBOX_BOOL);
+ DCHECK(node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kPoisonedLoad);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ __ InitializeEffectControl(effect, control);
+
+ // Clone the load node and put it here.
+ // TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
+ // cloning nodes from arbitrary locations in effect/control chains.

+ Node* node_copy = __ AddNode(graph()->CloneNode(node));
+
+ // Uncomment this to generate a breakpoint for debugging purposes.
+ // __ DebugBreak();
+
+ // Decode the loaded external pointer.
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ Node* salt = __ IntPtrConstant(kExternalPointerSalt);
+ Node* decoded_ptr = __ WordXor(node_copy, salt);
+ return decoded_ptr;
+}
+
Reduction MemoryLowering::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
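
The DecodeExternalPointer helper above undoes an XOR-with-salt encoding. A tiny self-contained sketch of that scheme (the salt value below is invented; the real kExternalPointerSalt lives in V8's headers):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kFakeSalt = 0x7857ABCDu;  // stand-in, not V8's constant

uintptr_t Encode(uintptr_t raw_pointer) { return raw_pointer ^ kFakeSalt; }
uintptr_t Decode(uintptr_t encoded) { return encoded ^ kFakeSalt; }

int main() {
  uintptr_t p = 0x12345678u;
  // XOR with the same salt is its own inverse, so decoding restores the
  // original pointer bits.
  assert(Decode(Encode(p)) == p);
  return 0;
}
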
@@ -314,6 +340,13 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
+ if (V8_HEAP_SANDBOX_BOOL &&
+ access.type.Is(Type::SandboxedExternalPointer())) {
+ node = DecodeExternalPointer(node);
+ return Replace(node);
+ } else {
+ DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
+ }
return Changed(node);
}
@@ -351,6 +384,10 @@ Reduction MemoryLowering::ReduceStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
+ // External pointers must never be stored by optimized code.
+ DCHECK_IMPLIES(V8_HEAP_SANDBOX_BOOL,
+ !access.type.Is(Type::ExternalPointer()) &&
+ !access.type.Is(Type::SandboxedExternalPointer()));
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 2c9a0accdb..45015e98bb 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -107,13 +107,14 @@ class MemoryLowering final : public Reducer {
Node* value,
AllocationState const* state,
WriteBarrierKind);
+ Node* DecodeExternalPointer(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
- Graph* graph() const;
+ Graph* graph() const { return graph_; }
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
- Zone* graph_zone() const { return graph_zone_; }
+ inline Zone* graph_zone() const;
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
JSGraphAssembler* gasm() const { return graph_assembler_; }
@@ -121,7 +122,7 @@ class MemoryLowering final : public Reducer {
SetOncePointer<const Operator> allocate_operator_;
Isolate* isolate_;
Zone* zone_;
- Zone* graph_zone_;
+ Graph* graph_;
CommonOperatorBuilder* common_;
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index c335b815aa..a54d89ffb0 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/memory-optimizer.h"
+#include "src/base/logging.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
@@ -321,8 +322,23 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
- memory_lowering()->ReduceLoadField(node);
+ Reduction reduction = memory_lowering()->ReduceLoadField(node);
+ DCHECK(reduction.Changed());
+ // In case of replacement, the replacement graph should not require further
+ // lowering, so we can proceed iterating the graph from the node uses.
EnqueueUses(node, state);
+
+ // Node can be replaced only when V8_HEAP_SANDBOX_BOOL is enabled and
+ // when loading an external pointer value.
+ DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL, reduction.replacement() == node);
+ if (V8_HEAP_SANDBOX_BOOL && reduction.replacement() != node) {
+ // Replace all uses of node and kill the node to make sure we don't leave
+ // dangling dead uses.
+ NodeProperties::ReplaceUses(node, reduction.replacement(),
+ graph_assembler_.effect(),
+ graph_assembler_.control());
+ node->Kill();
+ }
}
void MemoryOptimizer::VisitStoreElement(Node* node,
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 8c4e5e76d5..cf0df2d636 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_NODE_MATCHERS_H_
#include <cmath>
+#include <limits>
#include "src/base/compiler-specific.h"
#include "src/codegen/external-reference.h"
@@ -160,7 +161,7 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
}
bool IsNegativePowerOf2() const {
return this->HasValue() && this->Value() < 0 &&
- ((this->Value() == kMinInt) ||
+ ((this->Value() == std::numeric_limits<T>::min()) ||
(-this->Value() & (-this->Value() - 1)) == 0);
}
bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
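
The IsNegativePowerOf2 change above matters when T is wider than 32 bits: comparing against kMinInt would miss T's own minimum value. A standalone version of the same check, assuming a signed integer T:

#include <cassert>
#include <cstdint>
#include <limits>

template <typename T>
bool IsNegativePowerOf2(T value) {
  // Short-circuit on the minimum value first; negating it would overflow.
  return value < 0 && (value == std::numeric_limits<T>::min() ||
                       (-value & (-value - 1)) == 0);
}

int main() {
  assert(IsNegativePowerOf2<int64_t>(-8));
  assert(IsNegativePowerOf2<int64_t>(std::numeric_limits<int64_t>::min()));
  assert(!IsNegativePowerOf2<int64_t>(-6));
  assert(!IsNegativePowerOf2<int32_t>(7));
  return 0;
}
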
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index ab4ced69ab..3b78872437 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -158,6 +158,21 @@ void NodeProperties::MergeControlToEnd(Graph* graph,
graph->end()->set_op(common->End(graph->end()->InputCount()));
}
+void NodeProperties::RemoveControlFromEnd(Graph* graph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ int index_to_remove = -1;
+ for (int i = 0; i < graph->end()->op()->ControlInputCount(); i++) {
+ int index = NodeProperties::FirstControlIndex(graph->end()) + i;
+ if (graph->end()->InputAt(index) == node) {
+ index_to_remove = index;
+ break;
+ }
+ }
+ CHECK_NE(-1, index_to_remove);
+ graph->end()->RemoveInput(index_to_remove);
+ graph->end()->set_op(common->End(graph->end()->InputCount()));
+}
// static
void NodeProperties::ReplaceUses(Node* node, Node* value, Node* effect,
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 1d6e957d04..5b31f15d48 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -140,6 +140,11 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common,
Node* node);
+ // Removes the control node {node} from the end of the graph, reducing the
+ // existing merge node's input count.
+ static void RemoveControlFromEnd(Graph* graph, CommonOperatorBuilder* common,
+ Node* node);
+
// Replace all uses of {node} with the given replacement nodes. All occurring
// use kinds need to be replaced, {nullptr} is only valid if a use kind is
// guaranteed not to exist.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 1bff71495c..5e7c9fcc39 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -314,22 +314,32 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
return mask == 3;
}
-void Node::Print() const {
+void Node::Print(int depth) const {
StdoutStream os;
- Print(os);
+ Print(os, depth);
}
-void Node::Print(std::ostream& os) const {
- os << *this << std::endl;
- for (Node* input : this->inputs()) {
+namespace {
+void PrintNode(const Node* node, std::ostream& os, int depth,
+ int indentation = 0) {
+ for (int i = 0; i < indentation; ++i) {
os << " ";
- if (input) {
- os << *input;
- } else {
- os << "(NULL)";
- }
- os << std::endl;
}
+ if (node) {
+ os << *node;
+ } else {
+ os << "(NULL)";
+ }
+ os << std::endl;
+ if (depth <= 0) return;
+ for (Node* input : node->inputs()) {
+ PrintNode(input, os, depth - 1, indentation + 1);
+ }
+}
+} // namespace
+
+void Node::Print(std::ostream& os, int depth) const {
+ PrintNode(this, os, depth);
}
std::ostream& operator<<(std::ostream& os, const Node& n) {
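
Node::Print above becomes a depth-limited, indented recursive dump. A standalone approximation with a made-up node type:

#include <iostream>
#include <vector>

struct ToyNode {
  const char* label;
  std::vector<const ToyNode*> inputs;
};

void PrintNode(const ToyNode* node, std::ostream& os, int depth,
               int indentation = 0) {
  for (int i = 0; i < indentation; ++i) os << "  ";
  os << (node ? node->label : "(NULL)") << "\n";
  if (node == nullptr || depth <= 0) return;
  for (const ToyNode* input : node->inputs) {
    PrintNode(input, os, depth - 1, indentation + 1);
  }
}

int main() {
  ToyNode a{"Int32Constant[1]", {}};
  ToyNode b{"Int32Constant[2]", {}};
  ToyNode add{"Int32Add", {&a, &b}};
  PrintNode(&add, std::cout, 1);  // prints the node plus one level of inputs
  return 0;
}
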
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 41dca441f2..8072bab46e 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -144,8 +144,9 @@ class V8_EXPORT_PRIVATE Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
- void Print() const;
- void Print(std::ostream&) const;
+ void Print() const { Print(1); }
+ void Print(int depth) const;
+ void Print(std::ostream&, int depth = 1) const;
private:
struct Use;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 61af28841a..f3b3ff8c8e 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -532,6 +532,7 @@
V(Word32Shl) \
V(Word32Shr) \
V(Word32Sar) \
+ V(Word32Rol) \
V(Word32Ror) \
V(Int32Add) \
V(Int32AddWithOverflow) \
@@ -553,6 +554,7 @@
V(Word64Shl) \
V(Word64Shr) \
V(Word64Sar) \
+ V(Word64Rol) \
V(Word64Ror) \
V(Int64Add) \
V(Int64AddWithOverflow) \
@@ -761,6 +763,8 @@
V(F64x2Le) \
V(F64x2Qfma) \
V(F64x2Qfms) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -786,6 +790,8 @@
V(F32x4Ge) \
V(F32x4Qfma) \
V(F32x4Qfms) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
V(I64x2Splat) \
V(I64x2SplatI32Pair) \
V(I64x2ExtractLane) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 229c7864b2..fbce76716d 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -1042,8 +1042,11 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
}
if (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero())) {
type = Type::Union(type, Type::MinusZero(), zone());
+ // In order to ensure monotonicity of the computation below, we additionally
+ // pretend +0 is present (for simplicity on both sides).
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
}
-
if (!lhs.Is(cache_->kIntegerOrMinusZeroOrNaN) ||
!rhs.Is(cache_->kIntegerOrMinusZeroOrNaN)) {
return Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
@@ -1051,14 +1054,12 @@ Type OperationTyper::NumberMax(Type lhs, Type rhs) {
lhs = Type::Intersect(lhs, cache_->kInteger, zone());
rhs = Type::Intersect(rhs, cache_->kInteger, zone());
+ DCHECK(!lhs.IsNone());
+ DCHECK(!rhs.IsNone());
- if (!lhs.IsNone() || !rhs.IsNone()) {
- double min = std::max(lhs.IsNone() ? -V8_INFINITY : lhs.Min(),
- rhs.IsNone() ? -V8_INFINITY : rhs.Min());
- double max = std::max(lhs.IsNone() ? -V8_INFINITY : lhs.Max(),
- rhs.IsNone() ? -V8_INFINITY : rhs.Max());
- type = Type::Union(type, Type::Range(min, max, zone()), zone());
- }
+ double min = std::max(lhs.Min(), rhs.Min());
+ double max = std::max(lhs.Max(), rhs.Max());
+ type = Type::Union(type, Type::Range(min, max, zone()), zone());
return type;
}
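
The simplified NumberMax range computation above relies on the interval rule max([a1,b1], [a2,b2]) being contained in [max(a1,a2), max(b1,b2)]. A throwaway sketch with a hypothetical Range struct standing in for an integer Type::Range:

#include <algorithm>
#include <cassert>

struct Range { double min, max; };  // stand-in, not V8's Type::Range

// Result range of Math.max over two integer input ranges.
Range NumberMaxRange(Range lhs, Range rhs) {
  return {std::max(lhs.min, rhs.min), std::max(lhs.max, rhs.max)};
}

int main() {
  Range r = NumberMaxRange({-5, 3}, {1, 2});
  // For x in [-5,3] and y in [1,2], Math.max(x, y) is always in [1,3].
  assert(r.min == 1 && r.max == 3);
  return 0;
}
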
@@ -1076,8 +1077,11 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
}
if (lhs.Maybe(Type::MinusZero()) || rhs.Maybe(Type::MinusZero())) {
type = Type::Union(type, Type::MinusZero(), zone());
+ // In order to ensure monotonicity of the computation below, we additionally
+ // pretend +0 is present (for simplicity on both sides).
+ lhs = Type::Union(lhs, cache_->kSingletonZero, zone());
+ rhs = Type::Union(rhs, cache_->kSingletonZero, zone());
}
-
if (!lhs.Is(cache_->kIntegerOrMinusZeroOrNaN) ||
!rhs.Is(cache_->kIntegerOrMinusZeroOrNaN)) {
return Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
@@ -1085,14 +1089,12 @@ Type OperationTyper::NumberMin(Type lhs, Type rhs) {
lhs = Type::Intersect(lhs, cache_->kInteger, zone());
rhs = Type::Intersect(rhs, cache_->kInteger, zone());
+ DCHECK(!lhs.IsNone());
+ DCHECK(!rhs.IsNone());
- if (!lhs.IsNone() || !rhs.IsNone()) {
- double min = std::min(lhs.IsNone() ? +V8_INFINITY : lhs.Min(),
- rhs.IsNone() ? +V8_INFINITY : rhs.Min());
- double max = std::min(lhs.IsNone() ? +V8_INFINITY : lhs.Max(),
- rhs.IsNone() ? +V8_INFINITY : rhs.Max());
- type = Type::Union(type, Type::Range(min, max, zone()), zone());
- }
+ double min = std::min(lhs.Min(), rhs.Min());
+ double max = std::min(lhs.Max(), rhs.Max());
+ type = Type::Union(type, Type::Range(min, max, zone()), zone());
return type;
}
@@ -1249,7 +1251,10 @@ Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
DCHECK(lhs.Is(rhs));
- DCHECK(lhs.Is(Type::NonInternal()) || lhs.Is(Type::Hole()));
+ // TODO(neis): The last condition in this DCHECK is due to the unittest
+ // throwing arbitrary types at the typer. This is not easy to fix.
+ DCHECK(lhs.Is(Type::NonInternal()) || lhs.Is(Type::Hole()) ||
+ FLAG_testing_d8_test_runner);
return singleton_true();
}
if ((lhs.Is(Type::Unique()) || rhs.Is(Type::Unique())) && !lhs.Maybe(rhs)) {
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 6b2fb8bf57..a4892cdb2a 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -53,7 +53,6 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSLoadMessage:
case IrOpcode::kJSStackCheck:
- case IrOpcode::kJSStoreGlobal:
case IrOpcode::kJSStoreMessage:
case IrOpcode::kJSGetIterator:
return false;
@@ -83,6 +82,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSStoreContext:
case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSStoreGlobal:
case IrOpcode::kJSStoreInArrayLiteral:
case IrOpcode::kJSStoreModule:
case IrOpcode::kJSStoreNamed:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 23e844e164..7227c92cd8 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -66,8 +66,6 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out);
- virtual ~Operator() = default;
-
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
// the opcode is stored directly in the operator object.
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 9cfd1cf94d..5e2c7feffd 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -19,7 +19,7 @@ namespace {
// We log detailed phase information about the pipeline
// in both the v8.turbofan and the v8.wasm categories.
-const char kTraceCategory[] = // --
+constexpr const char kTraceCategory[] = // --
TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
TRACE_DISABLED_BY_DEFAULT("v8.wasm");
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 71cef6fd70..ee6609cfa6 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -988,7 +988,9 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
Status FinalizeJobImpl(Isolate* isolate) final;
// Registers weak object to optimized code dependencies.
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code, Isolate* isolate);
+ void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
+ Handle<NativeContext> context,
+ Handle<Code> code);
private:
Zone zone_;
@@ -1167,13 +1169,14 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
}
compilation_info()->SetCode(code);
- compilation_info()->native_context().AddOptimizedCode(*code);
- RegisterWeakObjectsInOptimizedCode(code, isolate);
+ Handle<NativeContext> context(compilation_info()->native_context(), isolate);
+ context->AddOptimizedCode(*code);
+ RegisterWeakObjectsInOptimizedCode(isolate, context, code);
return SUCCEEDED;
}
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
- Handle<Code> code, Isolate* isolate) {
+ Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
std::vector<Handle<Map>> maps;
DCHECK(code->is_optimized_code());
{
@@ -1191,7 +1194,7 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
}
}
for (Handle<Map> map : maps) {
- isolate->heap()->AddRetainedMap(map);
+ isolate->heap()->AddRetainedMap(context, map);
}
code->set_can_have_weak_objects(true);
}
@@ -1409,6 +1412,7 @@ struct InliningPhase {
AddReducer(data, &graph_reducer, &inlining);
}
graph_reducer.ReduceGraph();
+ info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
}
};
@@ -1951,10 +1955,12 @@ struct CsaOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
};
@@ -2322,7 +2328,6 @@ struct VerifyGraphPhase {
case Code::WASM_TO_CAPI_FUNCTION:
case Code::WASM_TO_JS_FUNCTION:
case Code::JS_TO_WASM_FUNCTION:
- case Code::WASM_INTERPRETER_ENTRY:
case Code::C_WASM_ENTRY:
code_type = Verifier::kWasm;
break;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 6298513f55..cbd7722dd2 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -311,6 +311,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* WordSar(Node* a, Node* b) {
return AddNode(machine()->WordSar(), a, b);
}
+ Node* WordSarShiftOutZeros(Node* a, Node* b) {
+ return AddNode(machine()->WordSarShiftOutZeros(), a, b);
+ }
Node* WordRor(Node* a, Node* b) {
return AddNode(machine()->WordRor(), a, b);
}
@@ -346,6 +349,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Word32Sar(Node* a, Node* b) {
return AddNode(machine()->Word32Sar(), a, b);
}
+ Node* Word32SarShiftOutZeros(Node* a, Node* b) {
+ return AddNode(machine()->Word32SarShiftOutZeros(), a, b);
+ }
Node* Word32Ror(Node* a, Node* b) {
return AddNode(machine()->Word32Ror(), a, b);
}
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index d0bac9bfc0..13611e0883 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -238,7 +238,9 @@ Node* RedundancyElimination::EffectPathChecks::LookupBoundsCheckFor(
Node* node) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
if (check->node->opcode() == IrOpcode::kCheckBounds &&
- check->node->InputAt(0) == node && TypeSubsumes(node, check->node)) {
+ check->node->InputAt(0) == node && TypeSubsumes(node, check->node) &&
+ !(CheckBoundsParametersOf(check->node->op()).flags() &
+ CheckBoundsFlag::kConvertStringAndMinusZero)) {
return check->node;
}
}
@@ -333,8 +335,8 @@ Reduction RedundancyElimination::ReduceSpeculativeNumberComparison(Node* node) {
// the regular Number comparisons in JavaScript also identify
// 0 and -0 (unlike special comparisons as Object.is).
NodeProperties::ReplaceValueInput(node, check, 0);
- Reduction const reduction = ReduceSpeculativeNumberComparison(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(
+ ReduceSpeculativeNumberComparison(node));
}
}
}
@@ -351,8 +353,8 @@ Reduction RedundancyElimination::ReduceSpeculativeNumberComparison(Node* node) {
// the regular Number comparisons in JavaScript also identify
// 0 and -0 (unlike special comparisons as Object.is).
NodeProperties::ReplaceValueInput(node, check, 1);
- Reduction const reduction = ReduceSpeculativeNumberComparison(node);
- return reduction.Changed() ? reduction : Changed(node);
+ return Changed(node).FollowedBy(
+ ReduceSpeculativeNumberComparison(node));
}
}
}
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 45d0ec994c..7077f7d643 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -170,7 +170,12 @@ Node* RepresentationChanger::GetRepresentationFor(
// Handle the no-op shortcuts when no checking is necessary.
if (use_info.type_check() == TypeCheckKind::kNone ||
- output_rep != MachineRepresentation::kWord32) {
+ // TODO(nicohartmann@, chromium:1077804): Ignoring {use_info.type_check()}
+ // in case the representation already matches is not correct. For now,
+ // this behavior is disabled only for TypeCheckKind::kBigInt, but should
+ // be fixed for all other type checks.
+ (output_rep != MachineRepresentation::kWord32 &&
+ use_info.type_check() != TypeCheckKind::kBigInt)) {
if (use_info.representation() == output_rep) {
// Representations are the same. That's a no-op.
return node;
@@ -381,6 +386,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
switch (node->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kDelayedStringConstant:
+ if (use_info.type_check() == TypeCheckKind::kBigInt) break;
return node; // No change necessary.
case IrOpcode::kInt32Constant:
case IrOpcode::kFloat64Constant:
@@ -1160,6 +1166,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ DCHECK_EQ(use_info.type_check(), TypeCheckKind::kBigInt);
+ if (output_type.Is(Type::BigInt())) {
+ return node;
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+ }
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 8bdcef511b..cc3243cb2e 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -218,7 +218,7 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
}
void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kGoto);
AddSuccessor(block, succ);
}
@@ -243,7 +243,7 @@ bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
BasicBlock* exception_block) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
DCHECK(IsPotentiallyThrowingCall(call->opcode()));
block->set_control(BasicBlock::kCall);
AddSuccessor(block, success_block);
@@ -253,7 +253,7 @@ void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
block->set_control(BasicBlock::kBranch);
AddSuccessor(block, tblock);
@@ -263,7 +263,7 @@ void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
size_t succ_count) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
block->set_control(BasicBlock::kSwitch);
for (size_t index = 0; index < succ_count; ++index) {
@@ -273,28 +273,28 @@ void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
}
void Schedule::AddTailCall(BasicBlock* block, Node* input) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kTailCall);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
}
void Schedule::AddReturn(BasicBlock* block, Node* input) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kReturn);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
}
void Schedule::AddDeoptimize(BasicBlock* block, Node* input) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kDeoptimize);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
}
void Schedule::AddThrow(BasicBlock* block, Node* input) {
- DCHECK_EQ(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kThrow);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
@@ -302,8 +302,8 @@ void Schedule::AddThrow(BasicBlock* block, Node* input) {
void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock) {
- DCHECK_NE(BasicBlock::kNone, block->control());
- DCHECK_EQ(BasicBlock::kNone, end->control());
+ CHECK_NE(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, end->control());
end->set_control(block->control());
block->set_control(BasicBlock::kBranch);
MoveSuccessors(block, end);
@@ -317,8 +317,8 @@ void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
BasicBlock** succ_blocks, size_t succ_count) {
- DCHECK_NE(BasicBlock::kNone, block->control());
- DCHECK_EQ(BasicBlock::kNone, end->control());
+ CHECK_NE(BasicBlock::kNone, block->control());
+ CHECK_EQ(BasicBlock::kNone, end->control());
end->set_control(block->control());
block->set_control(BasicBlock::kSwitch);
MoveSuccessors(block, end);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index c6e352d90a..4c968d21c8 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -1090,6 +1090,9 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
// OSR entry point. TODO(neis): Support OSR?
return false;
}
+ if (FLAG_turboprop && feedback.slot_kind() == FeedbackSlotKind::kCall) {
+ return false;
+ }
if (feedback.IsInsufficient()) {
environment()->Kill();
return true;
@@ -2299,6 +2302,12 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
if (arguments.size() >= 1) {
ProcessMapHintsForPromises(arguments[0]);
}
+ SharedFunctionInfoRef(
+ broker(),
+ broker()->isolate()->factory()->promise_catch_finally_shared_fun());
+ SharedFunctionInfoRef(
+ broker(),
+ broker()->isolate()->factory()->promise_then_finally_shared_fun());
}
break;
}
@@ -2433,6 +2442,17 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
kMissingArgumentsAreUnknown, result_hints);
}
}
+ SharedFunctionInfoRef(
+ broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_reject_shared_fun());
+ SharedFunctionInfoRef(
+ broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_resolve_shared_fun());
+
break;
case Builtins::kFunctionPrototypeCall:
if (arguments.size() >= 1) {
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index db5a736855..21d34b21d2 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -26,7 +26,31 @@ static const int32_t kMask16 = 0xFFFF;
static const int32_t kMask8 = 0xFF;
static const int32_t kShift16 = 16;
static const int32_t kShift8 = 24;
-} // anonymous
+static const int32_t kShiftMask8 = 0x7;
+static const int32_t kShiftMask16 = 0xF;
+static const int32_t kShiftMask32 = 0x1F;
+
+// Shift values are taken modulo lane size. This helper calculates the mask
+// required for different shift opcodes.
+int GetMaskForShift(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kI8x16Shl:
+ case IrOpcode::kI8x16ShrS:
+ case IrOpcode::kI8x16ShrU:
+ return kShiftMask8;
+ case IrOpcode::kI16x8Shl:
+ case IrOpcode::kI16x8ShrS:
+ case IrOpcode::kI16x8ShrU:
+ return kShiftMask16;
+ case IrOpcode::kI32x4Shl:
+ case IrOpcode::kI32x4ShrS:
+ case IrOpcode::kI32x4ShrU:
+ return kShiftMask32;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+} // anonymous namespace
SimdScalarLowering::SimdScalarLowering(
MachineGraph* mcgraph, Signature<MachineRepresentation>* signature)
@@ -934,9 +958,8 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
}
void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
- DCHECK_EQ(1, node->InputCount());
- int32_t shift_amount = OpParameter<int32_t>(node->op());
- Node* shift_node = graph()->NewNode(common()->Int32Constant(shift_amount));
+ DCHECK_EQ(2, node->InputCount());
+ Node* shift_node = Mask(node->InputAt(1), GetMaskForShift(node));
Node** rep = GetReplacementsWithType(node->InputAt(0), type);
int num_lanes = NumLanes(type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
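
LowerShiftOp above now masks the runtime shift amount with GetMaskForShift, because wasm SIMD shifts are taken modulo the lane width. A scalar sketch of the same masking for an 8-bit lane:

#include <cassert>
#include <cstdint>

uint8_t ShiftLeftI8Lane(uint8_t lane, int32_t shift) {
  int32_t masked = shift & 0x7;  // cf. kShiftMask8: shift amount modulo 8
  return static_cast<uint8_t>(lane << masked);
}

int main() {
  // A shift of 9 on an 8-bit lane behaves like a shift of 1.
  assert(ShiftLeftI8Lane(0x01, 9) == ShiftLeftI8Lane(0x01, 1));
  assert(ShiftLeftI8Lane(0x01, 9) == 0x02);
  return 0;
}
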
@@ -1612,8 +1635,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kS1x16AllTrue: {
DCHECK_EQ(1, node->InputCount());
SimdType input_rep_type = ReplacementType(node->InputAt(0));
+ Node** rep;
+ // If the input is a SIMD float, bitcast it to a SIMD int of the same
+ // shape, because the comparisons below use Word32.
+ if (input_rep_type == SimdType::kFloat32x4) {
+ // TODO(v8:9418): f64x2 lowering is not implemented yet.
+ rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32x4);
+ } else {
+ rep = GetReplacements(node->InputAt(0));
+ }
int input_num_lanes = NumLanes(input_rep_type);
- Node** rep = GetReplacements(node->InputAt(0));
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
Node* true_node = mcgraph_->Int32Constant(1);
Node* false_node = mcgraph_->Int32Constant(0);
@@ -1810,6 +1841,9 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Int32ToFloat32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
UNIMPLEMENTED();
+ } else if (ReplacementType(node) == SimdType::kInt8x16) {
+ SmallerIntToInt32<int8_t>(replacements, result);
+ Int32ToFloat32(result, result);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index aa222af96c..d00acefc39 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -251,6 +251,7 @@ class RepresentationSelector {
void reset_state() { state_ = kUnvisited; }
bool visited() const { return state_ == kVisited; }
bool queued() const { return state_ == kQueued; }
+ bool pushed() const { return state_ == kPushed; }
bool unvisited() const { return state_ == kUnvisited; }
Truncation truncation() const { return truncation_; }
void set_output(MachineRepresentation output) { representation_ = output; }
@@ -284,6 +285,7 @@ class RepresentationSelector {
TickCounter* tick_counter)
: jsgraph_(jsgraph),
zone_(zone),
+ might_need_revisit_(zone),
count_(jsgraph->graph()->NodeCount()),
info_(count_, zone),
#ifdef DEBUG
@@ -291,7 +293,6 @@ class RepresentationSelector {
#endif
nodes_(zone),
replacements_(zone),
- phase_(PROPAGATE),
changer_(changer),
queue_(zone),
typing_stack_(zone),
@@ -306,7 +307,6 @@ class RepresentationSelector {
void RunTypePropagationPhase() {
// Run type propagation.
TRACE("--{Type propagation phase}--\n");
- phase_ = RETYPE;
ResetNodeInfoState();
DCHECK(typing_stack_.empty());
@@ -326,6 +326,11 @@ class RepresentationSelector {
typing_stack_.push({input, 0});
pushed_unvisited = true;
break;
+ } else if (input_info->pushed()) {
+ // If we had already pushed (and not visited) an input, it means that
+ // the current node will be visited before one of its inputs. If this
+ // happens, the current node might need to be revisited.
+ MarkAsPossibleRevisit(current.node, input);
}
}
if (pushed_unvisited) continue;
@@ -337,13 +342,17 @@ class RepresentationSelector {
info->set_visited();
bool updated = UpdateFeedbackType(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode(node, info->truncation(), nullptr);
+ VisitNode<RETYPE>(node, info->truncation(), nullptr);
TRACE(" ==> output ");
PrintOutputInfo(info);
TRACE("\n");
if (updated) {
- for (Node* const user : node->uses()) {
+ auto it = might_need_revisit_.find(node);
+ if (it == might_need_revisit_.end()) continue;
+
+ for (Node* const user : it->second) {
if (GetInfo(user)->visited()) {
+ TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
GetInfo(user)->set_queued();
queue_.push(user);
}
@@ -358,14 +367,18 @@ class RepresentationSelector {
NodeInfo* info = GetInfo(node);
info->set_visited();
bool updated = UpdateFeedbackType(node);
- TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode(node, info->truncation(), nullptr);
+ TRACE(" revisit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode<RETYPE>(node, info->truncation(), nullptr);
TRACE(" ==> output ");
PrintOutputInfo(info);
TRACE("\n");
if (updated) {
+ // Here we need to check all uses since we can't easily know which nodes
+ // will need to be revisited due to having an input which was a
+ // revisited node.
for (Node* const user : node->uses()) {
if (GetInfo(user)->visited()) {
+ TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
GetInfo(user)->set_queued();
queue_.push(user);
}
@@ -595,7 +608,6 @@ class RepresentationSelector {
void RunTruncationPropagationPhase() {
// Run propagation phase to a fixpoint.
TRACE("--{Propagation phase}--\n");
- phase_ = PROPAGATE;
EnqueueInitial(jsgraph_->graph()->end());
// Process nodes from the queue until it is empty.
while (!queue_.empty()) {
@@ -605,7 +617,7 @@ class RepresentationSelector {
info->set_visited();
TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
info->truncation().description());
- VisitNode(node, info->truncation(), nullptr);
+ VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
}
}
@@ -616,7 +628,6 @@ class RepresentationSelector {
// Run lowering and change insertion phase.
TRACE("--{Simplified lowering phase}--\n");
- phase_ = LOWER;
// Process nodes from the collected {nodes_} vector.
for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
Node* node = *i;
@@ -627,7 +638,7 @@ class RepresentationSelector {
source_positions_, source_positions_->GetSourcePosition(node));
NodeOriginTable::Scope origin_scope(node_origins_, "simplified lowering",
node);
- VisitNode(node, info->truncation(), lowering);
+ VisitNode<LOWER>(node, info->truncation(), lowering);
}
// Perform the final replacements.
@@ -652,68 +663,34 @@ class RepresentationSelector {
queue_.push(node);
}
- // Enqueue {use_node}'s {index} input if the {use} contains new information
- // for that input node. Add the input to {nodes_} if this is the first time
- // it's been visited.
+ // Just assert for Retype and Lower. Propagate specialized below.
+ template <Phase T>
void EnqueueInput(Node* use_node, int index,
UseInfo use_info = UseInfo::None()) {
- Node* node = use_node->InputAt(index);
- if (phase_ != PROPAGATE) return;
- NodeInfo* info = GetInfo(node);
-#ifdef DEBUG
- // Check monotonicity of input requirements.
- node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
- use_info);
-#endif // DEBUG
- if (info->unvisited()) {
- // First visit of this node.
- info->set_queued();
- nodes_.push_back(node);
- queue_.push(node);
- TRACE(" initial #%i: ", node->id());
- info->AddUse(use_info);
- PrintTruncation(info->truncation());
- return;
- }
- TRACE(" queue #%i?: ", node->id());
- PrintTruncation(info->truncation());
- if (info->AddUse(use_info)) {
- // New usage information for the node is available.
- if (!info->queued()) {
- queue_.push(node);
- info->set_queued();
- TRACE(" added: ");
- } else {
- TRACE(" inqueue: ");
- }
- PrintTruncation(info->truncation());
- }
+ static_assert(retype<T>() || lower<T>(),
+ "This version of ProcessRemainingInputs has to be called in "
+ "the Retype or Lower phase.");
}
- bool lower() const { return phase_ == LOWER; }
- bool retype() const { return phase_ == RETYPE; }
- bool propagate() const { return phase_ == PROPAGATE; }
+ template <Phase T>
+ static constexpr bool propagate() {
+ return T == PROPAGATE;
+ }
- void SetOutput(Node* node, MachineRepresentation representation,
- Type restriction_type = Type::Any()) {
- NodeInfo* const info = GetInfo(node);
- switch (phase_) {
- case PROPAGATE:
- info->set_restriction_type(restriction_type);
- break;
- case RETYPE:
- DCHECK(info->restriction_type().Is(restriction_type));
- DCHECK(restriction_type.Is(info->restriction_type()));
- info->set_output(representation);
- break;
- case LOWER:
- DCHECK_EQ(info->representation(), representation);
- DCHECK(info->restriction_type().Is(restriction_type));
- DCHECK(restriction_type.Is(info->restriction_type()));
- break;
- }
+ template <Phase T>
+ static constexpr bool retype() {
+ return T == RETYPE;
+ }
+
+ template <Phase T>
+ static constexpr bool lower() {
+ return T == LOWER;
}
+ template <Phase T>
+ void SetOutput(Node* node, MachineRepresentation representation,
+ Type restriction_type = Type::Any());
+
Type GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
bool InputCannotBe(Node* node, Type type) {
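
The surrounding hunks turn the RepresentationSelector's runtime phase_ member into a compile-time template parameter. A reduced sketch of that dispatch pattern (function bodies and names are illustrative only, not the real selector logic):

#include <iostream>

enum Phase { PROPAGATE, RETYPE, LOWER };

template <Phase T> constexpr bool propagate() { return T == PROPAGATE; }
template <Phase T> constexpr bool retype() { return T == RETYPE; }
template <Phase T> constexpr bool lower() { return T == LOWER; }

template <Phase T>
void ProcessInput(int index) {
  if (propagate<T>()) {
    std::cout << "enqueue input " << index << "\n";
  } else if (lower<T>()) {
    std::cout << "convert input " << index << "\n";
  }
  // RETYPE: nothing to do for inputs; the branch is dead for that instantiation.
}

int main() {
  ProcessInput<PROPAGATE>(0);  // prints "enqueue input 0"
  ProcessInput<RETYPE>(0);     // prints nothing
  ProcessInput<LOWER>(0);      // prints "convert input 0"
  return 0;
}
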
@@ -842,148 +819,147 @@ class RepresentationSelector {
}
}
- void ProcessInput(Node* node, int index, UseInfo use) {
- DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
- !node->op()->HasProperty(Operator::kNoDeopt) &&
- node->op()->EffectInputCount() > 0);
-
- switch (phase_) {
- case PROPAGATE:
- EnqueueInput(node, index, use);
- break;
- case RETYPE:
- break;
- case LOWER:
- ConvertInput(node, index, use);
- break;
- }
- }
+ template <Phase T>
+ void ProcessInput(Node* node, int index, UseInfo use);
+ // Just assert for Retype and Lower. Propagate specialized below.
+ template <Phase T>
void ProcessRemainingInputs(Node* node, int index) {
+ static_assert(retype<T>() || lower<T>(),
+ "This version of ProcessRemainingInputs has to be called in "
+ "the Retype or Lower phase.");
DCHECK_GE(index, NodeProperties::PastValueIndex(node));
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
- for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
- i < NodeProperties::PastEffectIndex(node); ++i) {
- EnqueueInput(node, i); // Effect inputs: just visit
- }
- for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
- i < NodeProperties::PastControlIndex(node); ++i) {
- EnqueueInput(node, i); // Control inputs: just visit
+ }
+
+ // Marks {node} for a possible revisit: it is a use of {input} but is visited
+ // before {input}, so it may need to be processed again once {input} is visited.
+ void MarkAsPossibleRevisit(Node* node, Node* input) {
+ auto it = might_need_revisit_.find(input);
+ if (it == might_need_revisit_.end()) {
+ it = might_need_revisit_.insert({input, ZoneVector<Node*>(zone())}).first;
}
+ it->second.push_back(node);
+ TRACE(" Marking #%d: %s as needing revisit due to #%d: %s\n", node->id(),
+ node->op()->mnemonic(), input->id(), input->op()->mnemonic());
}
- // The default, most general visitation case. For {node}, process all value,
- // context, frame state, effect, and control inputs, assuming that value
- // inputs should have {kRepTagged} representation and can observe all output
- // values {kTypeAny}.
+ // Just assert for Retype. Propagate and Lower specialized below.
+ template <Phase T>
void VisitInputs(Node* node) {
- int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
- // Visit value, context and frame state inputs as tagged.
- for (int i = 0; i < tagged_count; i++) {
- ProcessInput(node, i, UseInfo::AnyTagged());
- }
- // Only enqueue other inputs (effects, control).
- for (int i = tagged_count; i < node->InputCount(); i++) {
- EnqueueInput(node, i);
- }
+ static_assert(
+ retype<T>(),
+ "This version of VisitInputs has to be called in the Retype phase.");
}
+ template <Phase T>
void VisitReturn(Node* node) {
int tagged_limit = node->op()->ValueInputCount() +
OperatorProperties::GetContextInputCount(node->op()) +
OperatorProperties::GetFrameStateInputCount(node->op());
// Visit integer slot count to pop
- ProcessInput(node, 0, UseInfo::TruncatingWord32());
+ ProcessInput<T>(node, 0, UseInfo::TruncatingWord32());
// Visit value, context and frame state inputs as tagged.
for (int i = 1; i < tagged_limit; i++) {
- ProcessInput(node, i, UseInfo::AnyTagged());
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (effects, control).
for (int i = tagged_limit; i < node->InputCount(); i++) {
- EnqueueInput(node, i);
+ EnqueueInput<T>(node, i);
}
}
// Helper for an unused node.
+ template <Phase T>
void VisitUnused(Node* node) {
int value_count = node->op()->ValueInputCount() +
OperatorProperties::GetContextInputCount(node->op()) +
OperatorProperties::GetFrameStateInputCount(node->op());
for (int i = 0; i < value_count; i++) {
- ProcessInput(node, i, UseInfo::None());
+ ProcessInput<T>(node, i, UseInfo::None());
}
- ProcessRemainingInputs(node, value_count);
- if (lower()) Kill(node);
+ ProcessRemainingInputs<T>(node, value_count);
+ if (lower<T>()) Kill(node);
}
// Helper for no-op node.
+ template <Phase T>
void VisitNoop(Node* node, Truncation truncation) {
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
MachineRepresentation representation =
GetOutputInfoForPhi(node, TypeOf(node), truncation);
- VisitUnop(node, UseInfo(representation, truncation), representation);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo(representation, truncation), representation);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
}
// Helper for binops of the R x L -> O variety.
+ template <Phase T>
void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
MachineRepresentation output,
Type restriction_type = Type::Any()) {
DCHECK_EQ(2, node->op()->ValueInputCount());
- ProcessInput(node, 0, left_use);
- ProcessInput(node, 1, right_use);
+ ProcessInput<T>(node, 0, left_use);
+ ProcessInput<T>(node, 1, right_use);
for (int i = 2; i < node->InputCount(); i++) {
- EnqueueInput(node, i);
+ EnqueueInput<T>(node, i);
}
- SetOutput(node, output, restriction_type);
+ SetOutput<T>(node, output, restriction_type);
}
// Helper for binops of the I x I -> O variety.
+ template <Phase T>
void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
Type restriction_type = Type::Any()) {
- VisitBinop(node, input_use, input_use, output, restriction_type);
+ VisitBinop<T>(node, input_use, input_use, output, restriction_type);
}
+ template <Phase T>
void VisitSpeculativeInt32Binop(Node* node) {
DCHECK_EQ(2, node->op()->ValueInputCount());
if (BothInputsAre(node, Type::NumberOrOddball())) {
- return VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ return VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
- return VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32);
+ return VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
}
// Helper for unops of the I -> O variety.
+ template <Phase T>
void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output,
Type restriction_type = Type::Any()) {
DCHECK_EQ(1, node->op()->ValueInputCount());
- ProcessInput(node, 0, input_use);
- ProcessRemainingInputs(node, 1);
- SetOutput(node, output, restriction_type);
+ ProcessInput<T>(node, 0, input_use);
+ ProcessRemainingInputs<T>(node, 1);
+ SetOutput<T>(node, output, restriction_type);
}
// Helper for leaf nodes.
+ template <Phase T>
void VisitLeaf(Node* node, MachineRepresentation output) {
DCHECK_EQ(0, node->InputCount());
- SetOutput(node, output);
+ SetOutput<T>(node, output);
}
// Helpers for specific types of binops.
+
+ template <Phase T>
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
+ VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
}
+
+ template <Phase T>
void VisitInt64Binop(Node* node) {
- VisitBinop(node, UseInfo::Word64(), MachineRepresentation::kWord64);
+ VisitBinop<T>(node, UseInfo::Word64(), MachineRepresentation::kWord64);
}
+
+ template <Phase T>
void VisitWord32TruncatingBinop(Node* node) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
}
// Infer representation for phi-like nodes.
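
MarkAsPossibleRevisit in the hunk above records users of an input that has not been visited yet so they can be re-enqueued once that input is processed. A simplified sketch of that bookkeeping follows; std::unordered_map and std::vector stand in for V8's Zone-allocated containers, and the NotifyVisited hook is a hypothetical name, not part of the patch.

// Simplified sketch of the "possible revisit" bookkeeping shown above.
#include <queue>
#include <unordered_map>
#include <vector>

struct Node { int id; };

class RevisitTracker {
 public:
  // Record that `node` uses `input` but `input` has not been visited yet.
  void MarkAsPossibleRevisit(Node* node, Node* input) {
    might_need_revisit_[input].push_back(node);
  }

  // Once `input` has been visited, re-enqueue every recorded user of it.
  void NotifyVisited(Node* input, std::queue<Node*>* worklist) {
    auto it = might_need_revisit_.find(input);
    if (it == might_need_revisit_.end()) return;
    for (Node* user : it->second) worklist->push(user);
    might_need_revisit_.erase(it);
  }

 private:
  std::unordered_map<Node*, std::vector<Node*>> might_need_revisit_;
};
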
@@ -1014,23 +990,25 @@ class RepresentationSelector {
return MachineRepresentation::kFloat64;
} else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) {
return MachineRepresentation::kWord64;
- } else if (type.Is(Type::ExternalPointer())) {
+ } else if (type.Is(Type::ExternalPointer()) ||
+ type.Is(Type::SandboxedExternalPointer())) {
return MachineType::PointerRepresentation();
}
return MachineRepresentation::kTagged;
}
// Helper for handling selects.
+ template <Phase T>
void VisitSelect(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
- ProcessInput(node, 0, UseInfo::Bool());
+ ProcessInput<T>(node, 0, UseInfo::Bool());
MachineRepresentation output =
GetOutputInfoForPhi(node, TypeOf(node), truncation);
- SetOutput(node, output);
+ SetOutput<T>(node, output);
- if (lower()) {
+ if (lower<T>()) {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
if (output != p.representation()) {
@@ -1041,21 +1019,22 @@ class RepresentationSelector {
// Convert inputs to the output representation of this phi, pass the
      // truncation along.
UseInfo input_use(output, truncation);
- ProcessInput(node, 1, input_use);
- ProcessInput(node, 2, input_use);
+ ProcessInput<T>(node, 1, input_use);
+ ProcessInput<T>(node, 2, input_use);
}
// Helper for handling phis.
+ template <Phase T>
void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
MachineRepresentation output =
GetOutputInfoForPhi(node, TypeOf(node), truncation);
// Only set the output representation if not running with type
// feedback. (Feedback typing will set the representation.)
- SetOutput(node, output);
+ SetOutput<T>(node, output);
int values = node->op()->ValueInputCount();
- if (lower()) {
+ if (lower<T>()) {
// Update the phi operator.
if (output != PhiRepresentationOf(node->op())) {
NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
@@ -1066,37 +1045,40 @@ class RepresentationSelector {
// truncation along.
UseInfo input_use(output, truncation);
for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, i < values ? input_use : UseInfo::None());
+ ProcessInput<T>(node, i, i < values ? input_use : UseInfo::None());
}
}
+ template <Phase T>
void VisitObjectIs(Node* node, Type type, SimplifiedLowering* lowering) {
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(type)) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
- if (lower() && !input_type.Maybe(type)) {
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ if (lower<T>() && !input_type.Maybe(type)) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
}
}
+ template <Phase T>
void VisitCheck(Node* node, Type type, SimplifiedLowering* lowering) {
if (InputIs(node, type)) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node,
- UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node,
+ UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
+ MachineRepresentation::kTaggedPointer);
}
}
+ template <Phase T>
void VisitCall(Node* node, SimplifiedLowering* lowering) {
auto call_descriptor = CallDescriptorOf(node->op());
int params = static_cast<int>(call_descriptor->ParameterCount());
@@ -1105,21 +1087,21 @@ class RepresentationSelector {
for (int i = 0; i < value_input_count; i++) {
if (i == 0) {
// The target of the call.
- ProcessInput(node, i, UseInfo::Any());
+ ProcessInput<T>(node, i, UseInfo::Any());
} else if ((i - 1) < params) {
- ProcessInput(node, i,
- TruncatingUseInfoFromRepresentation(
- call_descriptor->GetInputType(i).representation()));
+ ProcessInput<T>(node, i,
+ TruncatingUseInfoFromRepresentation(
+ call_descriptor->GetInputType(i).representation()));
} else {
- ProcessInput(node, i, UseInfo::AnyTagged());
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
}
- ProcessRemainingInputs(node, value_input_count);
+ ProcessRemainingInputs<T>(node, value_input_count);
if (call_descriptor->ReturnCount() > 0) {
- SetOutput(node, call_descriptor->GetReturnType(0).representation());
+ SetOutput<T>(node, call_descriptor->GetReturnType(0).representation());
} else {
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
}
}
@@ -1168,8 +1150,9 @@ class RepresentationSelector {
return machine_type;
}
+ template <Phase T>
void VisitStateValues(Node* node) {
- if (propagate()) {
+ if (propagate<T>()) {
for (int i = 0; i < node->InputCount(); i++) {
// When lowering 64 bit BigInts to Word64 representation, we have to
// make sure they are rematerialized before deoptimization. By
@@ -1178,12 +1161,12 @@ class RepresentationSelector {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) {
- EnqueueInput(node, i, UseInfo::AnyTagged());
+ EnqueueInput<T>(node, i, UseInfo::AnyTagged());
} else {
- EnqueueInput(node, i, UseInfo::Any());
+ EnqueueInput<T>(node, i, UseInfo::Any());
}
}
- } else if (lower()) {
+ } else if (lower<T>()) {
Zone* zone = jsgraph_->zone();
ZoneVector<MachineType>* types =
new (zone->New(sizeof(ZoneVector<MachineType>)))
@@ -1203,29 +1186,30 @@ class RepresentationSelector {
NodeProperties::ChangeOp(
node, jsgraph_->common()->TypedStateValues(types, mask));
}
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
}
+ template <Phase T>
void VisitFrameState(Node* node) {
DCHECK_EQ(5, node->op()->ValueInputCount());
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // Parameters.
- ProcessInput(node, 1, UseInfo::AnyTagged()); // Registers.
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // Parameters.
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // Registers.
// Accumulator is a special flower - we need to remember its type in
// a singleton typed-state-values node (as if it was a singleton
// state-values node).
Node* accumulator = node->InputAt(2);
- if (propagate()) {
+ if (propagate<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
- EnqueueInput(node, 2, UseInfo::AnyTagged());
+ EnqueueInput<T>(node, 2, UseInfo::AnyTagged());
} else {
- EnqueueInput(node, 2, UseInfo::Any());
+ EnqueueInput<T>(node, 2, UseInfo::Any());
}
- } else if (lower()) {
+ } else if (lower<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
@@ -1248,24 +1232,25 @@ class RepresentationSelector {
}
}
- ProcessInput(node, 3, UseInfo::AnyTagged()); // Context.
- ProcessInput(node, 4, UseInfo::AnyTagged()); // Closure.
- ProcessInput(node, 5, UseInfo::AnyTagged()); // Outer frame state.
- return SetOutput(node, MachineRepresentation::kTagged);
+ ProcessInput<T>(node, 3, UseInfo::AnyTagged()); // Context.
+ ProcessInput<T>(node, 4, UseInfo::AnyTagged()); // Closure.
+ ProcessInput<T>(node, 5, UseInfo::AnyTagged()); // Outer frame state.
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
}
+ template <Phase T>
void VisitObjectState(Node* node) {
- if (propagate()) {
+ if (propagate<T>()) {
for (int i = 0; i < node->InputCount(); i++) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) {
- EnqueueInput(node, i, UseInfo::AnyTagged());
+ EnqueueInput<T>(node, i, UseInfo::AnyTagged());
} else {
- EnqueueInput(node, i, UseInfo::Any());
+ EnqueueInput<T>(node, i, UseInfo::Any());
}
}
- } else if (lower()) {
+ } else if (lower<T>()) {
Zone* zone = jsgraph_->zone();
ZoneVector<MachineType>* types =
new (zone->New(sizeof(ZoneVector<MachineType>)))
@@ -1283,7 +1268,7 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState(
ObjectIdOf(node->op()), types));
}
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
}
const Operator* Int32Op(Node* node) {
@@ -1402,6 +1387,7 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, Uint32OverflowOp(node));
}
+ template <Phase T>
void VisitSpeculativeIntegerAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
Type left_upper = GetUpperBound(node->InputAt(0));
@@ -1411,7 +1397,7 @@ class RepresentationSelector {
right_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero)) {
// Only eliminate the node if its typing rule can be satisfied, namely
// that a safe integer is produced.
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
// If we know how to interpret the result or if the users only care
      // about the low 32-bits, we can truncate to Word32 and do a wrapping
@@ -1420,8 +1406,8 @@ class RepresentationSelector {
GetUpperBound(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32()) {
// => Int32Add/Sub
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
return;
}
}
@@ -1445,8 +1431,8 @@ class RepresentationSelector {
if (left_upper.Is(left_constraint_type) &&
right_upper.Is(Type::Signed32OrMinusZero()) &&
(left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
} else {
// If the output's truncation is identify-zeros, we can pass it
// along. Moreover, if the operation is addition and we know the
@@ -1465,10 +1451,10 @@ class RepresentationSelector {
// potentially guarded by a check.
UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
kIdentifyZeros);
- VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
+ VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
+ Type::Signed32());
}
- if (lower()) {
+ if (lower<T>()) {
if (truncation.IsUsedAsWord32() ||
!CanOverflowSigned32(node->op(), left_feedback_type,
right_feedback_type, graph_zone())) {
@@ -1481,6 +1467,7 @@ class RepresentationSelector {
return;
}
+ template <Phase T>
void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
if (BothInputsAre(node, type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
@@ -1488,38 +1475,39 @@ class RepresentationSelector {
GetUpperBound(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32())) {
// => Int32Add/Sub
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
return;
}
// default case => Float64Add/Sub
- VisitBinop(node,
- UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
- FeedbackSource()),
- MachineRepresentation::kFloat64, Type::Number());
- if (lower()) {
+ VisitBinop<T>(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ FeedbackSource()),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower<T>()) {
ChangeToPureOp(node, Float64Op(node));
}
return;
}
+ template <Phase T>
void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
// => unsigned Uint32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
return;
}
if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Signed32()))) {
// => signed Int32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
return;
}
@@ -1531,9 +1519,9 @@ class RepresentationSelector {
if (BothInputsAreUnsigned32(node)) {
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Unsigned32());
- if (lower()) ChangeToUint32OverflowOp(node);
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower<T>()) ChangeToUint32OverflowOp(node);
return;
}
}
@@ -1544,9 +1532,9 @@ class RepresentationSelector {
      // If both the inputs and the feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) ChangeToInt32OverflowOp(node);
return;
}
}
@@ -1563,16 +1551,16 @@ class RepresentationSelector {
UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint(
hint, FeedbackSource(), kIdentifyZeros);
if (truncation.IsUsedAsWord32()) {
- VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
} else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
- VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
- Type::Unsigned32());
- if (lower()) ChangeToUint32OverflowOp(node);
+ VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
+ Type::Unsigned32());
+ if (lower<T>()) ChangeToUint32OverflowOp(node);
} else {
- VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
- Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
+ VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
+ Type::Signed32());
+ if (lower<T>()) ChangeToInt32OverflowOp(node);
}
return;
}
@@ -1581,18 +1569,18 @@ class RepresentationSelector {
TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Number());
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Number());
+ if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
return;
}
if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node).Is(Type::Signed32()))) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Number());
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Number());
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
return;
}
@@ -1605,60 +1593,42 @@ class RepresentationSelector {
truncation.identify_zeros(), FeedbackSource());
UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
kIdentifyZeros, FeedbackSource());
- VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
- Type::Number());
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
+ Type::Number());
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
+ // Just assert for Propagate and Retype. Lower specialized below.
+ template <Phase T>
void InsertUnreachableIfNecessary(Node* node) {
- DCHECK(lower());
- // If the node is effectful and it produces an impossible value, then we
- // insert Unreachable node after it.
- if (node->op()->ValueOutputCount() > 0 &&
- node->op()->EffectOutputCount() > 0 &&
- node->opcode() != IrOpcode::kUnreachable && TypeOf(node).IsNone()) {
- Node* control =
- (node->op()->ControlOutputCount() == 0)
- ? NodeProperties::GetControlInput(node, 0)
- : NodeProperties::FindSuccessfulControlProjection(node);
-
- Node* unreachable =
- graph()->NewNode(common()->Unreachable(), node, control);
-
- // Insert unreachable node and replace all the effect uses of the {node}
- // with the new unreachable node.
- for (Edge edge : node->use_edges()) {
- if (!NodeProperties::IsEffectEdge(edge)) continue;
- // Make sure to not overwrite the unreachable node's input. That would
- // create a cycle.
- if (edge.from() == unreachable) continue;
- // Avoid messing up the exceptional path.
- if (edge.from()->opcode() == IrOpcode::kIfException) {
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
- DCHECK_EQ(NodeProperties::GetControlInput(edge.from()), node);
- continue;
- }
-
- edge.UpdateTo(unreachable);
- }
- }
+ static_assert(propagate<T>() || retype<T>(),
+ "This version of InsertUnreachableIfNecessary has to be "
+ "called in the Propagate or Retype phase.");
}
+ template <Phase T>
void VisitCheckBounds(Node* node, SimplifiedLowering* lowering) {
CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op());
FeedbackSource const& feedback = p.check_parameters().feedback();
Type const index_type = TypeOf(node->InputAt(0));
Type const length_type = TypeOf(node->InputAt(1));
+
+ // Conversions, if requested and needed, will be handled by the
+ // representation changer, not by the lower-level Checked*Bounds operators.
+ CheckBoundsFlags new_flags =
+ p.flags().without(CheckBoundsFlag::kConvertStringAndMinusZero);
+
if (length_type.Is(Type::Unsigned31())) {
- if (index_type.Is(Type::Integral32OrMinusZero())) {
- // Map -0 to 0, and the values in the [-2^31,-1] range to the
- // [2^31,2^32-1] range, which will be considered out-of-bounds
- // as well, because the {length_type} is limited to Unsigned31.
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
- CheckBoundsParameters::Mode mode = p.mode();
+ if (index_type.Is(Type::Integral32()) ||
+ (index_type.Is(Type::Integral32OrMinusZero()) &&
+ p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero)) {
+ // Map the values in the [-2^31,-1] range to the [2^31,2^32-1] range,
+ // which will be considered out-of-bounds because the {length_type} is
+ // limited to Unsigned31. This also converts -0 to 0.
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
if (lowering->poisoning_level_ ==
PoisoningMitigationLevel::kDontPoison &&
(index_type.IsNone() || length_type.IsNone() ||
@@ -1666,32 +1636,45 @@ class RepresentationSelector {
index_type.Max() < length_type.Min()))) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
- mode = CheckBoundsParameters::kAbortOnOutOfBounds;
+ // TODO(neis): Move this into TypedOptimization?
+ new_flags |= CheckBoundsFlag::kAbortOnOutOfBounds;
}
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, mode));
+ node, simplified()->CheckedUint32Bounds(feedback, new_flags));
}
- } else {
- VisitBinop(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
- UseInfo::Word(), MachineType::PointerRepresentation());
- if (lower()) {
+ } else if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) {
+ VisitBinop<T>(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
+ UseInfo::Word(), MachineType::PointerRepresentation());
+ if (lower<T>()) {
if (jsgraph_->machine()->Is64()) {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, p.mode()));
+ node, simplified()->CheckedUint64Bounds(feedback, new_flags));
} else {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, p.mode()));
+ node, simplified()->CheckedUint32Bounds(feedback, new_flags));
}
}
+ } else {
+ VisitBinop<T>(
+ node, UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, feedback),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ if (lower<T>()) {
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ }
}
} else {
CHECK(length_type.Is(type_cache_->kPositiveSafeInteger));
- VisitBinop(node,
- UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback),
- UseInfo::Word64(), MachineRepresentation::kWord64);
- if (lower()) {
+ IdentifyZeros zero_handling =
+ (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero)
+ ? kIdentifyZeros
+ : kDistinguishZeros;
+ VisitBinop<T>(node,
+ UseInfo::CheckedSigned64AsWord64(zero_handling, feedback),
+ UseInfo::Word64(), MachineRepresentation::kWord64);
+ if (lower<T>()) {
NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, p.mode()));
+ node, simplified()->CheckedUint64Bounds(feedback, new_flags));
}
}
}
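
The VisitCheckBounds hunk above derives the flags for the lowered Checked*Bounds operator from the original parameters: kConvertStringAndMinusZero is dropped up front (conversions stay with the representation changer) and kAbortOnOutOfBounds is added when typing proves the check redundant. A toy flag set illustrating that without()/|= manipulation follows; only the flag names come from the patch, the CheckBoundsFlags type here is a hypothetical stand-in.

// Toy flag set illustrating the flags manipulation in VisitCheckBounds.
#include <cassert>

enum CheckBoundsFlag : unsigned {
  kConvertStringAndMinusZero = 1u << 0,
  kAbortOnOutOfBounds = 1u << 1,
};

class CheckBoundsFlags {
 public:
  constexpr explicit CheckBoundsFlags(unsigned bits = 0) : bits_(bits) {}
  constexpr CheckBoundsFlags without(CheckBoundsFlag f) const {
    return CheckBoundsFlags(bits_ & ~static_cast<unsigned>(f));
  }
  CheckBoundsFlags& operator|=(CheckBoundsFlag f) {
    bits_ |= static_cast<unsigned>(f);
    return *this;
  }
  constexpr bool contains(CheckBoundsFlag f) const {
    return (bits_ & static_cast<unsigned>(f)) != 0;
  }

 private:
  unsigned bits_;
};

int main() {
  CheckBoundsFlags p_flags(kConvertStringAndMinusZero);
  // Conversions are handled by the representation changer, not by the
  // lowered bounds-check operator, so drop that flag up front.
  CheckBoundsFlags new_flags = p_flags.without(kConvertStringAndMinusZero);
  // If typing proves the index always in range, the check can only abort.
  bool check_is_redundant = true;  // e.g. index_type.Max() < length_type.Min()
  if (check_is_redundant) new_flags |= kAbortOnOutOfBounds;
  assert(!new_flags.contains(kConvertStringAndMinusZero));
  assert(new_flags.contains(kAbortOnOutOfBounds));
  return 0;
}
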
@@ -1748,6 +1731,7 @@ class RepresentationSelector {
static constexpr int kInitialArgumentsCount = 10;
+ template <Phase T>
void VisitFastApiCall(Node* node) {
FastApiCallParameters const& params = FastApiCallParametersOf(node->op());
const CFunctionInfo* c_signature = params.signature();
@@ -1758,19 +1742,19 @@ class RepresentationSelector {
base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
c_arg_count);
- ProcessInput(node, 0, UseInfo::Word());
+ ProcessInput<T>(node, 0, UseInfo::Word());
// Propagate representation information from TypeInfo.
for (int i = 0; i < c_arg_count; i++) {
arg_use_info[i] = UseInfoForFastApiCallArgument(
c_signature->ArgumentInfo(i).GetType(), params.feedback());
- ProcessInput(node, i + 1, arg_use_info[i]);
+ ProcessInput<T>(node, i + 1, arg_use_info[i]);
}
MachineType return_type =
MachineTypeFor(c_signature->ReturnInfo().GetType());
- SetOutput(node, return_type.representation());
+ SetOutput<T>(node, return_type.representation());
- if (lower()) {
+ if (lower<T>()) {
MachineSignature::Builder builder(graph()->zone(), 1, c_arg_count);
builder.AddReturn(return_type);
for (int i = 0; i < c_arg_count; ++i) {
@@ -1794,6 +1778,7 @@ class RepresentationSelector {
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
+ template <Phase T>
void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
tick_counter_->DoTick();
@@ -1809,10 +1794,10 @@ class RepresentationSelector {
// Dead as well.
if (node->op()->ValueInputCount() > 0 &&
node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) {
- return VisitUnused(node);
+ return VisitUnused<T>(node);
}
- if (lower()) InsertUnreachableIfNecessary(node);
+ if (lower<T>()) InsertUnreachableIfNecessary<T>(node);
switch (node->opcode()) {
//------------------------------------------------------------------
@@ -1822,36 +1807,37 @@ class RepresentationSelector {
// We use Start as a terminator for the frame state chain, so even
// tho Start doesn't really produce a value, we have to say Tagged
// here, otherwise the input conversion will fail.
- return VisitLeaf(node, MachineRepresentation::kTagged);
+ return VisitLeaf<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kParameter:
// TODO(titzer): use representation from linkage.
- return VisitUnop(node, UseInfo::None(), MachineRepresentation::kTagged);
+ return VisitUnop<T>(node, UseInfo::None(),
+ MachineRepresentation::kTagged);
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, MachineRepresentation::kWord32);
+ return VisitLeaf<T>(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, MachineRepresentation::kWord64);
+ return VisitLeaf<T>(node, MachineRepresentation::kWord64);
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, MachineType::PointerRepresentation());
+ return VisitLeaf<T>(node, MachineType::PointerRepresentation());
case IrOpcode::kNumberConstant: {
double const value = OpParameter<double>(node->op());
int value_as_int;
if (DoubleToSmiInteger(value, &value_as_int)) {
- VisitLeaf(node, MachineRepresentation::kTaggedSigned);
- if (lower()) {
+ VisitLeaf<T>(node, MachineRepresentation::kTaggedSigned);
+ if (lower<T>()) {
intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi));
}
return;
}
- VisitLeaf(node, MachineRepresentation::kTagged);
+ VisitLeaf<T>(node, MachineRepresentation::kTagged);
return;
}
case IrOpcode::kHeapConstant:
case IrOpcode::kDelayedStringConstant:
- return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
+ return VisitLeaf<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kPointerConstant: {
- VisitLeaf(node, MachineType::PointerRepresentation());
- if (lower()) {
+ VisitLeaf<T>(node, MachineType::PointerRepresentation());
+ if (lower<T>()) {
intptr_t const value = OpParameter<intptr_t>(node->op());
DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value));
}
@@ -1860,32 +1846,32 @@ class RepresentationSelector {
case IrOpcode::kBranch: {
DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
- ProcessInput(node, 0, UseInfo::Bool());
- EnqueueInput(node, NodeProperties::FirstControlIndex(node));
+ ProcessInput<T>(node, 0, UseInfo::Bool());
+ EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node));
return;
}
case IrOpcode::kSwitch:
- ProcessInput(node, 0, UseInfo::TruncatingWord32());
- EnqueueInput(node, NodeProperties::FirstControlIndex(node));
+ ProcessInput<T>(node, 0, UseInfo::TruncatingWord32());
+ EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node));
return;
case IrOpcode::kSelect:
- return VisitSelect(node, truncation, lowering);
+ return VisitSelect<T>(node, truncation, lowering);
case IrOpcode::kPhi:
- return VisitPhi(node, truncation, lowering);
+ return VisitPhi<T>(node, truncation, lowering);
case IrOpcode::kCall:
- return VisitCall(node, lowering);
+ return VisitCall<T>(node, lowering);
//------------------------------------------------------------------
// JavaScript operators.
//------------------------------------------------------------------
case IrOpcode::kToBoolean: {
if (truncation.IsUsedAsBool()) {
- ProcessInput(node, 0, UseInfo::Bool());
- SetOutput(node, MachineRepresentation::kBit);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ ProcessInput<T>(node, 0, UseInfo::Bool());
+ SetOutput<T>(node, MachineRepresentation::kBit);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitInputs(node);
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ VisitInputs<T>(node);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
}
return;
}
@@ -1894,18 +1880,18 @@ class RepresentationSelector {
case IrOpcode::kJSToNumeric: {
DCHECK(NodeProperties::GetType(node).Is(Type::Union(
Type::BigInt(), Type::NumberOrOddball(), graph()->zone())));
- VisitInputs(node);
+ VisitInputs<T>(node);
// TODO(bmeurer): Optimize somewhat based on input type?
if (truncation.IsUsedAsWord32()) {
- SetOutput(node, MachineRepresentation::kWord32);
- if (lower())
+ SetOutput<T>(node, MachineRepresentation::kWord32);
+ if (lower<T>())
lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
} else if (truncation.TruncatesOddballAndBigIntToNumber()) {
- SetOutput(node, MachineRepresentation::kFloat64);
- if (lower())
+ SetOutput<T>(node, MachineRepresentation::kFloat64);
+ if (lower<T>())
lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
} else {
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
}
return;
}
@@ -1914,7 +1900,7 @@ class RepresentationSelector {
// Simplified operators.
//------------------------------------------------------------------
case IrOpcode::kBooleanNot: {
- if (lower()) {
+ if (lower<T>()) {
NodeInfo* input_info = GetInfo(node->InputAt(0));
if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
@@ -1930,8 +1916,8 @@ class RepresentationSelector {
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
- SetOutput(node, MachineRepresentation::kBit);
+ ProcessInput<T>(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput<T>(node, MachineRepresentation::kBit);
}
return;
}
@@ -1949,9 +1935,9 @@ class RepresentationSelector {
rhs_type.Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_->kZeroish))) {
// => unsigned Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
return;
}
if ((lhs_type.Is(Type::Signed32OrMinusZero()) &&
@@ -1960,15 +1946,15 @@ class RepresentationSelector {
rhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
OneInputCannotBe(node, type_cache_->kZeroish))) {
// => signed Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
return;
}
// => Float64Cmp
- VisitBinop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberLessThan:
@@ -1981,31 +1967,31 @@ class RepresentationSelector {
if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
rhs_type.Is(Type::Unsigned32OrMinusZero())) {
// => unsigned Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
- VisitBinop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kBit);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kBit);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kSpeculativeSafeIntegerAdd:
case IrOpcode::kSpeculativeSafeIntegerSubtract:
- return VisitSpeculativeIntegerAdditiveOp(node, truncation, lowering);
+ return VisitSpeculativeIntegerAdditiveOp<T>(node, truncation, lowering);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
- return VisitSpeculativeAdditiveOp(node, truncation, lowering);
+ return VisitSpeculativeAdditiveOp<T>(node, truncation, lowering);
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
@@ -2018,16 +2004,16 @@ class RepresentationSelector {
if (lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
rhs_type.Is(Type::Unsigned32OrMinusZero())) {
// => unsigned Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) ChangeToPureOp(node, Uint32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) ChangeToPureOp(node, Uint32Op(node));
return;
} else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
return;
}
// Try to use type feedback.
@@ -2035,31 +2021,31 @@ class RepresentationSelector {
switch (hint) {
case NumberOperationHint::kSigned32:
case NumberOperationHint::kSignedSmall:
- if (propagate()) {
- VisitBinop(node,
- CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
- kIdentifyZeros),
- MachineRepresentation::kBit);
- } else if (retype()) {
- SetOutput(node, MachineRepresentation::kBit, Type::Any());
+ if (propagate<T>()) {
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(
+ hint, FeedbackSource(), kIdentifyZeros),
+ MachineRepresentation::kBit);
+ } else if (retype<T>()) {
+ SetOutput<T>(node, MachineRepresentation::kBit, Type::Any());
} else {
- DCHECK(lower());
+ DCHECK(lower<T>());
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
if (IsNodeRepresentationTagged(lhs) &&
IsNodeRepresentationTagged(rhs)) {
- VisitBinop(node,
- UseInfo::CheckedSignedSmallAsTaggedSigned(
- FeedbackSource(), kIdentifyZeros),
- MachineRepresentation::kBit);
+ VisitBinop<T>(node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(
+ FeedbackSource(), kIdentifyZeros),
+ MachineRepresentation::kBit);
ChangeToPureOp(
node, changer_->TaggedSignedOperatorFor(node->opcode()));
} else {
- VisitBinop(node,
- CheckedUseInfoAsWord32FromHint(
- hint, FeedbackSource(), kIdentifyZeros),
- MachineRepresentation::kBit);
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(
+ hint, FeedbackSource(), kIdentifyZeros),
+ MachineRepresentation::kBit);
ChangeToPureOp(node, Int32Op(node));
}
}
@@ -2074,11 +2060,11 @@ class RepresentationSelector {
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
V8_FALLTHROUGH;
case NumberOperationHint::kNumber:
- VisitBinop(node,
- CheckedUseInfoAsFloat64FromHint(hint, FeedbackSource(),
- kIdentifyZeros),
- MachineRepresentation::kBit);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitBinop<T>(node,
+ CheckedUseInfoAsFloat64FromHint(
+ hint, FeedbackSource(), kIdentifyZeros),
+ MachineRepresentation::kBit);
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
UNREACHABLE();
@@ -2095,18 +2081,18 @@ class RepresentationSelector {
TypeOf(node).Is(Type::Unsigned32()) ||
truncation.IsUsedAsWord32())) {
// => Int32Add/Sub
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
} else if (jsgraph_->machine()->Is64() &&
BothInputsAre(node, type_cache_->kSafeInteger) &&
GetUpperBound(node).Is(type_cache_->kSafeInteger)) {
// => Int64Add/Sub
- VisitInt64Binop(node);
- if (lower()) ChangeToPureOp(node, Int64Op(node));
+ VisitInt64Binop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int64Op(node));
} else {
// => Float64Add/Sub
- VisitFloat64Binop(node);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitFloat64Binop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
}
return;
}
@@ -2122,8 +2108,8 @@ class RepresentationSelector {
// (b) the output is known to be Unsigned32, or
// (c) the uses are truncating and the result is in the safe
// integer range.
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
return;
}
// Try to use type feedback.
@@ -2137,9 +2123,9 @@ class RepresentationSelector {
// If both inputs and feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) {
LowerToCheckedInt32Mul(node, truncation, input0_type,
input1_type);
}
@@ -2149,20 +2135,20 @@ class RepresentationSelector {
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) {
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) {
LowerToCheckedInt32Mul(node, truncation, input0_type, input1_type);
}
return;
}
// Checked float64 x float64 => float64
- VisitBinop(node,
- UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
- FeedbackSource()),
- MachineRepresentation::kFloat64, Type::Number());
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitBinop<T>(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(
+ kDistinguishZeros, FeedbackSource()),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberMultiply: {
@@ -2177,33 +2163,33 @@ class RepresentationSelector {
// (b) the output is known to be Unsigned32, or
// (c) the uses are truncating and the result is in the safe
// integer range.
- VisitWord32TruncatingBinop(node);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
return;
}
// Number x Number => Float64Mul
- VisitFloat64Binop(node);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitFloat64Binop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kSpeculativeNumberDivide: {
if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
// => unsigned Uint32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node));
return;
}
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node).Is(Type::Signed32())) {
// => signed Int32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
return;
}
if (truncation.IsUsedAsWord32()) {
// => signed Int32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
return;
}
}
@@ -2216,9 +2202,9 @@ class RepresentationSelector {
if (BothInputsAreUnsigned32(node)) {
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Unsigned32());
- if (lower()) ChangeToUint32OverflowOp(node);
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower<T>()) ChangeToUint32OverflowOp(node);
return;
}
}
@@ -2229,9 +2215,9 @@ class RepresentationSelector {
        // If both the inputs and the feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) ChangeToInt32OverflowOp(node);
return;
}
}
@@ -2241,24 +2227,24 @@ class RepresentationSelector {
hint == NumberOperationHint::kSignedSmallInputs) {
// If the result is truncated, we only need to check the inputs.
if (truncation.IsUsedAsWord32()) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
return;
} else if (hint != NumberOperationHint::kSignedSmallInputs) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) ChangeToInt32OverflowOp(node);
return;
}
}
// default case => Float64Div
- VisitBinop(node,
- UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
- FeedbackSource()),
- MachineRepresentation::kFloat64, Type::Number());
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitBinop<T>(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(
+ kDistinguishZeros, FeedbackSource()),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberDivide: {
@@ -2267,8 +2253,8 @@ class RepresentationSelector {
(truncation.IsUsedAsWord32() ||
TypeOf(node).Is(Type::Unsigned32()))) {
// => unsigned Uint32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Uint32Div(node));
return;
}
if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
@@ -2276,17 +2262,17 @@ class RepresentationSelector {
(truncation.IsUsedAsWord32() ||
TypeOf(node).Is(Type::Signed32()))) {
// => signed Int32Div
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Div(node));
return;
}
// Number x Number => Float64Div
- VisitFloat64Binop(node);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitFloat64Binop<T>(node);
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kSpeculativeNumberModulus:
- return VisitSpeculativeNumberModulus(node, truncation, lowering);
+ return VisitSpeculativeNumberModulus<T>(node, truncation, lowering);
case IrOpcode::kNumberModulus: {
Type const lhs_type = TypeOf(node->InputAt(0));
Type const rhs_type = TypeOf(node->InputAt(1));
@@ -2295,8 +2281,8 @@ class RepresentationSelector {
(truncation.IsUsedAsWord32() ||
TypeOf(node).Is(Type::Unsigned32()))) {
// => unsigned Uint32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
return;
}
if ((lhs_type.Is(Type::Signed32OrMinusZeroOrNaN()) &&
@@ -2305,8 +2291,8 @@ class RepresentationSelector {
(truncation.IdentifiesZeroAndMinusZero() &&
TypeOf(node).Is(Type::Signed32OrMinusZero())))) {
// => signed Int32Mod
- VisitWord32TruncatingBinop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
return;
}
// => Float64Mod
@@ -2317,30 +2303,31 @@ class RepresentationSelector {
UseInfo const lhs_use =
UseInfo::TruncatingFloat64(truncation.identify_zeros());
UseInfo const rhs_use = UseInfo::TruncatingFloat64(kIdentifyZeros);
- VisitBinop(node, lhs_use, rhs_use, MachineRepresentation::kFloat64);
- if (lower()) ChangeToPureOp(node, Float64Op(node));
+ VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64);
+ if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
- VisitWord32TruncatingBinop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
return;
}
case IrOpcode::kSpeculativeNumberBitwiseOr:
case IrOpcode::kSpeculativeNumberBitwiseXor:
case IrOpcode::kSpeculativeNumberBitwiseAnd:
- VisitSpeculativeInt32Binop(node);
- if (lower()) {
+ VisitSpeculativeInt32Binop<T>(node);
+ if (lower<T>()) {
ChangeToPureOp(node, Int32Op(node));
}
return;
case IrOpcode::kNumberShiftLeft: {
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
@@ -2349,10 +2336,10 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberShiftLeft: {
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
@@ -2360,9 +2347,9 @@ class RepresentationSelector {
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) {
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shl());
}
@@ -2370,9 +2357,10 @@ class RepresentationSelector {
}
case IrOpcode::kNumberShiftRight: {
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
@@ -2381,10 +2369,10 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberShiftRight: {
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
@@ -2392,9 +2380,9 @@ class RepresentationSelector {
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) {
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Sar());
}
@@ -2402,9 +2390,10 @@ class RepresentationSelector {
}
case IrOpcode::kNumberShiftRightLogical: {
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
@@ -2421,9 +2410,9 @@ class RepresentationSelector {
// have seen so far were of type Unsigned31. We speculate that this
// will continue to hold. Moreover, since the RHS is 0, the result
// will just be the (converted) LHS.
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Unsigned31());
- if (lower()) {
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Unsigned31());
+ if (lower<T>()) {
node->RemoveInput(1);
NodeProperties::ChangeOp(
node, simplified()->CheckedUint32ToInt32(FeedbackSource()));
@@ -2431,18 +2420,18 @@ class RepresentationSelector {
return;
}
if (BothInputsAre(node, Type::NumberOrOddball())) {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
return;
}
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Unsigned32());
- if (lower()) {
+ VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
ChangeToPureOp(node, lowering->machine()->Word32Shr());
}
@@ -2454,40 +2443,41 @@ class RepresentationSelector {
// choose to ignore minus zero in all cases.
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(Type::Unsigned32OrMinusZero())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else if (input_type.Is(Type::Signed32OrMinusZero())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Abs(node));
} else if (input_type.Is(type_cache_->kPositiveIntegerOrNaN)) {
- VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberClz32: {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberImul: {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberFround: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat32);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat32);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberMax: {
@@ -2504,8 +2494,8 @@ class RepresentationSelector {
(lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
truncation.IdentifiesZeroAndMinusZero())) {
- VisitWord32TruncatingBinop(node);
- if (lower()) {
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) {
lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
@@ -2514,24 +2504,24 @@ class RepresentationSelector {
(lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero()) &&
truncation.IdentifiesZeroAndMinusZero())) {
- VisitWord32TruncatingBinop(node);
- if (lower()) {
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) {
lowering->DoMax(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
} else if (jsgraph_->machine()->Is64() &&
lhs_type.Is(type_cache_->kSafeInteger) &&
rhs_type.Is(type_cache_->kSafeInteger)) {
- VisitInt64Binop(node);
- if (lower()) {
+ VisitInt64Binop<T>(node);
+ if (lower<T>()) {
lowering->DoMax(node, lowering->machine()->Int64LessThan(),
MachineRepresentation::kWord64);
}
} else {
- VisitBinop(node,
- UseInfo::TruncatingFloat64(truncation.identify_zeros()),
- MachineRepresentation::kFloat64);
- if (lower()) {
+ VisitBinop<T>(node,
+ UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) {
// If the right hand side is not NaN, and the left hand side
// is not NaN (or -0 if the difference between the zeros is
// observed), we can do a simple floating point comparison here.
@@ -2562,8 +2552,8 @@ class RepresentationSelector {
(lhs_type.Is(Type::Unsigned32OrMinusZero()) &&
rhs_type.Is(Type::Unsigned32OrMinusZero()) &&
truncation.IdentifiesZeroAndMinusZero())) {
- VisitWord32TruncatingBinop(node);
- if (lower()) {
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) {
lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
MachineRepresentation::kWord32);
}
@@ -2572,24 +2562,24 @@ class RepresentationSelector {
(lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero()) &&
truncation.IdentifiesZeroAndMinusZero())) {
- VisitWord32TruncatingBinop(node);
- if (lower()) {
+ VisitWord32TruncatingBinop<T>(node);
+ if (lower<T>()) {
lowering->DoMin(node, lowering->machine()->Int32LessThan(),
MachineRepresentation::kWord32);
}
} else if (jsgraph_->machine()->Is64() &&
lhs_type.Is(type_cache_->kSafeInteger) &&
rhs_type.Is(type_cache_->kSafeInteger)) {
- VisitInt64Binop(node);
- if (lower()) {
+ VisitInt64Binop<T>(node);
+ if (lower<T>()) {
lowering->DoMin(node, lowering->machine()->Int64LessThan(),
MachineRepresentation::kWord64);
}
} else {
- VisitBinop(node,
- UseInfo::TruncatingFloat64(truncation.identify_zeros()),
- MachineRepresentation::kFloat64);
- if (lower()) {
+ VisitBinop<T>(node,
+ UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) {
// If the left hand side is not NaN, and the right hand side
// is not NaN (or -0 if the difference between the zeros is
// observed), we can do a simple floating point comparison here.
@@ -2609,9 +2599,9 @@ class RepresentationSelector {
}
case IrOpcode::kNumberAtan2:
case IrOpcode::kNumberPow: {
- VisitBinop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberCeil:
@@ -2623,9 +2613,10 @@ class RepresentationSelector {
// no-ops if we figure out (late) that their input is already an
// integer, NaN or -0.
Type const input_type = TypeOf(node->InputAt(0));
- VisitUnop(node, UseInfo::TruncatingFloat64(truncation.identify_zeros()),
- MachineRepresentation::kFloat64);
- if (lower()) {
+ VisitUnop<T>(node,
+ UseInfo::TruncatingFloat64(truncation.identify_zeros()),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) {
if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
DeferReplacement(node, node->InputAt(0));
} else if (node->opcode() == IrOpcode::kNumberRound) {
@@ -2638,16 +2629,16 @@ class RepresentationSelector {
}
case IrOpcode::kCheckBigInt: {
if (InputIs(node, Type::BigInt())) {
- VisitNoop(node, truncation);
+ VisitNoop<T>(node, truncation);
} else {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
}
return;
}
case IrOpcode::kBigIntAsUintN: {
- ProcessInput(node, 0, UseInfo::TruncatingWord64());
- SetOutput(node, MachineRepresentation::kWord64, Type::BigInt());
+ ProcessInput<T>(node, 0, UseInfo::TruncatingWord64());
+ SetOutput<T>(node, MachineRepresentation::kWord64, Type::BigInt());
return;
}
case IrOpcode::kNumberAcos:
@@ -2669,20 +2660,20 @@ class RepresentationSelector {
case IrOpcode::kNumberSinh:
case IrOpcode::kNumberTan:
case IrOpcode::kNumberTanh: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberSign: {
if (InputIs(node, Type::Signed32())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, lowering->Int32Sign(node));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, lowering->Int32Sign(node));
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, lowering->Float64Sign(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, lowering->Float64Sign(node));
}
return;
}
@@ -2690,20 +2681,20 @@ class RepresentationSelector {
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(Type::OrderedNumber())) {
// No need to silence anything if the input cannot be NaN.
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberSqrt: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberToBoolean: {
@@ -2714,67 +2705,67 @@ class RepresentationSelector {
if (input_type.Is(Type::Integral32OrMinusZeroOrNaN())) {
// 0, -0 and NaN all map to false, so we can safely truncate
// all of them to zero here.
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kBit);
- if (lower()) lowering->DoIntegral32ToBit(node);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) lowering->DoIntegral32ToBit(node);
} else if (input_type.Is(Type::OrderedNumber())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kBit);
- if (lower()) lowering->DoOrderedNumberToBit(node);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kBit);
+ if (lower<T>()) lowering->DoOrderedNumberToBit(node);
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
- MachineRepresentation::kBit);
- if (lower()) lowering->DoNumberToBit(node);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
+ MachineRepresentation::kBit);
+ if (lower<T>()) lowering->DoNumberToBit(node);
}
return;
}
case IrOpcode::kNumberToInt32: {
// Just change representation if necessary.
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kNumberToString: {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kNumberToUint8Clamped: {
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(type_cache_->kUint8OrMinusZeroOrNaN)) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else if (input_type.Is(Type::Unsigned32OrMinusZeroOrNaN())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) lowering->DoUnsigned32ToUint8Clamped(node);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) lowering->DoUnsigned32ToUint8Clamped(node);
} else if (input_type.Is(Type::Signed32OrMinusZeroOrNaN())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) lowering->DoSigned32ToUint8Clamped(node);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) lowering->DoSigned32ToUint8Clamped(node);
} else if (input_type.Is(type_cache_->kIntegerOrMinusZeroOrNaN)) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) lowering->DoIntegerToUint8Clamped(node);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) lowering->DoIntegerToUint8Clamped(node);
} else {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) lowering->DoNumberToUint8Clamped(node);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) lowering->DoNumberToUint8Clamped(node);
}
return;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ if (lower<T>()) {
if (COMPRESS_POINTERS_BOOL) {
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
@@ -2784,49 +2775,49 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSameValueNumbersOnly: {
- VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kSameValue: {
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
if (BothInputsAre(node, Type::Number())) {
- VisitBinop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
lowering->simplified()->NumberSameValue());
}
} else {
- VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
}
return;
}
case IrOpcode::kTypeOf: {
- return VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ return VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kNewConsString: {
- ProcessInput(node, 0, UseInfo::TruncatingWord32()); // length
- ProcessInput(node, 1, UseInfo::AnyTagged()); // first
- ProcessInput(node, 2, UseInfo::AnyTagged()); // second
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); // length
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kSpeculativeBigIntAdd: {
if (truncation.IsUsedAsWord64()) {
- VisitBinop(node,
- UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
- MachineRepresentation::kWord64);
- if (lower()) {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
ChangeToPureOp(node, lowering->machine()->Int64Add());
}
} else {
- VisitBinop(node,
- UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
- MachineRepresentation::kTaggedPointer);
- if (lower()) {
+ VisitBinop<T>(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
}
}
@@ -2834,17 +2825,17 @@ class RepresentationSelector {
}
case IrOpcode::kSpeculativeBigIntSubtract: {
if (truncation.IsUsedAsWord64()) {
- VisitBinop(node,
- UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
- MachineRepresentation::kWord64);
- if (lower()) {
+ VisitBinop<T>(
+ node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
ChangeToPureOp(node, lowering->machine()->Int64Sub());
}
} else {
- VisitBinop(node,
- UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
- MachineRepresentation::kTaggedPointer);
- if (lower()) {
+ VisitBinop<T>(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
lowering->simplified()->BigIntSubtract());
}
@@ -2853,18 +2844,18 @@ class RepresentationSelector {
}
case IrOpcode::kSpeculativeBigIntNegate: {
if (truncation.IsUsedAsWord64()) {
- VisitUnop(node,
- UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
- MachineRepresentation::kWord64);
- if (lower()) {
+ VisitUnop<T>(node,
+ UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
+ MachineRepresentation::kWord64);
+ if (lower<T>()) {
ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
jsgraph_->Int64Constant(0));
}
} else {
- VisitUnop(node,
- UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
- MachineRepresentation::kTaggedPointer);
- if (lower()) {
+ VisitUnop<T>(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
}
}
@@ -2876,138 +2867,140 @@ class RepresentationSelector {
// actual string concatenation. We should also use the length to pass it
// to the builtin or decide in optimized code how to construct the
// resulting string (i.e. cons string or sequential string).
- ProcessInput(node, 0, UseInfo::TaggedSigned()); // length
- ProcessInput(node, 1, UseInfo::AnyTagged()); // first
- ProcessInput(node, 2, UseInfo::AnyTagged()); // second
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ ProcessInput<T>(node, 0, UseInfo::TaggedSigned()); // length
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
- return VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kStringCharCodeAt: {
- return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
- MachineRepresentation::kWord32);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kWord32);
}
case IrOpcode::kStringCodePointAt: {
- return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
- MachineRepresentation::kTaggedSigned);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kTaggedSigned);
}
case IrOpcode::kStringFromSingleCharCode: {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kStringFromSingleCodePoint: {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kStringFromCodePointAt: {
- return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
- MachineRepresentation::kTaggedPointer);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kStringIndexOf: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
- ProcessInput(node, 1, UseInfo::AnyTagged());
- ProcessInput(node, 2, UseInfo::TaggedSigned());
- SetOutput(node, MachineRepresentation::kTaggedSigned);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 2, UseInfo::TaggedSigned());
+ SetOutput<T>(node, MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kStringLength: {
// TODO(bmeurer): The input representation should be TaggedPointer.
// Fix this once we have a dedicated StringConcat/JSStringAdd
// operator, which marks it's output as TaggedPointer properly.
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kWord32);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kWord32);
return;
}
case IrOpcode::kStringSubstring: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
- ProcessInput(node, 1, UseInfo::TruncatingWord32());
- ProcessInput(node, 2, UseInfo::TruncatingWord32());
- ProcessRemainingInputs(node, 3);
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 1, UseInfo::TruncatingWord32());
+ ProcessInput<T>(node, 2, UseInfo::TruncatingWord32());
+ ProcessRemainingInputs<T>(node, 3);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kCheckBounds:
- return VisitCheckBounds(node, lowering);
+ return VisitCheckBounds<T>(node, lowering);
case IrOpcode::kPoisonIndex: {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
return;
}
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
} else {
- VisitUnop(node,
- UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(
+ node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
+ MachineRepresentation::kTaggedPointer);
}
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kCheckIf: {
- ProcessInput(node, 0, UseInfo::Bool());
- ProcessRemainingInputs(node, 1);
- SetOutput(node, MachineRepresentation::kNone);
+ ProcessInput<T>(node, 0, UseInfo::Bool());
+ ProcessRemainingInputs<T>(node, 1);
+ SetOutput<T>(node, MachineRepresentation::kNone);
return;
}
case IrOpcode::kCheckInternalizedString: {
- VisitCheck(node, Type::InternalizedString(), lowering);
+ VisitCheck<T>(node, Type::InternalizedString(), lowering);
return;
}
case IrOpcode::kCheckNumber: {
Type const input_type = TypeOf(node->InputAt(0));
if (input_type.Is(Type::Number())) {
- VisitNoop(node, truncation);
+ VisitNoop<T>(node, truncation);
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
}
return;
}
case IrOpcode::kCheckReceiver: {
- VisitCheck(node, Type::Receiver(), lowering);
+ VisitCheck<T>(node, Type::Receiver(), lowering);
return;
}
case IrOpcode::kCheckReceiverOrNullOrUndefined: {
- VisitCheck(node, Type::ReceiverOrNullOrUndefined(), lowering);
+ VisitCheck<T>(node, Type::ReceiverOrNullOrUndefined(), lowering);
return;
}
case IrOpcode::kCheckSmi: {
const CheckParameters& params = CheckParametersOf(node->op());
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
- VisitUnop(node,
- UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros,
- params.feedback()),
- MachineRepresentation::kWord32);
+ VisitUnop<T>(node,
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros,
+ params.feedback()),
+ MachineRepresentation::kWord32);
} else {
- VisitUnop(
+ VisitUnop<T>(
node,
UseInfo::CheckedSignedSmallAsTaggedSigned(params.feedback()),
MachineRepresentation::kTaggedSigned);
}
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kCheckString: {
const CheckParameters& params = CheckParametersOf(node->op());
if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(
+ VisitUnop<T>(
node,
UseInfo::CheckedHeapObjectAsTaggedPointer(params.feedback()),
MachineRepresentation::kTaggedPointer);
@@ -3015,40 +3008,40 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckSymbol: {
- VisitCheck(node, Type::Symbol(), lowering);
+ VisitCheck<T>(node, Type::Symbol(), lowering);
return;
}
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, UseInfo::Word());
- ProcessRemainingInputs(node, 1);
- SetOutput(node, MachineRepresentation::kTaggedPointer);
+ ProcessInput<T>(node, 0, UseInfo::Word());
+ ProcessRemainingInputs<T>(node, 1);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kLoadMessage: {
- if (truncation.IsUnused()) return VisitUnused(node);
- VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
+ VisitUnop<T>(node, UseInfo::Word(), MachineRepresentation::kTagged);
return;
}
case IrOpcode::kStoreMessage: {
- ProcessInput(node, 0, UseInfo::Word());
- ProcessInput(node, 1, UseInfo::AnyTagged());
- ProcessRemainingInputs(node, 2);
- SetOutput(node, MachineRepresentation::kNone);
+ ProcessInput<T>(node, 0, UseInfo::Word());
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged());
+ ProcessRemainingInputs<T>(node, 2);
+ SetOutput<T>(node, MachineRepresentation::kNone);
return;
}
case IrOpcode::kLoadFieldByIndex: {
- if (truncation.IsUnused()) return VisitUnused(node);
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTagged);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
+ VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTagged);
return;
}
case IrOpcode::kLoadField: {
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
FieldAccess access = FieldAccessOf(node->op());
MachineRepresentation const representation =
access.machine_type.representation();
- VisitUnop(node, UseInfoForBasePointer(access), representation);
+ VisitUnop<T>(node, UseInfoForBasePointer(access), representation);
return;
}
case IrOpcode::kStoreField: {
@@ -3067,12 +3060,12 @@ class RepresentationSelector {
access.base_is_tagged, field_representation, access.offset,
access.type, input_info->representation(), value_node);
- ProcessInput(node, 0, UseInfoForBasePointer(access));
- ProcessInput(node, 1,
- TruncatingUseInfoFromRepresentation(field_representation));
- ProcessRemainingInputs(node, 2);
- SetOutput(node, MachineRepresentation::kNone);
- if (lower()) {
+ ProcessInput<T>(node, 0, UseInfoForBasePointer(access));
+ ProcessInput<T>(
+ node, 1, TruncatingUseInfoFromRepresentation(field_representation));
+ ProcessRemainingInputs<T>(node, 2);
+ SetOutput<T>(node, MachineRepresentation::kNone);
+ if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
NodeProperties::ChangeOp(
@@ -3082,15 +3075,15 @@ class RepresentationSelector {
return;
}
case IrOpcode::kLoadElement: {
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
ElementAccess access = ElementAccessOf(node->op());
- VisitBinop(node, UseInfoForBasePointer(access), UseInfo::Word(),
- access.machine_type.representation());
+ VisitBinop<T>(node, UseInfoForBasePointer(access), UseInfo::Word(),
+ access.machine_type.representation());
return;
}
case IrOpcode::kLoadStackArgument: {
- if (truncation.IsUnused()) return VisitUnused(node);
- VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
+ VisitBinop<T>(node, UseInfo::Word(), MachineRepresentation::kTagged);
return;
}
case IrOpcode::kStoreElement: {
@@ -3108,14 +3101,14 @@ class RepresentationSelector {
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, element_representation, access.type,
input_info->representation(), value_node);
- ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
- ProcessInput(node, 1, UseInfo::Word()); // index
- ProcessInput(node, 2,
- TruncatingUseInfoFromRepresentation(
- element_representation)); // value
- ProcessRemainingInputs(node, 3);
- SetOutput(node, MachineRepresentation::kNone);
- if (lower()) {
+ ProcessInput<T>(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput<T>(node, 1, UseInfo::Word()); // index
+ ProcessInput<T>(node, 2,
+ TruncatingUseInfoFromRepresentation(
+ element_representation)); // value
+ ProcessRemainingInputs<T>(node, 3);
+ SetOutput<T>(node, MachineRepresentation::kNone);
+ if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
NodeProperties::ChangeOp(
@@ -3125,99 +3118,99 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNumberIsFloat64Hole: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
return;
}
case IrOpcode::kTransitionAndStoreElement: {
Type value_type = TypeOf(node->InputAt(2));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // array
- ProcessInput(node, 1, UseInfo::Word()); // index
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // array
+ ProcessInput<T>(node, 1, UseInfo::Word()); // index
if (value_type.Is(Type::SignedSmall())) {
- ProcessInput(node, 2, UseInfo::TruncatingWord32()); // value
- if (lower()) {
+ ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // value
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
simplified()->StoreSignedSmallElement());
}
} else if (value_type.Is(Type::Number())) {
- ProcessInput(node, 2, UseInfo::TruncatingFloat64()); // value
- if (lower()) {
+ ProcessInput<T>(node, 2, UseInfo::TruncatingFloat64()); // value
+ if (lower<T>()) {
Handle<Map> double_map = DoubleMapParameterOf(node->op());
NodeProperties::ChangeOp(
node,
simplified()->TransitionAndStoreNumberElement(double_map));
}
} else if (value_type.Is(Type::NonNumber())) {
- ProcessInput(node, 2, UseInfo::AnyTagged()); // value
- if (lower()) {
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
+ if (lower<T>()) {
Handle<Map> fast_map = FastMapParameterOf(node->op());
NodeProperties::ChangeOp(
node, simplified()->TransitionAndStoreNonNumberElement(
fast_map, value_type));
}
} else {
- ProcessInput(node, 2, UseInfo::AnyTagged()); // value
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
}
- ProcessRemainingInputs(node, 3);
- SetOutput(node, MachineRepresentation::kNone);
+ ProcessRemainingInputs<T>(node, 3);
+ SetOutput<T>(node, MachineRepresentation::kNone);
return;
}
case IrOpcode::kLoadTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
- ProcessInput(node, 2, UseInfo::Word()); // external pointer
- ProcessInput(node, 3, UseInfo::Word()); // index
- ProcessRemainingInputs(node, 4);
- SetOutput(node, rep);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput<T>(node, 2, UseInfo::Word()); // external pointer
+ ProcessInput<T>(node, 3, UseInfo::Word()); // index
+ ProcessRemainingInputs<T>(node, 4);
+ SetOutput<T>(node, rep);
return;
}
case IrOpcode::kLoadDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // object
- ProcessInput(node, 1, UseInfo::Word()); // base
- ProcessInput(node, 2, UseInfo::Word()); // index
- ProcessInput(node, 3, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 4);
- SetOutput(node, rep);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput<T>(node, 1, UseInfo::Word()); // base
+ ProcessInput<T>(node, 2, UseInfo::Word()); // index
+ ProcessInput<T>(node, 3, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs<T>(node, 4);
+ SetOutput<T>(node, rep);
return;
}
case IrOpcode::kStoreTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
- ProcessInput(node, 2, UseInfo::Word()); // external pointer
- ProcessInput(node, 3, UseInfo::Word()); // index
- ProcessInput(node, 4,
- TruncatingUseInfoFromRepresentation(rep)); // value
- ProcessRemainingInputs(node, 5);
- SetOutput(node, MachineRepresentation::kNone);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput<T>(node, 2, UseInfo::Word()); // external pointer
+ ProcessInput<T>(node, 3, UseInfo::Word()); // index
+ ProcessInput<T>(node, 4,
+ TruncatingUseInfoFromRepresentation(rep)); // value
+ ProcessRemainingInputs<T>(node, 5);
+ SetOutput<T>(node, MachineRepresentation::kNone);
return;
}
case IrOpcode::kStoreDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // object
- ProcessInput(node, 1, UseInfo::Word()); // base
- ProcessInput(node, 2, UseInfo::Word()); // index
- ProcessInput(node, 3,
- TruncatingUseInfoFromRepresentation(rep)); // value
- ProcessInput(node, 4, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 5);
- SetOutput(node, MachineRepresentation::kNone);
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput<T>(node, 1, UseInfo::Word()); // base
+ ProcessInput<T>(node, 2, UseInfo::Word()); // index
+ ProcessInput<T>(node, 3,
+ TruncatingUseInfoFromRepresentation(rep)); // value
+ ProcessInput<T>(node, 4, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs<T>(node, 5);
+ SetOutput<T>(node, MachineRepresentation::kNone);
return;
}
case IrOpcode::kConvertReceiver: {
Type input_type = TypeOf(node->InputAt(0));
- VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower<T>()) {
// Try to optimize the {node} based on the input type.
if (input_type.Is(Type::Receiver())) {
DeferReplacement(node, node->InputAt(0));
@@ -3233,41 +3226,43 @@ class RepresentationSelector {
}
case IrOpcode::kPlainPrimitiveToNumber: {
if (InputIs(node, Type::Boolean())) {
- VisitUnop(node, UseInfo::Bool(), MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::Bool(), MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node, simplified()->StringToNumber());
}
} else if (truncation.IsUsedAsWord32()) {
if (InputIs(node, Type::NumberOrOddball())) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kWord32);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
simplified()->PlainPrimitiveToWord32());
}
}
} else if (truncation.TruncatesOddballAndBigIntToNumber()) {
if (InputIs(node, Type::NumberOrOddball())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kFloat64);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
simplified()->PlainPrimitiveToFloat64());
}
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
}
return;
}
@@ -3278,92 +3273,92 @@ class RepresentationSelector {
case NumberOperationHint::kSigned32:
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- VisitUnop(node,
- CheckedUseInfoAsWord32FromHint(p.hint(), p.feedback()),
- MachineRepresentation::kWord32, Type::Signed32());
+ VisitUnop<T>(node,
+ CheckedUseInfoAsWord32FromHint(p.hint(), p.feedback()),
+ MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
case NumberOperationHint::kNumberOrOddball:
- VisitUnop(node,
- CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
- MachineRepresentation::kFloat64);
+ VisitUnop<T>(
+ node, CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
+ MachineRepresentation::kFloat64);
break;
}
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kObjectIsArrayBufferView: {
// TODO(turbofan): Introduce a Type::ArrayBufferView?
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsBigInt: {
- VisitObjectIs(node, Type::BigInt(), lowering);
+ VisitObjectIs<T>(node, Type::BigInt(), lowering);
return;
}
case IrOpcode::kObjectIsCallable: {
- VisitObjectIs(node, Type::Callable(), lowering);
+ VisitObjectIs<T>(node, Type::Callable(), lowering);
return;
}
case IrOpcode::kObjectIsConstructor: {
// TODO(turbofan): Introduce a Type::Constructor?
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsDetectableCallable: {
- VisitObjectIs(node, Type::DetectableCallable(), lowering);
+ VisitObjectIs<T>(node, Type::DetectableCallable(), lowering);
return;
}
case IrOpcode::kObjectIsFiniteNumber: {
Type const input_type = GetUpperBound(node->InputAt(0));
if (input_type.Is(type_cache_->kSafeInteger)) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
- VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
lowering->simplified()->NumberIsFinite());
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
}
return;
}
case IrOpcode::kNumberIsFinite: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsSafeInteger: {
Type const input_type = GetUpperBound(node->InputAt(0));
if (input_type.Is(type_cache_->kSafeInteger)) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
- VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(
node, lowering->simplified()->NumberIsSafeInteger());
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
}
return;
}
@@ -3373,128 +3368,130 @@ class RepresentationSelector {
case IrOpcode::kObjectIsInteger: {
Type const input_type = GetUpperBound(node->InputAt(0));
if (input_type.Is(type_cache_->kSafeInteger)) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
- VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node,
lowering->simplified()->NumberIsInteger());
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
}
return;
}
case IrOpcode::kNumberIsInteger: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsMinusZero: {
Type const input_type = GetUpperBound(node->InputAt(0));
if (input_type.Is(Type::MinusZero())) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::MinusZero())) {
- VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node, simplified()->NumberIsMinusZero());
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
}
return;
}
case IrOpcode::kObjectIsNaN: {
Type const input_type = GetUpperBound(node->InputAt(0));
if (input_type.Is(Type::NaN())) {
- VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::NaN())) {
- VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower<T>()) {
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower<T>()) {
NodeProperties::ChangeOp(node, simplified()->NumberIsNaN());
}
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
}
return;
}
case IrOpcode::kNumberIsNaN: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsNonCallable: {
- VisitObjectIs(node, Type::NonCallable(), lowering);
+ VisitObjectIs<T>(node, Type::NonCallable(), lowering);
return;
}
case IrOpcode::kObjectIsNumber: {
- VisitObjectIs(node, Type::Number(), lowering);
+ VisitObjectIs<T>(node, Type::Number(), lowering);
return;
}
case IrOpcode::kObjectIsReceiver: {
- VisitObjectIs(node, Type::Receiver(), lowering);
+ VisitObjectIs<T>(node, Type::Receiver(), lowering);
return;
}
case IrOpcode::kObjectIsSmi: {
// TODO(turbofan): Optimize based on input representation.
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
}
case IrOpcode::kObjectIsString: {
- VisitObjectIs(node, Type::String(), lowering);
+ VisitObjectIs<T>(node, Type::String(), lowering);
return;
}
case IrOpcode::kObjectIsSymbol: {
- VisitObjectIs(node, Type::Symbol(), lowering);
+ VisitObjectIs<T>(node, Type::Symbol(), lowering);
return;
}
case IrOpcode::kObjectIsUndetectable: {
- VisitObjectIs(node, Type::Undetectable(), lowering);
+ VisitObjectIs<T>(node, Type::Undetectable(), lowering);
return;
}
case IrOpcode::kArgumentsFrame: {
- SetOutput(node, MachineType::PointerRepresentation());
+ SetOutput<T>(node, MachineType::PointerRepresentation());
return;
}
case IrOpcode::kArgumentsLength: {
- VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTaggedSigned);
+ VisitUnop<T>(node, UseInfo::Word(),
+ MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kNewDoubleElements:
case IrOpcode::kNewSmiOrObjectElements: {
- VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::Word(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kNewArgumentsElements: {
- VisitBinop(node, UseInfo::Word(), UseInfo::TaggedSigned(),
- MachineRepresentation::kTaggedPointer);
+ VisitBinop<T>(node, UseInfo::Word(), UseInfo::TaggedSigned(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kCheckFloat64Hole: {
@@ -3505,114 +3502,104 @@ class RepresentationSelector {
// If {mode} is allow-return-hole _and_ the {truncation}
// identifies NaN and undefined, we can just pass along
// the {truncation} and completely wipe the {node}.
- if (truncation.IsUnused()) return VisitUnused(node);
+ if (truncation.IsUnused()) return VisitUnused<T>(node);
if (truncation.TruncatesOddballAndBigIntToNumber()) {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
return;
}
}
- VisitUnop(node,
- UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
- MachineRepresentation::kFloat64, Type::Number());
- if (lower() && input_type.Is(Type::Number())) {
+ VisitUnop<T>(
+ node, UseInfo(MachineRepresentation::kFloat64, Truncation::Any()),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower<T>() && input_type.Is(Type::Number())) {
DeferReplacement(node, node->InputAt(0));
}
return;
}
case IrOpcode::kCheckNotTaggedHole: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
return;
}
case IrOpcode::kCheckClosure: {
- VisitUnop(node,
- UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(
+ node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kConvertTaggedHoleToUndefined: {
if (InputIs(node, Type::NumberOrOddball()) &&
truncation.IsUsedAsWord32()) {
// Propagate the Word32 truncation.
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::NumberOrOddball()) &&
truncation.TruncatesOddballAndBigIntToNumber()) {
// Propagate the Float64 truncation.
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::NonInternal())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
+ if (lower<T>()) DeferReplacement(node, node->InputAt(0));
} else {
// TODO(turbofan): Add a (Tagged) truncation that identifies hole
// and undefined, i.e. for a[i] === obj cases.
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
}
return;
}
case IrOpcode::kCheckEqualsSymbol:
case IrOpcode::kCheckEqualsInternalizedString:
- return VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kNone);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kNone);
case IrOpcode::kMapGuard:
// Eliminate MapGuard nodes here.
- return VisitUnused(node);
+ return VisitUnused<T>(node);
case IrOpcode::kCheckMaps: {
CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
- return VisitUnop(
+ return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(p.feedback()),
MachineRepresentation::kNone);
}
case IrOpcode::kTransitionElementsKind: {
- return VisitUnop(
+ return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
MachineRepresentation::kNone);
}
case IrOpcode::kCompareMaps:
- return VisitUnop(
+ return VisitUnop<T>(
node, UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
MachineRepresentation::kBit);
case IrOpcode::kEnsureWritableFastElements:
- return VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ return VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
case IrOpcode::kMaybeGrowFastElements: {
- Type const index_type = TypeOf(node->InputAt(2));
- Type const length_type = TypeOf(node->InputAt(3));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // object
- ProcessInput(node, 1, UseInfo::AnyTagged()); // elements
- ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
- ProcessInput(node, 3, UseInfo::TruncatingWord32()); // length
- ProcessRemainingInputs(node, 4);
- SetOutput(node, MachineRepresentation::kTaggedPointer);
- if (lower()) {
- // If the index is known to be less than the length (or if
- // we're in dead code), we know that we don't need to grow
- // the elements, so we can just remove this operation all
- // together and replace it with the elements that we have
- // on the inputs.
- if (index_type.IsNone() || length_type.IsNone() ||
- index_type.Max() < length_type.Min()) {
- DeferReplacement(node, node->InputAt(1));
- }
- }
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // elements
+ ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // index
+ ProcessInput<T>(node, 3, UseInfo::TruncatingWord32()); // length
+ ProcessRemainingInputs<T>(node, 4);
+ SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kDateNow:
- VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kTaggedPointer);
+ VisitInputs<T>(node);
+ return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kFrameState:
- return VisitFrameState(node);
+ return VisitFrameState<T>(node);
case IrOpcode::kStateValues:
- return VisitStateValues(node);
+ return VisitStateValues<T>(node);
case IrOpcode::kObjectState:
- return VisitObjectState(node);
+ return VisitObjectState<T>(node);
case IrOpcode::kObjectId:
- return SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kTypeGuard: {
// We just get rid of the sigma here, choosing the best representation
@@ -3624,49 +3611,49 @@ class RepresentationSelector {
// Here we pretend that the input has the sigma's type for the
// conversion.
UseInfo use(representation, truncation);
- if (propagate()) {
- EnqueueInput(node, 0, use);
- } else if (lower()) {
+ if (propagate<T>()) {
+ EnqueueInput<T>(node, 0, use);
+ } else if (lower<T>()) {
ConvertInput(node, 0, use, type);
}
- ProcessRemainingInputs(node, 1);
- SetOutput(node, representation);
+ ProcessRemainingInputs<T>(node, 1);
+ SetOutput<T>(node, representation);
return;
}
case IrOpcode::kFoldConstant:
- VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kTaggedPointer);
+ VisitInputs<T>(node);
+ return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kFinishRegion:
- VisitInputs(node);
+ VisitInputs<T>(node);
// Assume the output is tagged pointer.
- return SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kReturn:
- VisitReturn(node);
+ VisitReturn<T>(node);
// Assume the output is tagged.
- return SetOutput(node, MachineRepresentation::kTagged);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kFindOrderedHashMapEntry: {
Type const key_type = TypeOf(node->InputAt(1));
if (key_type.Is(Type::Signed32OrMinusZero())) {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineType::PointerRepresentation());
- if (lower()) {
+ VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineType::PointerRepresentation());
+ if (lower<T>()) {
NodeProperties::ChangeOp(
node,
lowering->simplified()->FindOrderedHashMapEntryForInt32Key());
}
} else {
- VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedSigned);
+ VisitBinop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
}
return;
}
case IrOpcode::kFastApiCall: {
- VisitFastApiCall(node);
+ VisitFastApiCall<T>(node);
return;
}
@@ -3709,17 +3696,18 @@ class RepresentationSelector {
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
case IrOpcode::kJSParseInt:
- VisitInputs(node);
+ VisitInputs<T>(node);
// Assume the output is tagged.
- return SetOutput(node, MachineRepresentation::kTagged);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kDeadValue:
- ProcessInput(node, 0, UseInfo::Any());
- return SetOutput(node, MachineRepresentation::kNone);
+ ProcessInput<T>(node, 0, UseInfo::Any());
+ return SetOutput<T>(node, MachineRepresentation::kNone);
case IrOpcode::kStaticAssert:
- return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged);
+ return VisitUnop<T>(node, UseInfo::Any(),
+ MachineRepresentation::kTagged);
case IrOpcode::kAssertType:
- return VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTagged);
+ return VisitUnop<T>(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
@@ -3797,6 +3785,8 @@ class RepresentationSelector {
private:
JSGraph* jsgraph_;
Zone* zone_; // Temporary zone.
+ // Map from node to its uses that might need to be revisited.
+ ZoneMap<Node*, ZoneVector<Node*>> might_need_revisit_;
size_t const count_; // number of nodes in the graph
ZoneVector<NodeInfo> info_; // node id -> usage information
#ifdef DEBUG
@@ -3805,7 +3795,6 @@ class RepresentationSelector {
#endif // DEBUG
NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
- Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
ZoneQueue<Node*> queue_; // queue for traversing the graph
@@ -3833,6 +3822,177 @@ class RepresentationSelector {
Zone* graph_zone() { return jsgraph_->zone(); }
};
+// Template specializations
+
+// Enqueue {use_node}'s {index} input if the {use_info} contains new information
+// for that input node. Add the input to {nodes_} if this is the first time it's
+// been visited.
+template <>
+void RepresentationSelector::EnqueueInput<PROPAGATE>(Node* use_node, int index,
+ UseInfo use_info) {
+ Node* node = use_node->InputAt(index);
+ NodeInfo* info = GetInfo(node);
+#ifdef DEBUG
+ // Check monotonicity of input requirements.
+ node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
+ use_info);
+#endif // DEBUG
+ if (info->unvisited()) {
+ // First visit of this node.
+ info->set_queued();
+ nodes_.push_back(node);
+ queue_.push(node);
+ TRACE(" initial #%i: ", node->id());
+ info->AddUse(use_info);
+ PrintTruncation(info->truncation());
+ return;
+ }
+ TRACE(" queue #%i?: ", node->id());
+ PrintTruncation(info->truncation());
+ if (info->AddUse(use_info)) {
+ // New usage information for the node is available.
+ if (!info->queued()) {
+ DCHECK(info->visited());
+ queue_.push(node);
+ info->set_queued();
+ TRACE(" added: ");
+ } else {
+ TRACE(" inqueue: ");
+ }
+ PrintTruncation(info->truncation());
+ }
+}
+
+template <>
+void RepresentationSelector::SetOutput<PROPAGATE>(
+ Node* node, MachineRepresentation representation, Type restriction_type) {
+ NodeInfo* const info = GetInfo(node);
+ info->set_restriction_type(restriction_type);
+}
+
+template <>
+void RepresentationSelector::SetOutput<RETYPE>(
+ Node* node, MachineRepresentation representation, Type restriction_type) {
+ NodeInfo* const info = GetInfo(node);
+ DCHECK(info->restriction_type().Is(restriction_type));
+ DCHECK(restriction_type.Is(info->restriction_type()));
+ info->set_output(representation);
+}
+
+template <>
+void RepresentationSelector::SetOutput<LOWER>(
+ Node* node, MachineRepresentation representation, Type restriction_type) {
+ NodeInfo* const info = GetInfo(node);
+ DCHECK_EQ(info->representation(), representation);
+ DCHECK(info->restriction_type().Is(restriction_type));
+ DCHECK(restriction_type.Is(info->restriction_type()));
+ USE(info);
+}
+
+template <>
+void RepresentationSelector::ProcessInput<PROPAGATE>(Node* node, int index,
+ UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+ EnqueueInput<PROPAGATE>(node, index, use);
+}
+
+template <>
+void RepresentationSelector::ProcessInput<RETYPE>(Node* node, int index,
+ UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+}
+
+template <>
+void RepresentationSelector::ProcessInput<LOWER>(Node* node, int index,
+ UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+ ConvertInput(node, index, use);
+}
+
+template <>
+void RepresentationSelector::ProcessRemainingInputs<PROPAGATE>(Node* node,
+ int index) {
+ DCHECK_GE(index, NodeProperties::PastValueIndex(node));
+ DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+ for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
+ i < NodeProperties::PastEffectIndex(node); ++i) {
+ EnqueueInput<PROPAGATE>(node, i); // Effect inputs: just visit
+ }
+ for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
+ i < NodeProperties::PastControlIndex(node); ++i) {
+ EnqueueInput<PROPAGATE>(node, i); // Control inputs: just visit
+ }
+}
+
+// The default, most general visitation case. For {node}, process all value,
+// context, frame state, effect, and control inputs, assuming that value
+// inputs should have {kRepTagged} representation and can observe all output
+// values {kTypeAny}.
+template <>
+void RepresentationSelector::VisitInputs<PROPAGATE>(Node* node) {
+ int tagged_count = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op()) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ // Visit value, context and frame state inputs as tagged.
+ for (int i = 0; i < tagged_count; i++) {
+ ProcessInput<PROPAGATE>(node, i, UseInfo::AnyTagged());
+ }
+ // Only enqueue other inputs (effects, control).
+ for (int i = tagged_count; i < node->InputCount(); i++) {
+ EnqueueInput<PROPAGATE>(node, i);
+ }
+}
+
+template <>
+void RepresentationSelector::VisitInputs<LOWER>(Node* node) {
+ int tagged_count = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op()) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ // Visit value, context and frame state inputs as tagged.
+ for (int i = 0; i < tagged_count; i++) {
+ ProcessInput<LOWER>(node, i, UseInfo::AnyTagged());
+ }
+}
+
+template <>
+void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
+ // If the node is effectful and it produces an impossible value, then we
+ // insert an Unreachable node after it.
+ if (node->op()->ValueOutputCount() > 0 &&
+ node->op()->EffectOutputCount() > 0 &&
+ node->opcode() != IrOpcode::kUnreachable && TypeOf(node).IsNone()) {
+ Node* control = (node->op()->ControlOutputCount() == 0)
+ ? NodeProperties::GetControlInput(node, 0)
+ : NodeProperties::FindSuccessfulControlProjection(node);
+
+ Node* unreachable =
+ graph()->NewNode(common()->Unreachable(), node, control);
+
+ // Insert unreachable node and replace all the effect uses of the {node}
+ // with the new unreachable node.
+ for (Edge edge : node->use_edges()) {
+ if (!NodeProperties::IsEffectEdge(edge)) continue;
+ // Make sure to not overwrite the unreachable node's input. That would
+ // create a cycle.
+ if (edge.from() == unreachable) continue;
+ // Avoid messing up the exceptional path.
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+ DCHECK_EQ(NodeProperties::GetControlInput(edge.from()), node);
+ continue;
+ }
+
+ edge.UpdateTo(unreachable);
+ }
+ }
+}
+
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
Zone* zone,
SourcePositionTable* source_positions,
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 2b1e0ab99e..1be2bed001 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -839,7 +839,6 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckedUint64ToTaggedSigned, 1, 1)
#define CHECKED_BOUNDS_OP_LIST(V) \
- V(CheckBounds) \
V(CheckedUint32Bounds) \
V(CheckedUint64Bounds)
@@ -889,19 +888,26 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
#undef CHECKED_WITH_FEEDBACK
-#define CHECKED_BOUNDS(Name) \
- struct Name##Operator final : public Operator1<CheckBoundsParameters> { \
- Name##Operator(FeedbackSource feedback, CheckBoundsParameters::Mode mode) \
- : Operator1<CheckBoundsParameters>( \
- IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
- #Name, 2, 1, 1, 1, 1, 0, \
- CheckBoundsParameters(feedback, mode)) {} \
- }; \
- Name##Operator k##Name##Deopting = { \
- FeedbackSource(), CheckBoundsParameters::kDeoptOnOutOfBounds}; \
- Name##Operator k##Name##Aborting = { \
- FeedbackSource(), CheckBoundsParameters::kAbortOnOutOfBounds};
+#define CHECKED_BOUNDS(Name) \
+ struct Name##Operator final : public Operator1<CheckBoundsParameters> { \
+ Name##Operator(FeedbackSource feedback, CheckBoundsFlags flags) \
+ : Operator1<CheckBoundsParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, 2, 1, 1, 1, 1, 0, \
+ CheckBoundsParameters(feedback, flags)) {} \
+ }; \
+ Name##Operator k##Name = {FeedbackSource(), CheckBoundsFlags()}; \
+ Name##Operator k##Name##Aborting = {FeedbackSource(), \
+ CheckBoundsFlag::kAbortOnOutOfBounds};
CHECKED_BOUNDS_OP_LIST(CHECKED_BOUNDS)
+ CHECKED_BOUNDS(CheckBounds)
+ // For IrOpcode::kCheckBounds, we allow additional flags:
+ CheckBoundsOperator kCheckBoundsConverting = {
+ FeedbackSource(), CheckBoundsFlag::kConvertStringAndMinusZero};
+ CheckBoundsOperator kCheckBoundsAbortingAndConverting = {
+ FeedbackSource(),
+ CheckBoundsFlags(CheckBoundsFlag::kAbortOnOutOfBounds) |
+ CheckBoundsFlags(CheckBoundsFlag::kConvertStringAndMinusZero)};
#undef CHECKED_BOUNDS
template <DeoptimizeReason kDeoptimizeReason>
@@ -1206,23 +1212,45 @@ GET_FROM_CACHE(LoadFieldByIndex)
CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
#undef GET_FROM_CACHE_WITH_FEEDBACK
-#define GET_FROM_CACHE_WITH_FEEDBACK(Name) \
- const Operator* SimplifiedOperatorBuilder::Name( \
- const FeedbackSource& feedback, CheckBoundsParameters::Mode mode) { \
- if (!feedback.IsValid()) { \
- switch (mode) { \
- case CheckBoundsParameters::kDeoptOnOutOfBounds: \
- return &cache_.k##Name##Deopting; \
- case CheckBoundsParameters::kAbortOnOutOfBounds: \
- return &cache_.k##Name##Aborting; \
- } \
- } \
- return new (zone()) \
- SimplifiedOperatorGlobalCache::Name##Operator(feedback, mode); \
+#define GET_FROM_CACHE_WITH_FEEDBACK(Name) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ const FeedbackSource& feedback, CheckBoundsFlags flags) { \
+ DCHECK(!(flags & CheckBoundsFlag::kConvertStringAndMinusZero)); \
+ if (!feedback.IsValid()) { \
+ if (flags & CheckBoundsFlag::kAbortOnOutOfBounds) { \
+ return &cache_.k##Name##Aborting; \
+ } else { \
+ return &cache_.k##Name; \
+ } \
+ } \
+ return new (zone()) \
+ SimplifiedOperatorGlobalCache::Name##Operator(feedback, flags); \
}
CHECKED_BOUNDS_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
#undef GET_FROM_CACHE_WITH_FEEDBACK
+// For IrOpcode::kCheckBounds, we allow additional flags:
+const Operator* SimplifiedOperatorBuilder::CheckBounds(
+ const FeedbackSource& feedback, CheckBoundsFlags flags) {
+ if (!feedback.IsValid()) {
+ if (flags & CheckBoundsFlag::kAbortOnOutOfBounds) {
+ if (flags & CheckBoundsFlag::kConvertStringAndMinusZero) {
+ return &cache_.kCheckBoundsAbortingAndConverting;
+ } else {
+ return &cache_.kCheckBoundsAborting;
+ }
+ } else {
+ if (flags & CheckBoundsFlag::kConvertStringAndMinusZero) {
+ return &cache_.kCheckBoundsConverting;
+ } else {
+ return &cache_.kCheckBounds;
+ }
+ }
+ }
+ return new (zone())
+ SimplifiedOperatorGlobalCache::CheckBoundsOperator(feedback, flags);
+}
+
bool IsCheckedWithFeedback(const Operator* op) {
#define CASE(Name, ...) case IrOpcode::k##Name:
switch (op->opcode()) {
@@ -1628,23 +1656,15 @@ CheckParameters const& CheckParametersOf(Operator const* op) {
bool operator==(CheckBoundsParameters const& lhs,
CheckBoundsParameters const& rhs) {
return lhs.check_parameters() == rhs.check_parameters() &&
- lhs.mode() == rhs.mode();
+ lhs.flags() == rhs.flags();
}
size_t hash_value(CheckBoundsParameters const& p) {
- return base::hash_combine(hash_value(p.check_parameters()), p.mode());
+ return base::hash_combine(hash_value(p.check_parameters()), p.flags());
}
std::ostream& operator<<(std::ostream& os, CheckBoundsParameters const& p) {
- os << p.check_parameters() << ", ";
- switch (p.mode()) {
- case CheckBoundsParameters::kDeoptOnOutOfBounds:
- os << "deopt";
- break;
- case CheckBoundsParameters::kAbortOnOutOfBounds:
- os << "abort";
- break;
- }
+ os << p.check_parameters() << ", " << p.flags();
return os;
}
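
Note on the CheckBounds changes above and in the simplified-operator.h hunk that follows: the two-valued CheckBoundsParameters::Mode becomes a CheckBoundsFlags bitset (base::Flags<CheckBoundsFlag>), so kAbortOnOutOfBounds and kConvertStringAndMinusZero can be carried independently, with one cached operator per combination. The stand-alone sketch below mimics only the flag-combination idea; V8's real base::Flags template is more general.

// Sketch of a typed flag set over an enum class; illustrative only.
#include <cstdint>

enum class CheckBoundsFlag : uint8_t {
  kConvertStringAndMinusZero = 1 << 0,
  kAbortOnOutOfBounds = 1 << 1,
};

class CheckBoundsFlags {
 public:
  constexpr CheckBoundsFlags() : bits_(0) {}
  constexpr CheckBoundsFlags(CheckBoundsFlag f)
      : bits_(static_cast<uint8_t>(f)) {}

  // Combining flags keeps the result typed, unlike raw integer masks.
  constexpr CheckBoundsFlags operator|(CheckBoundsFlags other) const {
    return CheckBoundsFlags(static_cast<uint8_t>(bits_ | other.bits_));
  }
  // Testing mirrors the `flags & CheckBoundsFlag::k...` pattern in the diff.
  constexpr bool operator&(CheckBoundsFlag f) const {
    return (bits_ & static_cast<uint8_t>(f)) != 0;
  }

 private:
  constexpr explicit CheckBoundsFlags(uint8_t bits) : bits_(bits) {}
  uint8_t bits_;
};

// Both options carried at once, which the old Mode enum could not express.
constexpr CheckBoundsFlags kAbortingAndConverting =
    CheckBoundsFlags(CheckBoundsFlag::kAbortOnOutOfBounds) |
    CheckBoundsFlags(CheckBoundsFlag::kConvertStringAndMinusZero);
static_assert(kAbortingAndConverting & CheckBoundsFlag::kAbortOnOutOfBounds,
              "combined flags retain each bit");

int main() {
  return (kAbortingAndConverting &
          CheckBoundsFlag::kConvertStringAndMinusZero) ? 0 : 1;
}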
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 251bb43678..df2516646b 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -220,19 +220,24 @@ std::ostream& operator<<(std::ostream&, CheckParameters const&);
CheckParameters const& CheckParametersOf(Operator const*) V8_WARN_UNUSED_RESULT;
+enum class CheckBoundsFlag : uint8_t {
+ kConvertStringAndMinusZero = 1 << 0, // instead of deopting on such inputs
+ kAbortOnOutOfBounds = 1 << 1, // instead of deopting if input is OOB
+};
+using CheckBoundsFlags = base::Flags<CheckBoundsFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(CheckBoundsFlags)
+
class CheckBoundsParameters final {
public:
- enum Mode { kAbortOnOutOfBounds, kDeoptOnOutOfBounds };
+ CheckBoundsParameters(const FeedbackSource& feedback, CheckBoundsFlags flags)
+ : check_parameters_(feedback), flags_(flags) {}
- CheckBoundsParameters(const FeedbackSource& feedback, Mode mode)
- : check_parameters_(feedback), mode_(mode) {}
-
- Mode mode() const { return mode_; }
+ CheckBoundsFlags flags() const { return flags_; }
const CheckParameters& check_parameters() const { return check_parameters_; }
private:
CheckParameters check_parameters_;
- Mode mode_;
+ CheckBoundsFlags flags_;
};
bool operator==(CheckBoundsParameters const&, CheckBoundsParameters const&);
@@ -377,10 +382,9 @@ size_t hash_value(const CheckMinusZeroParameters& params);
bool operator==(CheckMinusZeroParameters const&,
CheckMinusZeroParameters const&);
-// Flags for map checks.
enum class CheckMapsFlag : uint8_t {
kNone = 0u,
- kTryMigrateInstance = 1u << 0, // Try instance migration.
+ kTryMigrateInstance = 1u << 0,
};
using CheckMapsFlags = base::Flags<CheckMapsFlag>;
@@ -784,12 +788,11 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* MapGuard(ZoneHandleSet<Map> maps);
const Operator* CheckBounds(const FeedbackSource& feedback,
- CheckBoundsParameters::Mode mode =
- CheckBoundsParameters::kDeoptOnOutOfBounds);
+ CheckBoundsFlags flags = {});
const Operator* CheckedUint32Bounds(const FeedbackSource& feedback,
- CheckBoundsParameters::Mode mode);
+ CheckBoundsFlags flags);
const Operator* CheckedUint64Bounds(const FeedbackSource& feedback,
- CheckBoundsParameters::Mode mode);
+ CheckBoundsFlags flags);
const Operator* CheckClosure(const Handle<FeedbackCell>& feedback_cell);
const Operator* CheckEqualsInternalizedString();
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 50e2a640f6..c8c422f66b 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -38,8 +38,12 @@ Reduction TypedOptimization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
+ case IrOpcode::kMaybeGrowFastElements:
+ return ReduceMaybeGrowFastElements(node);
case IrOpcode::kCheckHeapObject:
return ReduceCheckHeapObject(node);
+ case IrOpcode::kCheckBounds:
+ return ReduceCheckBounds(node);
case IrOpcode::kCheckNotTaggedHole:
return ReduceCheckNotTaggedHole(node);
case IrOpcode::kCheckMaps:
@@ -159,6 +163,48 @@ Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceMaybeGrowFastElements(Node* node) {
+ Node* const elements = NodeProperties::GetValueInput(node, 1);
+ Node* const index = NodeProperties::GetValueInput(node, 2);
+ Node* const length = NodeProperties::GetValueInput(node, 3);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+
+ Type const index_type = NodeProperties::GetType(index);
+ Type const length_type = NodeProperties::GetType(length);
+ CHECK(index_type.Is(Type::Unsigned31()));
+ CHECK(length_type.Is(Type::Unsigned31()));
+
+ if (!index_type.IsNone() && !length_type.IsNone() &&
+ index_type.Max() < length_type.Min()) {
+ Node* check_bounds = graph()->NewNode(
+ simplified()->CheckBounds(FeedbackSource{},
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, effect, control);
+ ReplaceWithValue(node, elements);
+ return Replace(check_bounds);
+ }
+
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckBounds(Node* node) {
+ CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type const input_type = NodeProperties::GetType(input);
+ if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero &&
+ !input_type.Maybe(Type::String()) &&
+ !input_type.Maybe(Type::MinusZero())) {
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->CheckBounds(
+ p.check_parameters().feedback(),
+ p.flags().without(CheckBoundsFlag::kConvertStringAndMinusZero)));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckNotTaggedHole(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type const input_type = NodeProperties::GetType(input);
@@ -285,7 +331,7 @@ Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
// NumberToUint32(NumberDivide(lhs, rhs))
//
// and just smash the type [0...lhs.Max] on the {node},
- // as the truncated result must be loewr than {lhs}'s maximum
+ // as the truncated result must be lower than {lhs}'s maximum
// value (note that {rhs} cannot be less than 1 due to the
// plain-number type constraint on the {node}).
NodeProperties::ChangeOp(node, simplified()->NumberToUint32());
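
Note on the typed-optimization.cc hunks above: ReduceCheckBounds strips CheckBoundsFlag::kConvertStringAndMinusZero once the input type provably excludes String and -0, and ReduceMaybeGrowFastElements turns the growth into an aborting bounds check when the index type's maximum is below the length type's minimum. A toy version of that range argument, with plain doubles standing in for TurboFan's Type ranges:

// Toy version of the range reasoning behind ReduceMaybeGrowFastElements:
// if the index's maximum is provably below the length's minimum, the
// elements can never need to grow, so the node can be replaced by a
// cheaper aborting bounds check.
#include <cassert>

struct Range { double min; double max; };  // stand-in for compiler Types

bool GrowthProvablyUnnecessary(Range index, Range length) {
  return index.max < length.min;
}

int main() {
  assert(GrowthProvablyUnnecessary({0, 3}, {4, 16}));    // always in bounds
  assert(!GrowthProvablyUnnecessary({0, 10}, {4, 16}));  // may still grow
}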
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 58efff918f..336c29540d 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -37,6 +37,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
private:
Reduction ReduceConvertReceiver(Node* node);
+ Reduction ReduceMaybeGrowFastElements(Node* node);
+ Reduction ReduceCheckBounds(Node* node);
Reduction ReduceCheckHeapObject(Node* node);
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckNumber(Node* node);
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 08e6e8023d..47280becbd 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -224,6 +224,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
+ case JS_AGGREGATE_ERROR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_REG_EXP_TYPE:
@@ -240,16 +241,17 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_FINALIZATION_REGISTRY_TYPE:
- case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
+ case WASM_ARRAY_TYPE:
case WASM_EXCEPTION_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_INSTANCE_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
+ case WASM_STRUCT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
@@ -351,6 +353,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case TUPLE2_TYPE:
case BREAK_POINT_TYPE:
case BREAK_POINT_INFO_TYPE:
+ case WASM_VALUE_TYPE:
case CACHED_TEMPLATE_OBJECT_TYPE:
case ENUM_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
@@ -368,8 +371,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
-#define MAKE_TORQUE_CLASS_TYPE(V) case V:
- TORQUE_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+#define MAKE_TORQUE_CLASS_TYPE(INSTANCE_TYPE, Name, name) case INSTANCE_TYPE:
+ TORQUE_INTERNAL_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index fb1fa37d9d..49775a3856 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -112,7 +112,7 @@ namespace compiler {
V(Null, 1u << 7) \
V(Undefined, 1u << 8) \
V(Boolean, 1u << 9) \
- V(Unsigned30, 1u << 10) \
+ V(Unsigned30, 1u << 10) \
V(MinusZero, 1u << 11) \
V(NaN, 1u << 12) \
V(Symbol, 1u << 13) \
@@ -129,6 +129,9 @@ namespace compiler {
V(ExternalPointer, 1u << 25) \
V(Array, 1u << 26) \
V(BigInt, 1u << 27) \
+ /* TODO(v8:10391): Remove this type once all ExternalPointer usages are */ \
+ /* sandbox-ready. */ \
+ V(SandboxedExternalPointer, 1u << 28) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -192,7 +195,8 @@ namespace compiler {
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | \
kUndefined | kHole | kReceiver) \
- V(Internal, kHole | kExternalPointer | kOtherInternal) \
+ V(Internal, kHole | kExternalPointer | \
+ kSandboxedExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonBigInt, kNonBigIntPrimitive | kReceiver) \
V(NonNumber, kBigInt | kUnique | kString | kInternal) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 1be87c9463..de560da00c 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -1623,6 +1623,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord32Rol:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kWord32Clz:
@@ -1637,6 +1638,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
+ case IrOpcode::kWord64Rol:
case IrOpcode::kWord64Ror:
case IrOpcode::kWord64Clz:
case IrOpcode::kWord64Popcnt:
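
Note on the new kWord32Rol/kWord64Rol verifier cases above: in the wasm-compiler.cc hunks that follow, i32.rotl/i64.rotl use the machine's native rol operator when MachineOperatorBuilder reports it as supported, and otherwise keep the existing ror-based fallback ("Implement Rol by Ror"). A small sketch of the identity behind that fallback, rotl(x, n) == rotr(x, (32 - n) mod 32):

// Sketch of the rotate-left-via-rotate-right fallback for machines
// without a native rol instruction.
#include <cassert>
#include <cstdint>

uint32_t Rotr32(uint32_t x, uint32_t n) {
  n &= 31;  // keep the shift amounts in range, avoiding undefined behaviour
  return (x >> n) | (x << ((32 - n) & 31));
}

uint32_t Rotl32ViaRotr(uint32_t x, uint32_t n) {
  return Rotr32(x, (32 - n) & 31);
}

int main() {
  assert(Rotl32ViaRotr(0x80000001u, 1) == 0x00000003u);
  assert(Rotl32ViaRotr(0x12345678u, 0) == 0x12345678u);
}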
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 9373a2b4b9..ac7a681336 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -11,7 +11,6 @@
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
#include "src/base/v8-fallthrough.h"
-#include "src/builtins/builtins.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/code-factory.h"
@@ -40,6 +39,7 @@
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/heap-number.h"
+#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/vector.h"
@@ -50,6 +50,7 @@
#include "src/wasm/memory-tracing.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
@@ -78,6 +79,17 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
+// We would like to use gasm_->Call() to implement this macro,
+// but this doesn't work currently when we try to call it from functions
+// which set IfSuccess/IfFailure control paths (e.g. within Throw()).
+// TODO(manoskouk): Maybe clean this up at some point?
+#define CALL_BUILTIN(name, ...) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->common()->Call(GetBuiltinCallDescriptor<name##Descriptor>( \
+ this, StubCallMode::kCallBuiltinPointer)), \
+ GetBuiltinPointerTarget(Builtins::k##name), ##__VA_ARGS__, effect(), \
+ control()))
+
#define LOAD_INSTANCE_FIELD(name, type) \
gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
@@ -144,6 +156,19 @@ bool ContainsInt64(const wasm::FunctionSig* sig) {
}
return false;
}
+
+template <typename BuiltinDescriptor>
+CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
+ StubCallMode stub_mode) {
+ BuiltinDescriptor interface_descriptor;
+ return Linkage::GetStubCallDescriptor(
+ builder->mcgraph()->zone(), // zone
+ interface_descriptor, // descriptor
+ interface_descriptor.GetStackParameterCount(), // stack parameter count
+ CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode); // stub call mode
+}
} // namespace
class WasmGraphAssembler : public GraphAssembler {
@@ -256,17 +281,29 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
}
Node* WasmGraphBuilder::RefNull() {
- Node* isolate_root = BuildLoadIsolateRoot();
return LOAD_FULL_POINTER(
- isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kNullValue));
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
- Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(function_index))};
- Node* result =
- BuildCallToRuntime(Runtime::kWasmRefFunc, args, arraysize(args));
- return result;
+ auto call_descriptor = GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmRefFunc, RelocInfo::WASM_STUB_CALL);
+
+ return SetEffectControl(
+ graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
+ Uint32Constant(function_index), effect(), control()));
+}
+
+Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
+ wasm::WasmCodePosition position) {
+ TrapIfTrue(wasm::kTrapIllegalCast, gasm_->WordEqual(arg, RefNull()),
+ position);
+ return arg;
}
Node* WasmGraphBuilder::NoContextConstant() {
@@ -414,7 +451,11 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
right = MaskShiftCount32(right);
break;
case wasm::kExprI32Rol:
- right = MaskShiftCount32(right);
+ if (m->Word32Rol().IsSupported()) {
+ op = m->Word32Rol().op();
+ right = MaskShiftCount32(right);
+ break;
+ }
return BuildI32Rol(left, right);
case wasm::kExprI32Eq:
op = m->Word32Equal();
@@ -525,6 +566,14 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
right = MaskShiftCount64(right);
break;
case wasm::kExprI64Rol:
+ if (m->Word64Rol().IsSupported()) {
+ op = m->Word64Rol().op();
+ right = MaskShiftCount64(right);
+ break;
+ } else if (m->Word32Rol().IsSupported()) {
+ op = m->Word64Rol().placeholder();
+ break;
+ }
return BuildI64Rol(left, right);
case wasm::kExprF32CopySign:
return BuildF32CopySign(left, right);
@@ -611,6 +660,8 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
break;
case wasm::kExprF64Mod:
return BuildF64Mod(left, right);
+ case wasm::kExprRefEq:
+ return gasm_->TaggedEqual(left, right);
case wasm::kExprI32AsmjsDivS:
return BuildI32AsmjsDivS(left, right);
case wasm::kExprI32AsmjsDivU:
@@ -1073,8 +1124,9 @@ Node* WasmGraphBuilder::Return(Vector<Node*> vals) {
return ret;
}
-Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
- TrapIfFalse(wasm::TrapReason::kTrapUnreachable, Int32Constant(0), position);
+Node* WasmGraphBuilder::Trap(wasm::TrapReason reason,
+ wasm::WasmCodePosition position) {
+ TrapIfFalse(reason, Int32Constant(0), position);
Return(Vector<Node*>{});
return nullptr;
}
@@ -1111,8 +1163,9 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
return node;
}
-static bool ReverseBytesSupported(MachineOperatorBuilder* m,
- size_t size_in_bytes) {
+namespace {
+
+bool ReverseBytesSupported(MachineOperatorBuilder* m, size_t size_in_bytes) {
switch (size_in_bytes) {
case 4:
case 16:
@@ -1125,6 +1178,8 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
+} // namespace
+
Node* WasmGraphBuilder::BuildChangeEndiannessStore(
Node* node, MachineRepresentation mem_rep, wasm::ValueType wasmtype) {
Node* result;
@@ -2017,8 +2072,12 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
arraysize(create_parameters));
SetSourcePosition(except_obj, position);
- Node* values_array =
- BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
+ Node* values_array = CALL_BUILTIN(
+ WasmGetOwnProperty, except_obj,
+ LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(
+ RootIndex::kwasm_exception_values_symbol)),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
@@ -2061,6 +2120,9 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
case wasm::ValueType::kFuncRef:
case wasm::ValueType::kNullRef:
case wasm::ValueType::kExnRef:
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
@@ -2154,14 +2216,23 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj,
wasm::WasmCodePosition position) {
TrapIfTrue(wasm::kTrapBrOnExnNullRef, gasm_->WordEqual(RefNull(), except_obj),
position);
- return BuildCallToRuntime(Runtime::kWasmExceptionGetTag, &except_obj, 1);
+ return CALL_BUILTIN(
+ WasmGetOwnProperty, except_obj,
+ LOAD_FULL_POINTER(
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kwasm_exception_tag_symbol)),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
}
Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
const wasm::WasmException* exception,
Vector<Node*> values) {
- Node* values_array =
- BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
+ Node* values_array = CALL_BUILTIN(
+ WasmGetOwnProperty, except_obj,
+ LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(
+ RootIndex::kwasm_exception_values_symbol)),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
DCHECK_EQ(sig->parameter_count(), values.size());
@@ -2202,6 +2273,9 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
case wasm::ValueType::kFuncRef:
case wasm::ValueType::kNullRef:
case wasm::ValueType::kExnRef:
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
@@ -2504,6 +2578,11 @@ Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
ZeroCheck64(wasm::kTrapRemByZero, right, position));
}
+Node* WasmGraphBuilder::GetBuiltinPointerTarget(int builtin_id) {
+ static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
+ return graph()->NewNode(mcgraph()->common()->NumberConstant(builtin_id));
+}
+
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ExternalReference ref,
MachineType result_type,
@@ -2779,7 +2858,7 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
&ift_instances);
- const wasm::FunctionSig* sig = env_->module->signatures[sig_index];
+ const wasm::FunctionSig* sig = env_->module->signature(sig_index);
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
@@ -2882,6 +2961,15 @@ Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
kReturnCall);
}
+Node* WasmGraphBuilder::BrOnNull(Node* ref_object, Node** null_node,
+ Node** non_null_node) {
+ BranchExpectFalse(gasm_->WordEqual(ref_object, RefNull()), null_node,
+ non_null_node);
+ // Return value is not used, but we need it for compatibility
+ // with graph-builder-interface.
+ return nullptr;
+}
+
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have Rol opcode.
// TODO(weiliang): support Word32Rol opcode in TurboFan.
@@ -2910,19 +2998,6 @@ Node* WasmGraphBuilder::Invert(Node* node) {
return Unop(wasm::kExprI32Eqz, node);
}
-bool CanCover(Node* value, IrOpcode::Value opcode) {
- if (value->opcode() != opcode) return false;
- bool first = true;
- for (Edge const edge : value->use_edges()) {
- if (NodeProperties::IsControlEdge(edge)) continue;
- if (NodeProperties::IsEffectEdge(edge)) continue;
- DCHECK(NodeProperties::IsValueEdge(edge));
- if (!first) return false;
- first = false;
- }
- return true;
-}
-
Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
if (mcgraph()->machine()->Is64()) {
value =
@@ -3342,73 +3417,10 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
graph()->NewNode(op, base, offset, val, effect(), control()));
}
-void WasmGraphBuilder::BoundsCheckTable(uint32_t table_index, Node* entry_index,
- wasm::WasmCodePosition position,
- wasm::TrapReason trap_reason,
- Node** base_node) {
- Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer());
- Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index);
-
- int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
- WasmTableObject::kCurrentLengthOffset + 1;
- Node* length_smi = gasm_->Load(
- assert_size(length_field_size, MachineType::TaggedSigned()), table,
- wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset));
- Node* length = BuildChangeSmiToInt32(length_smi);
-
- // Bounds check against the table size.
- Node* in_bounds = graph()->NewNode(mcgraph()->machine()->Uint32LessThan(),
- entry_index, length);
- TrapIfFalse(trap_reason, in_bounds, position);
-
- if (base_node) {
- int storage_field_size = WasmTableObject::kEntriesOffsetEnd -
- WasmTableObject::kEntriesOffset + 1;
- *base_node = gasm_->Load(
- assert_size(storage_field_size, MachineType::TaggedPointer()), table,
- wasm::ObjectAccess::ToTagged(WasmTableObject::kEntriesOffset));
- }
-}
-
-void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index,
- Node* entry_index,
- wasm::WasmCodePosition position,
- Node** base_node,
- Node** offset_node) {
- BoundsCheckTable(table_index, entry_index, position,
- wasm::kTrapTableOutOfBounds, base_node);
- // From the index, calculate the actual offset in the FixeArray. This
- // is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
- // wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
- Node* index_times_tagged_size = graph()->NewNode(
- mcgraph()->machine()->IntMul(), Uint32ToUintptr(entry_index),
- mcgraph()->Int32Constant(kTaggedSize));
-
- *offset_node = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), index_times_tagged_size,
- mcgraph()->IntPtrConstant(
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
-}
-
Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
- if (env_->module->tables[table_index].type == wasm::kWasmAnyRef ||
- env_->module->tables[table_index].type == wasm::kWasmNullRef ||
- env_->module->tables[table_index].type == wasm::kWasmExnRef) {
- Node* base = nullptr;
- Node* offset = nullptr;
- GetTableBaseAndOffset(table_index, index, position, &base, &offset);
- return gasm_->Load(MachineType::AnyTagged(), base, offset);
- }
- // We access funcref tables through runtime calls.
- WasmTableGetDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallWasmRuntimeStub); // stub call mode
+ auto call_descriptor = GetBuiltinCallDescriptor<WasmTableGetDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched at relocation.
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
@@ -3416,40 +3428,21 @@ Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
return SetEffectControl(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), call_target,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), index,
- effect(), control()));
+ IntPtrConstant(table_index), index, effect(), control()));
}
Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
- if (env_->module->tables[table_index].type == wasm::kWasmAnyRef ||
- env_->module->tables[table_index].type == wasm::kWasmNullRef ||
- env_->module->tables[table_index].type == wasm::kWasmExnRef) {
- Node* base = nullptr;
- Node* offset = nullptr;
- GetTableBaseAndOffset(table_index, index, position, &base, &offset);
- return STORE_RAW_NODE_OFFSET(
- base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
- } else {
- // We access funcref tables through runtime calls.
- WasmTableSetDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallWasmRuntimeStub); // stub call mode
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
+ auto call_descriptor = GetBuiltinCallDescriptor<WasmTableSetDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- index, val, effect(), control()));
- }
+ return SetEffectControl(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), call_target,
+ IntPtrConstant(table_index), index, val, effect(), control()));
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
@@ -4023,19 +4016,6 @@ Signature<MachineRepresentation>* CreateMachineSignature(
return builder.Build();
}
-template <typename BuiltinDescriptor>
-CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
- StubCallMode stub_mode) {
- BuiltinDescriptor interface_descriptor;
- return Linkage::GetStubCallDescriptor(
- builder->mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode); // stub call mode
-}
-
} // namespace
void WasmGraphBuilder::AddInt64LoweringReplacement(
@@ -4157,6 +4137,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Qfms:
return graph()->NewNode(mcgraph()->machine()->F64x2Qfms(), inputs[0],
inputs[1], inputs[2]);
+ case wasm::kExprF64x2Pmin:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Pmin(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Pmax:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Pmax(), inputs[0],
+ inputs[1]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4222,6 +4208,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Qfms:
return graph()->NewNode(mcgraph()->machine()->F32x4Qfms(), inputs[0],
inputs[1], inputs[2]);
+ case wasm::kExprF32x4Pmin:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Pmin(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Pmax:
+ return graph()->NewNode(mcgraph()->machine()->F32x4Pmax(), inputs[0],
+ inputs[1]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
@@ -5048,6 +5040,194 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
return BuildCallToRuntime(Runtime::kWasmTableFill, args, arraysize(args));
}
+namespace {
+
+MachineType FieldType(const wasm::StructType* type, uint32_t field_index) {
+ return MachineType::TypeForRepresentation(
+ type->field(field_index).machine_representation());
+}
+
+Node* FieldOffset(MachineGraph* graph, const wasm::StructType* type,
+ uint32_t field_index) {
+ int offset = WasmStruct::kHeaderSize + type->field_offset(field_index) -
+ kHeapObjectTag;
+ return graph->IntPtrConstant(offset);
+}
+
+// Set a field of a struct, without checking if the struct is null.
+// Helper method for StructNew and StructSet.
+Node* StoreStructFieldUnchecked(MachineGraph* graph, WasmGraphAssembler* gasm,
+ Node* struct_object,
+ const wasm::StructType* type,
+ uint32_t field_index, Node* value) {
+ WriteBarrierKind write_barrier = type->field(field_index).IsReferenceType()
+ ? kPointerWriteBarrier
+ : kNoWriteBarrier;
+ StoreRepresentation rep(type->field(field_index).machine_representation(),
+ write_barrier);
+ Node* offset = FieldOffset(graph, type, field_index);
+ return gasm->Store(rep, struct_object, offset, value);
+}
+
+Node* ArrayElementOffset(GraphAssembler* gasm, Node* index,
+ wasm::ValueType element_type) {
+ return gasm->Int32Add(
+ gasm->Int32Constant(WasmArray::kHeaderSize - kHeapObjectTag),
+ gasm->Int32Mul(index,
+ gasm->Int32Constant(element_type.element_size_bytes())));
+}
+
+Node* ArrayLength(GraphAssembler* gasm, Node* array) {
+ return gasm->Load(
+ MachineType::Uint32(), array,
+ gasm->Int32Constant(WasmArray::kLengthOffset - kHeapObjectTag));
+}
+
+} // namespace
+
+Node* WasmGraphBuilder::StructNew(uint32_t struct_index,
+ const wasm::StructType* type,
+ Vector<Node*> fields) {
+ // This logic is duplicated from module-instantiate.cc.
+ // TODO(jkummerow): Find a nicer solution.
+ int map_index = 0;
+ const std::vector<uint8_t>& type_kinds = env_->module->type_kinds;
+ for (uint32_t i = 0; i < struct_index; i++) {
+ if (type_kinds[i] == wasm::kWasmStructTypeCode ||
+ type_kinds[i] == wasm::kWasmArrayTypeCode) {
+ map_index++;
+ }
+ }
+ Node* s = CALL_BUILTIN(
+ WasmAllocateStruct,
+ graph()->NewNode(mcgraph()->common()->NumberConstant(map_index)),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ StoreStructFieldUnchecked(mcgraph(), gasm_.get(), s, type, i, fields[i]);
+ }
+ return s;
+}
+
+Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
+ const wasm::ArrayType* type, Node* length,
+ Node* initial_value) {
+ // This logic is duplicated from module-instantiate.cc.
+ // TODO(jkummerow): Find a nicer solution.
+ int map_index = 0;
+ const std::vector<uint8_t>& type_kinds = env_->module->type_kinds;
+ for (uint32_t i = 0; i < array_index; i++) {
+ if (type_kinds[i] == wasm::kWasmStructTypeCode ||
+ type_kinds[i] == wasm::kWasmArrayTypeCode) {
+ map_index++;
+ }
+ }
+
+ wasm::ValueType element_type = type->element_type();
+ Node* a = CALL_BUILTIN(
+ WasmAllocateArray,
+ graph()->NewNode(mcgraph()->common()->NumberConstant(map_index)),
+ BuildChangeUint31ToSmi(length),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(
+ element_type.element_size_bytes())),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ WriteBarrierKind write_barrier =
+ element_type.IsReferenceType() ? kPointerWriteBarrier : kNoWriteBarrier;
+ StoreRepresentation rep(element_type.machine_representation(), write_barrier);
+
+ auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
+ auto done = gasm_->MakeLabel();
+ Node* start_offset =
+ gasm_->Int32Constant(WasmArray::kHeaderSize - kHeapObjectTag);
+ Node* element_size = gasm_->Int32Constant(element_type.element_size_bytes());
+ Node* end_offset =
+ gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
+ // "Goto" requires the graph's end to have been set up.
+ // TODO(jkummerow): Figure out if there's a more elegant solution.
+ Graph* g = mcgraph()->graph();
+ if (!g->end()) {
+ g->SetEnd(g->NewNode(mcgraph()->common()->End(0)));
+ }
+ gasm_->Goto(&loop, start_offset);
+ gasm_->Bind(&loop);
+ {
+ Node* offset = loop.PhiAt(0);
+ Node* check = gasm_->Uint32LessThan(offset, end_offset);
+ gasm_->GotoIfNot(check, &done);
+ gasm_->Store(rep, a, offset, initial_value);
+ offset = gasm_->Int32Add(offset, element_size);
+ gasm_->Goto(&loop, offset);
+ }
+ gasm_->Bind(&done);
+ return a;
+}
+
+Node* WasmGraphBuilder::StructGet(Node* struct_object,
+ const wasm::StructType* struct_type,
+ uint32_t field_index, CheckForNull null_check,
+ wasm::WasmCodePosition position) {
+ if (null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(struct_object, RefNull()), position);
+ }
+ MachineType machine_type = FieldType(struct_type, field_index);
+ Node* offset = FieldOffset(mcgraph(), struct_type, field_index);
+ return gasm_->Load(machine_type, struct_object, offset);
+}
+
+Node* WasmGraphBuilder::StructSet(Node* struct_object,
+ const wasm::StructType* struct_type,
+ uint32_t field_index, Node* field_value,
+ CheckForNull null_check,
+ wasm::WasmCodePosition position) {
+ if (null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(struct_object, RefNull()), position);
+ }
+ return StoreStructFieldUnchecked(mcgraph(), gasm_.get(), struct_object,
+ struct_type, field_index, field_value);
+}
+
+void WasmGraphBuilder::BoundsCheck(Node* array, Node* index,
+ wasm::WasmCodePosition position) {
+ Node* length = ArrayLength(gasm_.get(), array);
+ TrapIfFalse(wasm::kTrapArrayOutOfBounds, gasm_->Uint32LessThan(index, length),
+ position);
+}
+
+Node* WasmGraphBuilder::ArrayGet(Node* array_object,
+ const wasm::ArrayType* type, Node* index,
+ wasm::WasmCodePosition position) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(array_object, RefNull()), position);
+ BoundsCheck(array_object, index, position);
+ MachineType machine_type = MachineType::TypeForRepresentation(
+ type->element_type().machine_representation());
+ Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
+ return gasm_->Load(machine_type, array_object, offset);
+}
+
+Node* WasmGraphBuilder::ArraySet(Node* array_object,
+ const wasm::ArrayType* type, Node* index,
+ Node* value, wasm::WasmCodePosition position) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(array_object, RefNull()), position);
+ BoundsCheck(array_object, index, position);
+ WriteBarrierKind write_barrier = type->element_type().IsReferenceType()
+ ? kPointerWriteBarrier
+ : kNoWriteBarrier;
+ StoreRepresentation rep(type->element_type().machine_representation(),
+ write_barrier);
+ Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
+ return gasm_->Store(rep, array_object, offset, value);
+}
+
+Node* WasmGraphBuilder::ArrayLen(Node* array_object,
+ wasm::WasmCodePosition position) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(array_object, RefNull()), position);
+ return ArrayLength(gasm_.get(), array_object);
+}
+
class WasmDecorator final : public GraphDecorator {
public:
explicit WasmDecorator(NodeOriginTable* origins, wasm::Decoder* decoder)
@@ -5113,11 +5293,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return bigint_to_i64_descriptor_;
}
- Node* GetBuiltinPointerTarget(Builtins::Name builtin_id) {
- static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
- return graph()->NewNode(mcgraph()->common()->NumberConstant(builtin_id));
- }
-
Node* GetTargetForBuiltinCall(wasm::WasmCode::RuntimeStubId wasm_stub,
Builtins::Name builtin_id) {
return (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
@@ -5126,52 +5301,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
: GetBuiltinPointerTarget(builtin_id);
}
- Node* BuildAllocateHeapNumberWithValue(Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- CommonOperatorBuilder* common = mcgraph()->common();
- Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kAllocateHeapNumber,
- Builtins::kAllocateHeapNumber);
- if (!allocate_heap_number_operator_.is_set()) {
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow, stub_mode_);
- allocate_heap_number_operator_.set(common->Call(call_descriptor));
- }
- Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, effect(), control());
- SetEffect(
- graph()->NewNode(machine->Store(StoreRepresentation(
- MachineRepresentation::kFloat64, kNoWriteBarrier)),
- heap_number, BuildHeapNumberValueIndexConstant(),
- value, heap_number, control()));
- return heap_number;
- }
-
- Node* BuildChangeSmiToFloat32(Node* value) {
- return graph()->NewNode(mcgraph()->machine()->RoundInt32ToFloat32(),
- BuildChangeSmiToInt32(value));
- }
-
- Node* BuildChangeSmiToFloat64(Node* value) {
- return graph()->NewNode(mcgraph()->machine()->ChangeInt32ToFloat64(),
- BuildChangeSmiToInt32(value));
- }
-
- Node* BuildTestHeapObject(Node* value) {
- return graph()->NewNode(mcgraph()->machine()->WordAnd(), value,
- mcgraph()->IntPtrConstant(kHeapObjectTag));
- }
-
- Node* BuildLoadHeapNumberValue(Node* value) {
- return SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Float64()), value,
- BuildHeapNumberValueIndexConstant(), effect(), control()));
- }
-
- Node* BuildHeapNumberValueIndexConstant() {
- return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
- }
-
Node* BuildLoadUndefinedValueFromInstance() {
if (undefined_value_node_ == nullptr) {
Node* isolate_root = graph()->NewNode(
@@ -5188,93 +5317,122 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return undefined_value_node_.get();
}
- Node* BuildChangeInt32ToTagged(Node* value) {
+ Node* BuildChangeInt32ToNumber(Node* value) {
+ // We expect most integers at runtime to be Smis, so it is important for
+ // wrapper performance that Smi conversion be inlined.
if (SmiValuesAre32Bits()) {
return BuildChangeInt32ToSmi(value);
}
DCHECK(SmiValuesAre31Bits());
- auto allocate_heap_number = gasm_->MakeDeferredLabel();
+ auto builtin = gasm_->MakeDeferredLabel();
auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
- // The smi value is {2 * value}. If that overflows, we need to allocate a
- // heap number.
+ // Double value to test if value can be a Smi, and if so, to convert it.
Node* add = gasm_->Int32AddWithOverflow(value, value);
Node* ovf = gasm_->Projection(1, add);
- gasm_->GotoIf(ovf, &allocate_heap_number);
+ gasm_->GotoIf(ovf, &builtin);
// If it didn't overflow, the result is {2 * value} as pointer-sized value.
Node* smi_tagged = BuildChangeInt32ToIntPtr(gasm_->Projection(0, add));
gasm_->Goto(&done, smi_tagged);
- gasm_->Bind(&allocate_heap_number);
- Node* heap_number =
- BuildAllocateHeapNumberWithValue(gasm_->ChangeInt32ToFloat64(value));
- gasm_->Goto(&done, heap_number);
-
+ // Otherwise, call builtin, to convert to a HeapNumber.
+ gasm_->Bind(&builtin);
+ CommonOperatorBuilder* common = mcgraph()->common();
+ Node* target =
+ GetTargetForBuiltinCall(wasm::WasmCode::kWasmInt32ToHeapNumber,
+ Builtins::kWasmInt32ToHeapNumber);
+ if (!int32_to_heapnumber_operator_.is_set()) {
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), WasmInt32ToHeapNumberDescriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ int32_to_heapnumber_operator_.set(common->Call(call_descriptor));
+ }
+ Node* call =
+ gasm_->Call(int32_to_heapnumber_operator_.get(), target, value);
+ gasm_->Goto(&done, call);
gasm_->Bind(&done);
return done.PhiAt(0);
}
- Node* BuildChangeFloat64ToTagged(Node* value) {
- // Check several conditions:
- // i32?
- // ā”œā”€ true: zero?
- // ā”‚ ā”œā”€ true: positive?
- // ā”‚ ā”‚ ā”œā”€ true: potentially Smi
- // ā”‚ ā”‚ ā””ā”€ false: box (-0)
- // ā”‚ ā””ā”€ false: potentially Smi
- // ā””ā”€ false: box (non-int)
- // For potential Smi values, depending on whether Smis are 31 or 32 bit, we
- // still need to check whether the value fits in a Smi.
-
- auto box_value = gasm_->MakeDeferredLabel();
- auto potentially_smi = gasm_->MakeLabel();
- auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
-
- Node* value32 = gasm_->RoundFloat64ToInt32(value);
- Node* check_i32 =
- gasm_->Float64Equal(value, gasm_->ChangeInt32ToFloat64(value32));
- gasm_->GotoIfNot(check_i32, &box_value);
+ Node* BuildChangeTaggedToInt32(Node* value, Node* context) {
+ // We expect most integers at runtime to be Smis, so it is important for
+ // wrapper performance that Smi conversion be inlined.
+ auto builtin = gasm_->MakeDeferredLabel();
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- // We only need to check for -0 if the {value} can potentially contain -0.
- Node* check_zero = gasm_->Word32Equal(value32, gasm_->Int32Constant(0));
- gasm_->GotoIfNot(check_zero, &potentially_smi);
+ // Test if value is a Smi.
+ Node* is_smi =
+ gasm_->Word32Equal(gasm_->Word32And(BuildTruncateIntPtrToInt32(value),
+ gasm_->Int32Constant(kSmiTagMask)),
+ gasm_->Int32Constant(0));
+ gasm_->GotoIfNot(is_smi, &builtin);
- // In case of 0, we need to check the MSB (sign bit).
- Node* check_positive = gasm_->Word32Equal(
- gasm_->Float64ExtractHighWord32(value), gasm_->Int32Constant(0));
- gasm_->Branch(check_positive, &potentially_smi, &box_value);
+ // If Smi, convert to int32.
+ Node* smi = BuildChangeSmiToInt32(value);
+ gasm_->Goto(&done, smi);
- gasm_->Bind(&potentially_smi);
-
- // On 64-bit machines we can just wrap the 32-bit integer in a smi, for
- // 32-bit machines we need to deal with potential overflow and fallback to
- // boxing.
- if (SmiValuesAre32Bits()) {
- gasm_->Goto(&done, BuildChangeInt32ToSmi(value32));
- } else {
- DCHECK(SmiValuesAre31Bits());
- // The smi value is {2 * value}. If that overflows, we need to box.
- Node* smi_tag = gasm_->Int32AddWithOverflow(value32, value32);
-
- Node* check_ovf = gasm_->Projection(1, smi_tag);
- gasm_->GotoIf(check_ovf, &box_value);
+ // Otherwise, call builtin which changes non-Smi to Int32.
+ gasm_->Bind(&builtin);
+ CommonOperatorBuilder* common = mcgraph()->common();
+ Node* target =
+ GetTargetForBuiltinCall(wasm::WasmCode::kWasmTaggedNonSmiToInt32,
+ Builtins::kWasmTaggedNonSmiToInt32);
+ if (!tagged_non_smi_to_int32_operator_.is_set()) {
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), WasmTaggedNonSmiToInt32Descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ tagged_non_smi_to_int32_operator_.set(common->Call(call_descriptor));
+ }
+ Node* call = gasm_->Call(tagged_non_smi_to_int32_operator_.get(), target,
+ value, context);
+ SetSourcePosition(call, 1);
+ gasm_->Goto(&done, call);
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
- Node* smi_value = gasm_->Projection(0, smi_tag);
- // With pointer compression, only the lower 32 bits are used.
- if (!COMPRESS_POINTERS_BOOL) {
- smi_value = BuildChangeInt32ToIntPtr(smi_value);
- }
- gasm_->Goto(&done, smi_value);
+ Node* BuildChangeFloat32ToNumber(Node* value) {
+ CommonOperatorBuilder* common = mcgraph()->common();
+ Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kWasmFloat32ToNumber,
+ Builtins::kWasmFloat32ToNumber);
+ if (!float32_to_number_operator_.is_set()) {
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), WasmFloat32ToNumberDescriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ float32_to_number_operator_.set(common->Call(call_descriptor));
}
+ return gasm_->Call(float32_to_number_operator_.get(), target, value);
+ }
- // Allocate the box for the {value}.
- gasm_->Bind(&box_value);
- gasm_->Goto(&done, BuildAllocateHeapNumberWithValue(value));
+ Node* BuildChangeFloat64ToNumber(Node* value) {
+ CommonOperatorBuilder* common = mcgraph()->common();
+ Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kWasmFloat64ToNumber,
+ Builtins::kWasmFloat64ToNumber);
+ if (!float64_to_number_operator_.is_set()) {
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), WasmFloat64ToNumberDescriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ float64_to_number_operator_.set(common->Call(call_descriptor));
+ }
+ return gasm_->Call(float64_to_number_operator_.get(), target, value);
+ }
- gasm_->Bind(&done);
- return done.PhiAt(0);
+ Node* BuildChangeTaggedToFloat64(Node* value, Node* context) {
+ CommonOperatorBuilder* common = mcgraph()->common();
+ Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kWasmTaggedToFloat64,
+ Builtins::kWasmTaggedToFloat64);
+ if (!tagged_to_float64_operator_.is_set()) {
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), WasmTaggedToFloat64Descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ tagged_to_float64_operator_.set(common->Call(call_descriptor));
+ }
+ Node* call =
+ gasm_->Call(tagged_to_float64_operator_.get(), target, value, context);
+ SetSourcePosition(call, 1);
+ return call;
}
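
The inline fast path removed above leaned on V8's Smi encoding: with 31-bit Smis a small integer n is tagged as the word 2*n, so tagging is an overflowing 32-bit add of n with itself, and an overflow means the value cannot be a Smi and has to be boxed as a HeapNumber. A stand-alone model of that decision, in plain C++ rather than TurboFan graph nodes:

    #include <cstdint>
    #include <optional>

    // Model of 31-bit Smi tagging: a value fits if 2 * n stays inside int32,
    // i.e. n is in [-2^30, 2^30 - 1]; otherwise it has to be boxed.
    std::optional<int32_t> TrySmiTag31(int32_t n) {
      if (n < -(1 << 30) || n > (1 << 30) - 1) return std::nullopt;
      return n * 2;  // low (tag) bit stays 0, which is what marks a Smi
    }
    // With 32-bit Smi payloads (typical 64-bit builds), every int32 fits and
    // the overflow check disappears, as the SmiValuesAre32Bits() branch showed.
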
int AddArgumentNodes(Vector<Node*> args, int pos, int param_count,
@@ -5288,33 +5446,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return pos;
}
- // Converts a number (Smi or HeapNumber) to float64.
- Node* BuildChangeNumberToFloat64(Node* value) {
- // If the input is a HeapNumber, we load the value from it.
- Node* check_heap_object = BuildTestHeapObject(value);
- Diamond is_heapnumber(graph(), mcgraph()->common(), check_heap_object,
- BranchHint::kFalse);
- is_heapnumber.Chain(control());
-
- SetControl(is_heapnumber.if_true);
- Node* effect_orig = effect();
- Node* v_heapnumber = BuildLoadHeapNumberValue(value);
- Node* effect_heapnumber = effect();
-
- SetControl(is_heapnumber.merge);
- // If the input is Smi, just convert to float64.
- Node* v_smi = BuildChangeSmiToFloat64(value);
-
- SetEffect(is_heapnumber.EffectPhi(effect_heapnumber, effect_orig));
-
- return is_heapnumber.Phi(MachineRepresentation::kFloat64, v_heapnumber,
- v_smi);
- }
-
Node* ToJS(Node* node, wasm::ValueType type) {
switch (type.kind()) {
case wasm::ValueType::kI32:
- return BuildChangeInt32ToTagged(node);
+ return BuildChangeInt32ToNumber(node);
case wasm::ValueType::kS128:
UNREACHABLE();
case wasm::ValueType::kI64: {
@@ -5322,16 +5457,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildChangeInt64ToBigInt(node);
}
case wasm::ValueType::kF32:
- node = graph()->NewNode(mcgraph()->machine()->ChangeFloat32ToFloat64(),
- node);
- return BuildChangeFloat64ToTagged(node);
+ return BuildChangeFloat32ToNumber(node);
case wasm::ValueType::kF64:
- return BuildChangeFloat64ToTagged(node);
+ return BuildChangeFloat64ToNumber(node);
case wasm::ValueType::kAnyRef:
case wasm::ValueType::kFuncRef:
case wasm::ValueType::kNullRef:
case wasm::ValueType::kExnRef:
return node;
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
+ // TODO(7748): Implement properly. For now, we just expose the raw
+ // object for testing.
+ return node;
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -5380,41 +5519,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(call, target, input, context, effect(), control()));
}
- Node* BuildTestSmi(Node* value) {
- return gasm_->Word32Equal(
- gasm_->Word32And(BuildTruncateIntPtrToInt32(value),
- gasm_->Int32Constant(kSmiTagMask)),
- gasm_->Int32Constant(0));
- }
-
- Node* BuildFloat64ToWasm(Node* value, wasm::ValueType type) {
- switch (type.kind()) {
- case wasm::ValueType::kI32:
- return graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToWord32(),
- value);
- case wasm::ValueType::kF32:
- return graph()->NewNode(
- mcgraph()->machine()->TruncateFloat64ToFloat32(), value);
- case wasm::ValueType::kF64:
- return value;
- default:
- UNREACHABLE();
- }
- }
-
- Node* BuildSmiToWasm(Node* smi, wasm::ValueType type) {
- switch (type.kind()) {
- case wasm::ValueType::kI32:
- return BuildChangeSmiToInt32(smi);
- case wasm::ValueType::kF32:
- return BuildChangeSmiToFloat32(smi);
- case wasm::ValueType::kF64:
- return BuildChangeSmiToFloat64(smi);
- default:
- UNREACHABLE();
- }
- }
-
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
switch (type.kind()) {
case wasm::ValueType::kAnyRef:
@@ -5460,65 +5564,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return input;
}
+ case wasm::ValueType::kF32:
+ return graph()->NewNode(
+ mcgraph()->machine()->TruncateFloat64ToFloat32(),
+ BuildChangeTaggedToFloat64(input, js_context));
+
+ case wasm::ValueType::kF64:
+ return BuildChangeTaggedToFloat64(input, js_context);
+
+ case wasm::ValueType::kI32:
+ return BuildChangeTaggedToInt32(input, js_context);
+
case wasm::ValueType::kI64:
// i64 values can only come from BigInt.
DCHECK(enabled_features_.has_bigint());
return BuildChangeBigIntToInt64(input, js_context);
default:
+ UNREACHABLE();
break;
}
-
- gasm_->InitializeEffectControl(effect(), control());
-
- // Handle Smis first. The rest goes through the ToNumber stub.
- // TODO(clemensb): Also handle HeapNumbers specially.
- STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
-
- // Build a graph implementing this diagram:
- // input smi?
-    //    ├─ true: ──<fast path>──────────┬─ smi-to-wasm ──────┬─ result
-    //    └─ false: ToNumber -> smi?      │                    │
-    //                          ├─ true: ─┘                    │
-    //                          └─ false: load -> f64-to-wasm ─┘
-
- auto smi_to_wasm = gasm_->MakeLabel(MachineRepresentation::kTaggedSigned);
- auto call_to_number =
- gasm_->MakeDeferredLabel(MachineRepresentation::kTaggedPointer);
- auto done = gasm_->MakeLabel(type.machine_representation());
-
- // Branch to smi conversion or the ToNumber call.
- gasm_->Branch(BuildTestSmi(input), &smi_to_wasm, &call_to_number, input);
-
- // Build the ToNumber path.
- gasm_->Bind(&call_to_number);
- auto to_number_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), TypeConversionDescriptor{}, 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
- Node* to_number_target =
- GetTargetForBuiltinCall(wasm::WasmCode::kToNumber, Builtins::kToNumber);
- Node* number =
- gasm_->Call(to_number_descriptor, to_number_target, input, js_context);
- SetSourcePosition(number, 1);
-
- // If the ToNumber result is Smi, convert to wasm.
- gasm_->GotoIf(BuildTestSmi(number), &smi_to_wasm, number);
-
- // Otherwise the ToNumber result is a HeapNumber. Load its value and convert
- // to wasm.
- Node* heap_number_value = gasm_->LoadHeapNumberValue(number);
- Node* converted_heap_number_value =
- BuildFloat64ToWasm(heap_number_value, type);
- gasm_->Goto(&done, converted_heap_number_value);
-
- // Implement the smi to wasm conversion.
- gasm_->Bind(&smi_to_wasm);
- Node* smi_to_wasm_result = BuildSmiToWasm(smi_to_wasm.PhiAt(0), type);
- gasm_->Goto(&done, smi_to_wasm_result);
-
- // Done. Update effect and control and return the final Phi.
- gasm_->Bind(&done);
- return done.PhiAt(0);
}
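
The rewritten FromJS hands each incoming JS value to a type-specific builtin (BuildChangeTaggedToInt32, BuildChangeTaggedToFloat64) instead of open-coding a Smi fast path plus a ToNumber call. For i32 parameters the observable result still has to match the JS-to-wasm coercion, i.e. ToInt32 semantics: convert to a number, truncate toward zero, wrap modulo 2^32. A stand-alone model of that final numeric step (this sketches the specified behaviour, not the builtin's code):

    #include <cmath>
    #include <cstdint>

    // Model of ECMAScript ToInt32 applied to an already-numeric value.
    // NaN and infinities map to 0; everything else truncates toward zero
    // and wraps modulo 2^32 into the signed 32-bit range.
    int32_t ToInt32Model(double value) {
      if (!std::isfinite(value)) return 0;
      double truncated = std::trunc(value);
      double wrapped = std::fmod(truncated, 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }
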
void BuildModifyThreadInWasmFlag(bool new_value) {
@@ -5595,18 +5660,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context) {
- Node* iterable_to_fixed_array =
- GetBuiltinPointerTarget(Builtins::kIterableToFixedArrayForWasm);
- IterableToFixedArrayForWasmDescriptor interface_descriptor;
Node* length = BuildChangeUint31ToSmi(
Uint32Constant(static_cast<uint32_t>(sig->return_count())));
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoProperties, StubCallMode::kCallBuiltinPointer);
- return SetEffect(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), iterable_to_fixed_array,
- iterable, length, context, effect(), control()));
+ return CALL_BUILTIN(IterableToFixedArrayForWasm, iterable, length, context);
+ }
+
+ // Extract the FixedArray implementing
+ // the backing storage of a JavaScript array.
+ Node* BuildLoadArrayBackingStorage(Node* js_array) {
+ return gasm_->Load(MachineType::AnyTagged(), js_array,
+ JSObject::kElementsOffset - kHeapObjectTag);
+ }
+
+ // Generate a call to the AllocateJSArray builtin.
+ Node* BuildCallAllocateJSArray(Node* array_length, Node* context) {
+ // Since we don't check that args will fit in an array,
+ // we make sure this is true based on statically known limits.
+ STATIC_ASSERT(wasm::kV8MaxWasmFunctionMultiReturns <=
+ JSArray::kInitialMaxFastElementArray);
+ return SetControl(CALL_BUILTIN(WasmAllocateJSArray, array_length, context));
}
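
BuildLoadArrayBackingStorage issues a raw machine load of the JSArray's elements field, which is why the offset subtracts kHeapObjectTag: V8 heap pointers carry a tag in their low bit, so a field at offset k of an object lives at the tagged pointer plus k minus the tag. With the backing FixedArray in hand, the wrapper below writes each converted return value straight into it, replacing the two runtime calls used before. A minimal illustration of the address arithmetic, with a local constant standing in for V8's:

    #include <cstdint>

    // Illustration only: heap-object pointers have the low bit set, so the
    // raw address of a field is tagged pointer + field offset - tag.
    constexpr intptr_t kHeapObjectTagModel = 1;

    inline intptr_t FieldAddress(intptr_t tagged_pointer, int field_offset) {
      return tagged_pointer + field_offset - kHeapObjectTagModel;
    }
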
void BuildJSToWasmWrapper(bool is_import) {
@@ -5692,15 +5764,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int32_t return_count = static_cast<int32_t>(sig_->return_count());
Node* size =
graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
- // TODO(thibaudm): Replace runtime calls with TurboFan code.
- Node* fixed_array =
- BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1);
+
+ jsval = BuildCallAllocateJSArray(size, js_context);
+
+ Node* fixed_array = BuildLoadArrayBackingStorage(jsval);
+
for (int i = 0; i < return_count; ++i) {
Node* value = ToJS(rets[i], sig_->GetReturn(i));
STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value);
}
- jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray,
- js_context, &fixed_array, 1);
}
Return(jsval);
if (ContainsInt64(sig_)) LowerInt64(kCalledFromJS);
@@ -6014,78 +6086,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
}
- void BuildWasmInterpreterEntry(int func_index) {
- int param_count = static_cast<int>(sig_->parameter_count());
-
- // Build the start and the parameter nodes.
- SetEffectControl(Start(param_count + 3));
-
- // Create the instance_node from the passed parameter.
- instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
-
- // Compute size for the argument buffer.
- int args_size_bytes = 0;
- for (wasm::ValueType type : sig_->parameters()) {
- args_size_bytes += type.element_size_bytes();
- }
-
- // The return value is also passed via this buffer:
- int return_size_bytes = 0;
- for (wasm::ValueType type : sig_->returns()) {
- return_size_bytes += type.element_size_bytes();
- }
-
- // Get a stack slot for the arguments.
- Node* arg_buffer =
- args_size_bytes == 0 && return_size_bytes == 0
- ? mcgraph()->IntPtrConstant(0)
- : graph()->NewNode(mcgraph()->machine()->StackSlot(
- std::max(args_size_bytes, return_size_bytes), 8));
-
- // Now store all our arguments to the buffer.
- int offset = 0;
-
- for (int i = 0; i < param_count; ++i) {
- wasm::ValueType type = sig_->GetParam(i);
- // Start from the parameter with index 1 to drop the instance_node.
- SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
- Int32Constant(offset), Param(i + 1), effect(),
- control()));
- offset += type.element_size_bytes();
- }
- DCHECK_EQ(args_size_bytes, offset);
-
- // We are passing the raw arg_buffer here. To the GC and other parts, it
- // looks like a Smi (lowest bit not set). In the runtime function however,
- // don't call Smi::value on it, but just cast it to a byte pointer.
- Node* parameters[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(func_index)),
- arg_buffer};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
- arraysize(parameters));
-
- // Read back the return value.
- DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
- size_t return_count = sig_->return_count();
- if (return_count == 0) {
- Return(Int32Constant(0));
- } else {
- base::SmallVector<Node*, 8> returns(return_count);
- offset = 0;
- for (size_t i = 0; i < return_count; ++i) {
- wasm::ValueType type = sig_->GetReturn(i);
- Node* val = SetEffect(
- graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
- Int32Constant(offset), effect(), control()));
- returns[i] = val;
- offset += type.element_size_bytes();
- }
- Return(VectorOf(returns));
- }
-
- if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
- }
-
void BuildJSToJSWrapper(Isolate* isolate) {
int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -6161,16 +6161,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int32_t return_count = static_cast<int32_t>(sig_->return_count());
Node* size =
graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
- Node* result_fixed_array =
- BuildCallToRuntime(Runtime::kWasmNewMultiReturnFixedArray, &size, 1);
+ jsval = BuildCallAllocateJSArray(size, context);
+ Node* result_fixed_array = BuildLoadArrayBackingStorage(jsval);
for (unsigned i = 0; i < sig_->return_count(); ++i) {
const auto& type = sig_->GetReturn(i);
Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i);
Node* cast = ToJS(FromJS(elem, context, type), type);
STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast);
}
- jsval = BuildCallToRuntimeWithContext(Runtime::kWasmNewMultiReturnJSArray,
- context, &result_fixed_array, 1);
}
Return(jsval);
}
@@ -6262,7 +6260,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
private:
StubCallMode stub_mode_;
SetOncePointer<Node> undefined_value_node_;
- SetOncePointer<const Operator> allocate_heap_number_operator_;
+ SetOncePointer<const Operator> int32_to_heapnumber_operator_;
+ SetOncePointer<const Operator> tagged_non_smi_to_int32_operator_;
+ SetOncePointer<const Operator> float32_to_number_operator_;
+ SetOncePointer<const Operator> float64_to_number_operator_;
+ SetOncePointer<const Operator> tagged_to_float64_operator_;
wasm::WasmFeatures enabled_features_;
CallDescriptor* bigint_to_i64_descriptor_ = nullptr;
CallDescriptor* i64_to_bigint_descriptor_ = nullptr;
@@ -6425,6 +6427,8 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
+namespace {
+
wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
const char** name_ptr) {
#define CASE(name) \
@@ -6525,6 +6529,8 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
return result;
}
+} // namespace
+
wasm::WasmCompilationResult CompileWasmImportCallWrapper(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
WasmImportCallKind kind, const wasm::FunctionSig* sig,
@@ -6626,50 +6632,10 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone);
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
return native_module->PublishCode(std::move(wasm_code));
}
-wasm::WasmCompilationResult CompileWasmInterpreterEntry(
- wasm::WasmEngine* wasm_engine, const wasm::WasmFeatures& enabled_features,
- uint32_t func_index, const wasm::FunctionSig* sig) {
- //----------------------------------------------------------------------------
- // Create the Graph
- //----------------------------------------------------------------------------
- Zone zone(wasm_engine->allocator(), ZONE_NAME);
- Graph* graph = new (&zone) Graph(&zone);
- CommonOperatorBuilder* common = new (&zone) CommonOperatorBuilder(&zone);
- MachineOperatorBuilder* machine = new (&zone) MachineOperatorBuilder(
- &zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements());
- MachineGraph* mcgraph = new (&zone) MachineGraph(graph, common, machine);
-
- WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, nullptr,
- StubCallMode::kCallWasmRuntimeStub,
- enabled_features);
- builder.BuildWasmInterpreterEntry(func_index);
-
- // Schedule and compile to machine code.
- CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
- if (machine->Is32()) {
- incoming = GetI32WasmCallDescriptor(&zone, incoming);
- }
-
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(
- SNPrintF(func_name, "wasm-interpreter-entry#%d", func_index));
-
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- wasm_engine, incoming, mcgraph, Code::WASM_INTERPRETER_ENTRY,
- wasm::WasmCode::kInterpreterEntry, func_name.begin(),
- WasmStubAssemblerOptions());
- result.result_tier = wasm::ExecutionTier::kInterpreter;
- result.kind = wasm::WasmCompilationResult::kInterpreterEntry;
-
- return result;
-}
-
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
@@ -6886,34 +6852,16 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
&info, wasm_engine, mcgraph, call_descriptor, source_positions,
node_origins, func_body, env->module, func_index);
- // TODO(bradnelson): Improve histogram handling of size_t.
- counters->wasm_compile_function_peak_memory_bytes()->AddSample(
- static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
+ if (counters) {
+ counters->wasm_compile_function_peak_memory_bytes()->AddSample(
+ static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
+ }
auto result = info.ReleaseWasmCompilationResult();
CHECK_NOT_NULL(result); // Compilation expected to succeed.
DCHECK_EQ(wasm::ExecutionTier::kTurbofan, result->result_tier);
return std::move(*result);
}
-wasm::WasmCompilationResult ExecuteInterpreterEntryCompilation(
- wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body, int func_index, Counters* counters,
- wasm::WasmFeatures* detected) {
- Zone zone(wasm_engine->allocator(), ZONE_NAME);
- const wasm::WasmModule* module = env ? env->module : nullptr;
- wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::EmptyInterface> decoder(
- &zone, module, env->enabled_features, detected, func_body);
- decoder.Decode();
- if (decoder.failed()) return wasm::WasmCompilationResult{};
-
- wasm::WasmCompilationResult result = CompileWasmInterpreterEntry(
- wasm_engine, env->enabled_features, func_index, func_body.sig);
- DCHECK(result.succeeded());
- DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
-
- return result;
-}
-
namespace {
// Helper for allocating either an GP or FP reg, or the next stack slot.
class LinkageLocationAllocator {
@@ -7153,6 +7101,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
}
#undef FATAL_UNSUPPORTED_OPCODE
+#undef CALL_BUILTIN
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 176fdb63c4..6d662e674d 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -54,10 +54,6 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
int func_index, Counters*, wasm::WasmFeatures* detected);
-wasm::WasmCompilationResult ExecuteInterpreterEntryCompilation(
- wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
- int func_index, Counters*, wasm::WasmFeatures* detected);
-
// Calls to Wasm imports are handled in several different ways, depending on the
// type of the target function/callable and whether the signature matches the
// argument arity.
@@ -126,12 +122,6 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
const wasm::FunctionSig* sig, bool is_import,
const wasm::WasmFeatures& enabled_features);
-// Compiles a stub that redirects a call to a wasm function to the wasm
-// interpreter. It's ABI compatible with the compiled wasm function.
-V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmInterpreterEntry(
- wasm::WasmEngine*, const wasm::WasmFeatures& enabled_features,
- uint32_t func_index, const wasm::FunctionSig*);
-
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
@@ -172,6 +162,10 @@ class WasmGraphBuilder {
kRetpoline = true,
kNoRetpoline = false
};
+ enum CheckForNull : bool { // --
+ kWithNullCheck = true,
+ kWithoutNullCheck = false
+ };
V8_EXPORT_PRIVATE WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
@@ -197,6 +191,7 @@ class WasmGraphBuilder {
Node* EffectPhi(unsigned count, Node** effects_and_control);
Node* RefNull();
Node* RefFunc(uint32_t function_index);
+ Node* RefAsNonNull(Node* arg, wasm::WasmCodePosition position);
Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
@@ -255,7 +250,7 @@ class WasmGraphBuilder {
Node* arr[] = {fst, more...};
return Return(ArrayVector(arr));
}
- Node* Unreachable(wasm::WasmCodePosition position);
+ Node* Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position);
@@ -267,6 +262,9 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
Vector<Node*> args, wasm::WasmCodePosition position);
+ // Return value is not expected to be used,
+ // but we need it for compatibility with graph-builder-interface.
+ Node* BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
Node* Invert(Node* node);
@@ -321,14 +319,6 @@ class WasmGraphBuilder {
void GetBaseAndOffsetForImportedMutableAnyRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset);
- void BoundsCheckTable(uint32_t table_index, Node* index,
- wasm::WasmCodePosition position,
- wasm::TrapReason trap_reason, Node** base_node);
-
- void GetTableBaseAndOffset(uint32_t table_index, Node* index,
- wasm::WasmCodePosition position, Node** base_node,
- Node** offset_node);
-
// Utilities to manipulate sets of instance cache nodes.
void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
@@ -385,6 +375,23 @@ class WasmGraphBuilder {
Node* TableSize(uint32_t table_index);
Node* TableFill(uint32_t table_index, Node* start, Node* value, Node* count);
+ Node* StructNew(uint32_t struct_index, const wasm::StructType* type,
+ Vector<Node*> fields);
+ Node* StructGet(Node* struct_object, const wasm::StructType* struct_type,
+ uint32_t field_index, CheckForNull null_check,
+ wasm::WasmCodePosition position);
+ Node* StructSet(Node* struct_object, const wasm::StructType* struct_type,
+ uint32_t field_index, Node* value, CheckForNull null_check,
+ wasm::WasmCodePosition position);
+ Node* ArrayNew(uint32_t array_index, const wasm::ArrayType* type,
+ Node* length, Node* initial_value);
+ void BoundsCheck(Node* array, Node* index, wasm::WasmCodePosition position);
+ Node* ArrayGet(Node* array_object, const wasm::ArrayType* type, Node* index,
+ wasm::WasmCodePosition position);
+ Node* ArraySet(Node* array_object, const wasm::ArrayType* type, Node* index,
+ Node* value, wasm::WasmCodePosition position);
+ Node* ArrayLen(Node* array_object, wasm::WasmCodePosition position);
+
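
The Struct*/Array* entry points declared above are the graph-builder half of the experimental wasm-gc support (the TODO(7748) cases in this patch). A hypothetical fragment, assuming V8's internal headers and the v8::internal::compiler namespace, of how a struct.get could be routed through them; only operands of nullable reference type would request the null check:

    // Hypothetical lowering helper; `builder` and the surrounding decoder are
    // assumptions, while the StructGet signature and CheckForNull values are
    // taken from the declarations above.
    Node* LowerStructGet(WasmGraphBuilder* builder, Node* object,
                         const wasm::StructType* type, uint32_t field_index,
                         bool operand_is_nullable, wasm::WasmCodePosition pos) {
      WasmGraphBuilder::CheckForNull null_check =
          operand_is_nullable ? WasmGraphBuilder::kWithNullCheck
                              : WasmGraphBuilder::kWithoutNullCheck;
      return builder->StructGet(object, type, field_index, null_check, pos);
    }
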
bool has_simd() const { return has_simd_; }
wasm::UseTrapHandler use_trap_handler() const {
@@ -460,6 +467,7 @@ class WasmGraphBuilder {
Node* BuildImportCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
Node* func_index, IsReturnCall continuation);
+ Node* GetBuiltinPointerTarget(int builtin_id);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
diff --git a/deps/v8/src/d8/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc
index 48ea3c9481..5ea43b1476 100644
--- a/deps/v8/src/d8/d8-platforms.cc
+++ b/deps/v8/src/d8/d8-platforms.cc
@@ -43,9 +43,17 @@ class PredictablePlatform : public Platform {
int NumberOfWorkerThreads() override { return 0; }
void CallOnWorkerThread(std::unique_ptr<Task> task) override {
- // It's not defined when background tasks are being executed, so we can just
- // execute them right away.
- task->Run();
+ // We post worker tasks on the foreground task runner of the
+ // {kProcessGlobalPredictablePlatformWorkerTaskQueue} isolate. The task
+ // queue of the {kProcessGlobalPredictablePlatformWorkerTaskQueue} isolate
+ // is then executed on the main thread to achieve predictable behavior.
+ //
+ // In this context here it is okay to call {GetForegroundTaskRunner} from a
+ // background thread. The reason is that code is executed sequentially with
+ // the PredictablePlatform, and that the {DefaultPlatform} does not access
+ // the isolate but only uses it as the key in a HashMap.
+ GetForegroundTaskRunner(kProcessGlobalPredictablePlatformWorkerTaskQueue)
+ ->PostTask(std::move(task));
}
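
With this change the PredictablePlatform never runs work on a real worker thread: every "background" task is funneled into the foreground queue keyed by the sentinel isolate and drained on the main thread (d8's message pump does the draining under --verify-predictable). A stand-alone model of the idea, using plain C++ instead of the v8::Platform API:

    #include <functional>
    #include <queue>
    #include <utility>

    // "Worker" tasks are only queued here; the main loop drains them at a
    // well-defined point, so execution order no longer depends on the OS
    // thread scheduler.
    class PredictableTaskQueueModel {
     public:
      void PostWorkerTask(std::function<void()> task) {
        queue_.push(std::move(task));
      }
      // Called from the main loop, e.g. after each foreground task.
      void DrainOnMainThread() {
        while (!queue_.empty()) {
          std::function<void()> task = std::move(queue_.front());
          queue_.pop();
          task();
        }
      }
     private:
      std::queue<std::function<void()>> queue_;
    };
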
void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
diff --git a/deps/v8/src/d8/d8-platforms.h b/deps/v8/src/d8/d8-platforms.h
index a658f0a47c..3d7d3f8cab 100644
--- a/deps/v8/src/d8/d8-platforms.h
+++ b/deps/v8/src/d8/d8-platforms.h
@@ -10,6 +10,7 @@
namespace v8 {
+class Isolate;
class Platform;
// Returns a predictable v8::Platform implementation.
@@ -24,6 +25,13 @@ std::unique_ptr<Platform> MakePredictablePlatform(
std::unique_ptr<Platform> MakeDelayedTasksPlatform(
std::unique_ptr<Platform> platform, int64_t random_seed);
+// We use the task queue of {kProcessGlobalPredictablePlatformWorkerTaskQueue}
+// for worker tasks of the {PredictablePlatform}. At the moment, {nullptr} is a
+// valid value for the isolate. If this ever changes, we either have to allocate
+// a core isolate, or refactor the implementation of worker tasks in the
+// {PredictablePlatform}.
+constexpr Isolate* kProcessGlobalPredictablePlatformWorkerTaskQueue = nullptr;
+
} // namespace v8
#endif // V8_D8_D8_PLATFORMS_H_
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index dd00c58288..fe1bb58e4a 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -49,6 +49,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/profiler/profile-generator.h"
#include "src/sanitizer/msan.h"
+#include "src/snapshot/snapshot.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
@@ -452,54 +453,6 @@ ArrayBuffer::Allocator* Shell::array_buffer_allocator;
ShellOptions Shell::options;
base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
-// Dummy external source stream which returns the whole source in one go.
-class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
- public:
- DummySourceStream(Local<String> source, Isolate* isolate) : done_(false) {
- source_length_ = source->Utf8Length(isolate);
- source_buffer_.reset(new uint8_t[source_length_]);
- source->WriteUtf8(isolate, reinterpret_cast<char*>(source_buffer_.get()),
- source_length_);
- }
-
- size_t GetMoreData(const uint8_t** src) override {
- if (done_) {
- return 0;
- }
- *src = source_buffer_.release();
- done_ = true;
-
- return source_length_;
- }
-
- private:
- int source_length_;
- std::unique_ptr<uint8_t[]> source_buffer_;
- bool done_;
-};
-
-class BackgroundCompileThread : public base::Thread {
- public:
- BackgroundCompileThread(Isolate* isolate, Local<String> source)
- : base::Thread(GetThreadOptions("BackgroundCompileThread")),
- source_(source),
- streamed_source_(std::make_unique<DummySourceStream>(source, isolate),
- v8::ScriptCompiler::StreamedSource::UTF8),
- task_(v8::ScriptCompiler::StartStreamingScript(isolate,
- &streamed_source_)) {}
-
- void Run() override { task_->Run(); }
-
- v8::ScriptCompiler::StreamedSource* streamed_source() {
- return &streamed_source_;
- }
-
- private:
- Local<String> source_;
- v8::ScriptCompiler::StreamedSource streamed_source_;
- std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task_;
-};
-
ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
Local<Value> source) {
base::MutexGuard lock_guard(cached_code_mutex_.Pointer());
@@ -544,16 +497,22 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
i::Handle<i::String> str = Utils::OpenHandle(*(source));
// Set up ParseInfo.
- i::ParseInfo parse_info(i_isolate);
- parse_info.set_toplevel();
- parse_info.set_allow_lazy_parsing();
- parse_info.set_language_mode(
- i::construct_language_mode(i::FLAG_use_strict));
-
- i::Handle<i::Script> script =
- parse_info.CreateScript(i_isolate, str, options.compile_options);
- if (!i::parsing::ParseProgram(&parse_info, script, i::kNullMaybeHandle,
- i_isolate)) {
+ i::UnoptimizedCompileState compile_state(i_isolate);
+
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForToplevelCompile(
+ i_isolate, true, i::construct_language_mode(i::FLAG_use_strict),
+ i::REPLMode::kNo);
+
+ if (options.compile_options == v8::ScriptCompiler::kEagerCompile) {
+ flags.set_is_eager(true);
+ }
+
+ i::ParseInfo parse_info(i_isolate, flags, &compile_state);
+
+ i::Handle<i::Script> script = parse_info.CreateScript(
+ i_isolate, str, i::kNullMaybeHandle, ScriptOriginOptions());
+ if (!i::parsing::ParseProgram(&parse_info, script, i_isolate)) {
fprintf(stderr, "Failed parsing\n");
return false;
}
@@ -588,23 +547,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
maybe_script = ScriptCompiler::Compile(
context, &script_source, ScriptCompiler::kNoCompileOptions);
}
- } else if (options.stress_background_compile) {
- // Start a background thread compiling the script.
- BackgroundCompileThread background_compile_thread(isolate, source);
- CHECK(background_compile_thread.Start());
-
- // In parallel, compile on the main thread to flush out any data races.
- {
- TryCatch ignore_try_catch(isolate);
- ScriptCompiler::Source script_source(source, origin);
- USE(ScriptCompiler::Compile(context, &script_source,
- ScriptCompiler::kNoCompileOptions));
- }
-
- // Join with background thread and finalize compilation.
- background_compile_thread.Join();
- maybe_script = v8::ScriptCompiler::Compile(
- context, background_compile_thread.streamed_source(), source, origin);
} else {
ScriptCompiler::Source script_source(source, origin);
maybe_script = ScriptCompiler::Compile(context, &script_source,
@@ -2239,7 +2181,7 @@ void Shell::OnExit(v8::Isolate* isolate) {
if (i::FLAG_dump_counters_nvp) {
// Dump counters as name-value pairs.
- for (auto pair : counters) {
+ for (const auto& pair : counters) {
std::string key = pair.first;
Counter* counter = pair.second;
if (counter->is_histogram()) {
@@ -2260,7 +2202,7 @@ void Shell::OnExit(v8::Isolate* isolate) {
<< std::string(kValueBoxSize - 6, ' ') << "|\n";
std::cout << "+" << std::string(kNameBoxSize, '-') << "+"
<< std::string(kValueBoxSize, '-') << "+\n";
- for (auto pair : counters) {
+ for (const auto& pair : counters) {
std::string key = pair.first;
Counter* counter = pair.second;
if (counter->is_histogram()) {
@@ -2909,12 +2851,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
strcmp(argv[i], "--no-stress-opt") == 0) {
options.stress_opt = false;
argv[i] = nullptr;
- } else if (strcmp(argv[i], "--stress-background-compile") == 0) {
- options.stress_background_compile = true;
+ } else if (strcmp(argv[i], "--stress-snapshot") == 0) {
+ options.stress_snapshot = true;
argv[i] = nullptr;
- } else if (strcmp(argv[i], "--nostress-background-compile") == 0 ||
- strcmp(argv[i], "--no-stress-background-compile") == 0) {
- options.stress_background_compile = false;
+ } else if (strcmp(argv[i], "--nostress-snapshot") == 0 ||
+ strcmp(argv[i], "--no-stress-snapshot") == 0) {
+ options.stress_snapshot = false;
argv[i] = nullptr;
} else if (strcmp(argv[i], "--noalways-opt") == 0 ||
strcmp(argv[i], "--no-always-opt") == 0) {
@@ -3080,7 +3022,7 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return true;
}
-int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
+int Shell::RunMain(Isolate* isolate, bool last_run) {
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
@@ -3108,6 +3050,18 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
DisposeModuleEmbedderData(context);
}
WriteLcovData(isolate, options.lcov_file);
+ if (last_run && options.stress_snapshot) {
+ static constexpr bool kClearRecompilableData = true;
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ // TODO(jgruber,v8:10500): Don't deoptimize once we support serialization
+ // of optimized code.
+ i::Deoptimizer::DeoptimizeAll(i_isolate);
+ i::Snapshot::ClearReconstructableDataForSerialization(
+ i_isolate, kClearRecompilableData);
+ i::Snapshot::SerializeDeserializeAndVerifyForTesting(i_isolate,
+ i_context);
+ }
}
CollectGarbage(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
@@ -3174,6 +3128,17 @@ bool ProcessMessages(
while (v8::platform::PumpMessageLoop(g_default_platform, isolate,
behavior())) {
MicrotasksScope::PerformCheckpoint(isolate);
+
+ if (i::FLAG_verify_predictable) {
+ // In predictable mode we push all background tasks into the foreground
+ // task queue of the {kProcessGlobalPredictablePlatformWorkerTaskQueue}
+ // isolate. We execute the tasks after one foreground task has been
+ // executed.
+ while (v8::platform::PumpMessageLoop(
+ g_default_platform,
+ kProcessGlobalPredictablePlatformWorkerTaskQueue, behavior())) {
+ }
+ }
}
if (g_default_platform->IdleTasksEnabled(isolate)) {
v8::platform::RunIdleTasks(g_default_platform, isolate,
@@ -3675,7 +3640,7 @@ int Shell::Main(int argc, char* argv[]) {
options.stress_runs);
D8Testing::PrepareStressRun(i);
bool last_run = i == options.stress_runs - 1;
- result = RunMain(isolate, argc, argv, last_run);
+ result = RunMain(isolate, last_run);
}
printf("======== Full Deoptimization =======\n");
D8Testing::DeoptimizeAll(isolate);
@@ -3685,7 +3650,7 @@ int Shell::Main(int argc, char* argv[]) {
printf("============ Run %d/%d ============\n", i + 1,
options.stress_runs);
bool last_run = i == options.stress_runs - 1;
- result = RunMain(isolate, argc, argv, last_run);
+ result = RunMain(isolate, last_run);
}
} else if (options.code_cache_options !=
ShellOptions::CodeCacheOptions::kNoProduceCache) {
@@ -3702,7 +3667,7 @@ int Shell::Main(int argc, char* argv[]) {
PerIsolateData data(isolate2);
Isolate::Scope isolate_scope(isolate2);
- result = RunMain(isolate2, argc, argv, false);
+ result = RunMain(isolate2, false);
}
isolate2->Dispose();
@@ -3715,11 +3680,11 @@ int Shell::Main(int argc, char* argv[]) {
printf("============ Run: Consume code cache ============\n");
// Second run to consume the cache in current isolate
- result = RunMain(isolate, argc, argv, true);
+ result = RunMain(isolate, true);
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
} else {
bool last_run = true;
- result = RunMain(isolate, argc, argv, last_run);
+ result = RunMain(isolate, last_run);
}
// Run interactive shell if explicitly requested or if no script has been
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 2d27f62e58..bd49b81fd0 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -268,6 +268,7 @@ class ShellOptions {
bool wait_for_wasm = true;
bool stress_opt = false;
int stress_runs = 1;
+ bool stress_snapshot = false;
bool interactive_shell = false;
bool test_shell = false;
bool expected_to_throw = false;
@@ -278,7 +279,6 @@ class ShellOptions {
int num_isolates = 1;
v8::ScriptCompiler::CompileOptions compile_options =
v8::ScriptCompiler::kNoCompileOptions;
- bool stress_background_compile = false;
CodeCacheOptions code_cache_options = CodeCacheOptions::kNoProduceCache;
SourceGroup* isolate_sources = nullptr;
const char* icu_data_file = nullptr;
@@ -323,7 +323,7 @@ class Shell : public i::AllStatic {
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Local<String> ReadFile(Isolate* isolate, const char* name);
static Local<Context> CreateEvaluationContext(Isolate* isolate);
- static int RunMain(Isolate* isolate, int argc, char* argv[], bool last_run);
+ static int RunMain(Isolate* isolate, bool last_run);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
static void OnExit(Isolate* isolate);
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index fcf9b8448a..473bac1bf9 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -256,7 +256,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include only the non-inlined version of an intrinsic.
#define INTRINSIC_WHITELIST(V) \
/* Conversions */ \
- V(NumberToString) \
+ V(NumberToStringSlow) \
V(ToBigInt) \
V(ToLength) \
V(ToNumber) \
@@ -313,7 +313,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ArrayIncludes_Slow) \
V(ArrayIndexOf) \
V(ArrayIsArray) \
- V(ClassOf) \
V(GetFunctionName) \
V(GetOwnPropertyDescriptor) \
V(GlobalPrint) \
@@ -782,11 +781,6 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kFunctionPrototypeApply:
// Error builtins.
case Builtins::kErrorConstructor:
- case Builtins::kMakeError:
- case Builtins::kMakeTypeError:
- case Builtins::kMakeSyntaxError:
- case Builtins::kMakeRangeError:
- case Builtins::kMakeURIError:
// RegExp builtins.
case Builtins::kRegExpConstructor:
// Internal.
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 19178d34ce..3f79f5ee3f 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -43,12 +43,6 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
DCHECK_NOT_NULL(js_frame);
deoptimized_frame_.reset(Deoptimizer::DebuggerInspectableFrame(
js_frame, inlined_frame_index, isolate));
- } else if (frame_->is_wasm_interpreter_entry()) {
- wasm_interpreted_frame_ =
- WasmInterpreterEntryFrame::cast(frame_)
- ->debug_info()
- .GetInterpretedFrame(frame_->fp(), inlined_frame_index);
- DCHECK(wasm_interpreted_frame_);
}
}
@@ -63,14 +57,11 @@ JavaScriptFrame* FrameInspector::javascript_frame() {
int FrameInspector::GetParametersCount() {
if (is_optimized_) return deoptimized_frame_->parameters_count();
- if (wasm_interpreted_frame_)
- return wasm_interpreted_frame_->GetParameterCount();
return frame_->ComputeParametersCount();
}
Handle<Object> FrameInspector::GetParameter(int index) {
if (is_optimized_) return deoptimized_frame_->GetParameter(index);
- // TODO(clemensb): Handle wasm_interpreted_frame_.
return handle(frame_->GetParameter(index), isolate_);
}
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 78248614e2..541ee1dc1d 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -11,13 +11,13 @@
#include "src/execution/isolate.h"
#include "src/execution/v8threads.h"
#include "src/objects/objects.h"
-#include "src/wasm/wasm-interpreter.h"
namespace v8 {
namespace internal {
class JavaScriptFrame;
class StandardFrame;
+class WasmFrame;
class FrameInspector {
public:
@@ -52,7 +52,6 @@ class FrameInspector {
StandardFrame* frame_;
int inlined_frame_index_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
- wasm::WasmInterpreter::FramePtr wasm_interpreted_frame_;
Isolate* isolate_;
Handle<Script> script_;
Handle<Object> receiver_;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index f6cfe31d32..3a46cf9b39 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -10,9 +10,9 @@
#include "include/v8-inspector.h"
#include "include/v8-util.h"
#include "include/v8.h"
-
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
+#include "src/utils/vector.h"
namespace v8 {
@@ -173,6 +173,9 @@ class WasmScript : public Script {
public:
static WasmScript* Cast(Script* script);
+ enum class DebugSymbolsType { None, SourceMap, EmbeddedDWARF, ExternalDWARF };
+ DebugSymbolsType GetDebugSymbolType() const;
+ MemorySpan<const char> ExternalSymbolsURL() const;
int NumFunctions() const;
int NumImportedFunctions() const;
MemorySpan<const uint8_t> Bytecode() const;
@@ -446,6 +449,7 @@ class V8_EXPORT_PRIVATE ScopeIterator {
class V8_EXPORT_PRIVATE StackTraceIterator {
public:
+ static bool SupportsWasmDebugEvaluate();
static std::unique_ptr<StackTraceIterator> Create(Isolate* isolate,
int index = 0);
StackTraceIterator() = default;
@@ -466,6 +470,8 @@ class V8_EXPORT_PRIVATE StackTraceIterator {
virtual bool Restart() = 0;
virtual v8::MaybeLocal<v8::Value> Evaluate(v8::Local<v8::String> source,
bool throw_on_side_effect) = 0;
+ virtual v8::MaybeLocal<v8::String> EvaluateWasm(
+ internal::Vector<const internal::byte> source, int frame_index) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(StackTraceIterator);
@@ -587,6 +593,23 @@ class PropertyIterator {
virtual bool is_array_index() = 0;
};
+// Wrapper around v8::internal::WasmValue.
+class V8_EXPORT_PRIVATE WasmValue : public v8::Value {
+ public:
+ static bool IsWasmValue(v8::Local<v8::Value> obj);
+ V8_INLINE static WasmValue* Cast(v8::Value* obj);
+ int value_type();
+ // Get the underlying values as a byte array, this is only valid if value_type
+ // is i32, i64, f32, f64, or s128.
+ v8::Local<v8::Array> bytes();
+ // Get the underlying anyref, only valid if value_type is anyref.
+ v8::Local<v8::Value> ref();
+
+ private:
+ WasmValue();
+ static void CheckCast(v8::Value* obj);
+};
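
A hypothetical fragment showing how an inspector-side consumer might use the wrapper declared above; the calling context is an assumption, and this header is V8-internal (V8_EXPORT_PRIVATE), so the sketch only makes sense inside the V8 tree:

    void DescribeIfWasmValue(v8::Local<v8::Value> value) {
      if (!v8::debug::WasmValue::IsWasmValue(value)) return;
      v8::debug::WasmValue* wasm_value = v8::debug::WasmValue::Cast(*value);
      // value_type() says which accessor is meaningful: numeric and s128
      // payloads come back through bytes(), anyref values through ref().
      int type = wasm_value->value_type();
      (void)type;
    }
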
+
AccessorPair* AccessorPair::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -594,6 +617,13 @@ AccessorPair* AccessorPair::Cast(v8::Value* value) {
return static_cast<AccessorPair*>(value);
}
+WasmValue* WasmValue::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmValue*>(value);
+}
+
MaybeLocal<Message> GetMessageFromPromise(Local<Promise> promise);
} // namespace debug
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index 28d8ed0c8f..ab3191d11d 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -129,12 +129,10 @@ bool DebugScopeIterator::SetVariableValue(v8::Local<v8::String> name,
}
DebugWasmScopeIterator::DebugWasmScopeIterator(Isolate* isolate,
- StandardFrame* frame,
- int inlined_frame_index)
+ WasmFrame* frame)
: isolate_(isolate),
frame_(frame),
- inlined_frame_index_(inlined_frame_index),
- type_(debug::ScopeIterator::ScopeTypeGlobal) {}
+ type_(debug::ScopeIterator::ScopeTypeModule) {}
bool DebugWasmScopeIterator::Done() {
return type_ == debug::ScopeIterator::ScopeTypeWith;
@@ -143,8 +141,11 @@ bool DebugWasmScopeIterator::Done() {
void DebugWasmScopeIterator::Advance() {
DCHECK(!Done());
switch (type_) {
- case ScopeTypeGlobal:
- type_ = debug::ScopeIterator::ScopeTypeLocal;
+ case ScopeTypeModule:
+ // Skip local scope and expression stack scope if the frame is not
+ // inspectable.
+ type_ = frame_->is_inspectable() ? debug::ScopeIterator::ScopeTypeLocal
+ : debug::ScopeIterator::ScopeTypeWith;
break;
case ScopeTypeLocal:
type_ = debug::ScopeIterator::ScopeTypeWasmExpressionStack;
@@ -166,36 +167,20 @@ v8::debug::ScopeIterator::ScopeType DebugWasmScopeIterator::GetType() {
v8::Local<v8::Object> DebugWasmScopeIterator::GetObject() {
DCHECK(!Done());
switch (type_) {
- case debug::ScopeIterator::ScopeTypeGlobal: {
+ case debug::ScopeIterator::ScopeTypeModule: {
Handle<WasmInstanceObject> instance =
FrameSummary::GetTop(frame_).AsWasm().wasm_instance();
- return Utils::ToLocal(wasm::GetGlobalScopeObject(instance));
+ return Utils::ToLocal(wasm::GetModuleScopeObject(instance));
}
case debug::ScopeIterator::ScopeTypeLocal: {
- if (frame_->is_wasm_interpreter_entry()) {
- Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame_)->debug_info(), isolate_);
- return Utils::ToLocal(WasmDebugInfo::GetLocalScopeObject(
- debug_info, frame_->fp(), inlined_frame_index_));
- }
- // Compiled code.
- DCHECK(frame_->is_wasm_compiled());
- wasm::DebugInfo* debug_info =
- WasmCompiledFrame::cast(frame_)->native_module()->GetDebugInfo();
+ DCHECK(frame_->is_wasm());
+ wasm::DebugInfo* debug_info = frame_->native_module()->GetDebugInfo();
return Utils::ToLocal(debug_info->GetLocalScopeObject(
isolate_, frame_->pc(), frame_->fp(), frame_->callee_fp()));
}
case debug::ScopeIterator::ScopeTypeWasmExpressionStack: {
- if (frame_->is_wasm_interpreter_entry()) {
- Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame_)->debug_info(), isolate_);
- return Utils::ToLocal(WasmDebugInfo::GetStackScopeObject(
- debug_info, frame_->fp(), inlined_frame_index_));
- }
- // Compiled code.
- DCHECK(frame_->is_wasm_compiled());
- wasm::DebugInfo* debug_info =
- WasmCompiledFrame::cast(frame_)->native_module()->GetDebugInfo();
+ DCHECK(frame_->is_wasm());
+ wasm::DebugInfo* debug_info = frame_->native_module()->GetDebugInfo();
return Utils::ToLocal(debug_info->GetStackScopeObject(
isolate_, frame_->pc(), frame_->fp(), frame_->callee_fp()));
}
diff --git a/deps/v8/src/debug/debug-scope-iterator.h b/deps/v8/src/debug/debug-scope-iterator.h
index 44d6c49860..a2b5ebcdf8 100644
--- a/deps/v8/src/debug/debug-scope-iterator.h
+++ b/deps/v8/src/debug/debug-scope-iterator.h
@@ -39,8 +39,7 @@ class DebugScopeIterator final : public debug::ScopeIterator {
class DebugWasmScopeIterator final : public debug::ScopeIterator {
public:
- DebugWasmScopeIterator(Isolate* isolate, StandardFrame* frame,
- int inlined_frame_index);
+ DebugWasmScopeIterator(Isolate* isolate, WasmFrame* frame);
bool Done() override;
void Advance() override;
@@ -56,8 +55,7 @@ class DebugWasmScopeIterator final : public debug::ScopeIterator {
v8::Local<v8::Value> value) override;
private:
Isolate* isolate_;
- StandardFrame* frame_;
- int inlined_frame_index_;
+ WasmFrame* frame_;
ScopeType type_;
};
} // namespace internal
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index c5061ecb81..6b838a69af 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -40,7 +40,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
TryParseAndRetrieveScopes(strategy);
}
-ScopeIterator::~ScopeIterator() { delete info_; }
+ScopeIterator::~ScopeIterator() = default;
Handle<Object> ScopeIterator::GetFunctionDebugName() const {
if (!function_.is_null()) return JSFunction::GetDebugName(function_);
@@ -236,37 +236,43 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
  // Depending on the chosen strategy, the whole script or just
// the closure is re-parsed for function scopes.
Handle<Script> script(Script::cast(shared_info->script()), isolate_);
- if (scope_info->scope_type() == FUNCTION_SCOPE &&
- strategy == ReparseStrategy::kFunctionLiteral) {
- info_ = new ParseInfo(isolate_, *shared_info);
- } else {
- info_ = new ParseInfo(isolate_, *script);
- info_->set_eager();
- }
+
+ // Pick between flags for a single function compilation, or an eager
+ // compilation of the whole script.
+ UnoptimizedCompileFlags flags =
+ (scope_info->scope_type() == FUNCTION_SCOPE &&
+ strategy == ReparseStrategy::kFunctionLiteral)
+ ? UnoptimizedCompileFlags::ForFunctionCompile(isolate_, *shared_info)
+ : UnoptimizedCompileFlags::ForScriptCompile(isolate_, *script)
+ .set_is_eager(true);
MaybeHandle<ScopeInfo> maybe_outer_scope;
if (scope_info->scope_type() == EVAL_SCOPE || script->is_wrapped()) {
- info_->set_eval();
+ flags.set_is_eval(true);
if (!context_->IsNativeContext()) {
maybe_outer_scope = handle(context_->scope_info(), isolate_);
}
// Language mode may be inherited from the eval caller.
// Retrieve it from shared function info.
- info_->set_language_mode(shared_info->language_mode());
+ flags.set_outer_language_mode(shared_info->language_mode());
} else if (scope_info->scope_type() == MODULE_SCOPE) {
- DCHECK(info_->is_module());
+ DCHECK(flags.is_module());
} else {
DCHECK(scope_info->scope_type() == SCRIPT_SCOPE ||
scope_info->scope_type() == FUNCTION_SCOPE);
}
+ UnoptimizedCompileState compile_state(isolate_);
+
+ info_ = std::make_unique<ParseInfo>(isolate_, flags, &compile_state);
+
const bool parse_result =
- info_->is_toplevel()
- ? parsing::ParseProgram(info_, script, maybe_outer_scope, isolate_)
- : parsing::ParseFunction(info_, shared_info, isolate_);
+ flags.is_toplevel()
+ ? parsing::ParseProgram(info_.get(), script, maybe_outer_scope,
+ isolate_)
+ : parsing::ParseFunction(info_.get(), shared_info, isolate_);
- if (parse_result && Rewriter::Rewrite(info_)) {
- info_->ast_value_factory()->Internalize(isolate_);
+ if (parse_result) {
DeclarationScope* literal_scope = info_->literal()->scope();
ScopeChainRetriever scope_chain_retriever(literal_scope, function_,
@@ -280,10 +286,12 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
? scope_chain_retriever.ClosureScope()
: literal_scope;
- CHECK(DeclarationScope::Analyze(info_));
if (ignore_nested_scopes) {
current_scope_ = closure_scope_;
start_scope_ = current_scope_;
+ // ignore_nested_scopes is only used for the return-position breakpoint,
+ // so we can safely assume that the closure context for the current
+ // function exists if it needs one.
if (closure_scope_->NeedsContext()) {
context_ = handle(context_->closure_context(), isolate_);
}
@@ -305,7 +313,7 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
}
void ScopeIterator::UnwrapEvaluationContext() {
- if (context_->is_null() || !context_->IsDebugEvaluateContext()) return;
+ if (!context_->IsDebugEvaluateContext()) return;
Context current = *context_;
do {
Object wrapped = current.get(Context::WRAPPED_CONTEXT_INDEX);
@@ -374,11 +382,25 @@ bool ScopeIterator::DeclaresLocals(Mode mode) const {
}
bool ScopeIterator::HasContext() const {
- return !InInnerScope() || current_scope_->NeedsContext();
+ return !InInnerScope() || NeedsAndHasContext();
+}
+
+bool ScopeIterator::NeedsAndHasContext() const {
+ if (!current_scope_->NeedsContext()) return false;
+ // Generally, if a scope needs a context, then we can assume that it has a
+ // context. However, the stack check during function entry happens before the
+ // function has a chance to create and push its own context, so we must check
+ // for the case where the function is executing in its parent context. This
+ // case is only possible in function scopes; top-level code (modules and
+ // non-module scripts) begin execution in the context they need and don't have
+ // a separate step to push the correct context.
+ return !(current_scope_ == closure_scope_ &&
+ current_scope_->is_function_scope() && !function_.is_null() &&
+ function_->context() == *context_);
}
void ScopeIterator::AdvanceOneScope() {
- if (current_scope_->NeedsContext()) {
+ if (NeedsAndHasContext()) {
DCHECK(!context_->previous().is_null());
context_ = handle(context_->previous(), isolate_);
}
@@ -406,7 +428,7 @@ void ScopeIterator::AdvanceContext() {
current_scope_ = current_scope_->outer_scope();
CollectLocalsFromCurrentScope();
- } while (!current_scope_->NeedsContext());
+ } while (!NeedsAndHasContext());
}
void ScopeIterator::Next() {
@@ -422,22 +444,23 @@ void ScopeIterator::Next() {
return;
}
- bool inner = InInnerScope();
- if (current_scope_ == closure_scope_) function_ = Handle<JSFunction>();
+ bool leaving_closure = current_scope_ == closure_scope_;
if (scope_type == ScopeTypeScript) {
- DCHECK_IMPLIES(InInnerScope(), current_scope_->is_script_scope());
+ DCHECK_IMPLIES(InInnerScope() && !leaving_closure,
+ current_scope_->is_script_scope());
seen_script_scope_ = true;
if (context_->IsScriptContext()) {
context_ = handle(context_->previous(), isolate_);
}
- } else if (!inner) {
+ } else if (!InInnerScope()) {
AdvanceContext();
} else {
DCHECK_NOT_NULL(current_scope_);
AdvanceToNonHiddenScope();
- if (!InInnerScope() && current_scope_ != closure_scope_) {
+ if (leaving_closure) {
+ DCHECK(current_scope_ != closure_scope_);
// Edge case when we just go past {closure_scope_}. This case
// already needs to start collecting locals for the blacklist.
locals_ = StringSet::New(isolate_);
@@ -445,6 +468,8 @@ void ScopeIterator::Next() {
}
}
+ if (leaving_closure) function_ = Handle<JSFunction>();
+
UnwrapEvaluationContext();
}
@@ -454,34 +479,29 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
if (InInnerScope()) {
switch (current_scope_->scope_type()) {
case FUNCTION_SCOPE:
- DCHECK_IMPLIES(current_scope_->NeedsContext(),
+ DCHECK_IMPLIES(NeedsAndHasContext(),
context_->IsFunctionContext() ||
context_->IsDebugEvaluateContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
- DCHECK_IMPLIES(current_scope_->NeedsContext(),
- context_->IsModuleContext());
+ DCHECK_IMPLIES(NeedsAndHasContext(), context_->IsModuleContext());
return ScopeTypeModule;
case SCRIPT_SCOPE:
- DCHECK_IMPLIES(
- current_scope_->NeedsContext(),
- context_->IsScriptContext() || context_->IsNativeContext());
+ DCHECK_IMPLIES(NeedsAndHasContext(), context_->IsScriptContext() ||
+ context_->IsNativeContext());
return ScopeTypeScript;
case WITH_SCOPE:
- DCHECK_IMPLIES(current_scope_->NeedsContext(),
- context_->IsWithContext());
+ DCHECK_IMPLIES(NeedsAndHasContext(), context_->IsWithContext());
return ScopeTypeWith;
case CATCH_SCOPE:
DCHECK(context_->IsCatchContext());
return ScopeTypeCatch;
case BLOCK_SCOPE:
case CLASS_SCOPE:
- DCHECK_IMPLIES(current_scope_->NeedsContext(),
- context_->IsBlockContext());
+ DCHECK_IMPLIES(NeedsAndHasContext(), context_->IsBlockContext());
return ScopeTypeBlock;
case EVAL_SCOPE:
- DCHECK_IMPLIES(current_scope_->NeedsContext(),
- context_->IsEvalContext());
+ DCHECK_IMPLIES(NeedsAndHasContext(), context_->IsEvalContext());
return ScopeTypeEval;
}
UNREACHABLE();
@@ -583,7 +603,7 @@ bool ScopeIterator::SetVariableValue(Handle<String> name,
DCHECK_EQ(ScopeTypeLocal, Type());
if (SetLocalVariableValue(name, value)) return true;
// There may not be an associated context since we're InInnerScope().
- if (!current_scope_->NeedsContext()) return false;
+ if (!NeedsAndHasContext()) return false;
} else {
DCHECK_EQ(ScopeTypeClosure, Type());
if (SetContextVariableValue(name, value)) return true;
@@ -627,7 +647,7 @@ void ScopeIterator::DebugPrint() {
case ScopeIterator::ScopeTypeLocal: {
os << "Local:\n";
- if (current_scope_->NeedsContext()) {
+ if (NeedsAndHasContext()) {
context_->Print(os);
if (context_->has_extension()) {
Handle<HeapObject> extension(context_->extension(), isolate_);
@@ -766,7 +786,9 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
Variable* function_var =
current_scope_->AsDeclarationScope()->function_var();
if (function_var != nullptr) {
- Handle<JSFunction> function = frame_inspector_->GetFunction();
+ Handle<JSFunction> function = frame_inspector_ == nullptr
+ ? function_
+ : frame_inspector_->GetFunction();
Handle<String> name = function_var->name();
if (visitor(name, function)) return true;
}
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index f53457ef46..a0357c7383 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -102,6 +102,7 @@ class ScopeIterator {
bool InInnerScope() const { return !function_.is_null(); }
bool HasContext() const;
+ bool NeedsAndHasContext() const;
Handle<Context> CurrentContext() const {
DCHECK(HasContext());
return context_;
@@ -109,10 +110,14 @@ class ScopeIterator {
private:
Isolate* isolate_;
- ParseInfo* info_ = nullptr;
+ std::unique_ptr<ParseInfo> info_;
FrameInspector* const frame_inspector_ = nullptr;
Handle<JSGeneratorObject> generator_;
+
+ // The currently-executing function from the inspected frame, or null if this
+ // ScopeIterator has already iterated to any Scope outside that function.
Handle<JSFunction> function_;
+
Handle<Context> context_;
Handle<Script> script_;
Handle<StringSet> locals_;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 04a2e63ae5..28d595853c 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -6,14 +6,22 @@
#include "src/api/api-inl.h"
#include "src/debug/debug-evaluate.h"
+#include "src/debug/debug-interface.h"
#include "src/debug/debug-scope-iterator.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate.h"
+#include "src/wasm/wasm-debug-evaluate.h"
+#include "src/wasm/wasm-debug.h"
namespace v8 {
+bool debug::StackTraceIterator::SupportsWasmDebugEvaluate() {
+ return i::FLAG_wasm_expose_debug_eval;
+}
+
std::unique_ptr<debug::StackTraceIterator> debug::StackTraceIterator::Create(
v8::Isolate* isolate, int index) {
return std::unique_ptr<debug::StackTraceIterator>(
@@ -160,8 +168,8 @@ DebugStackTraceIterator::GetScopeIterator() const {
DCHECK(!Done());
StandardFrame* frame = iterator_.frame();
if (frame->is_wasm()) {
- return std::make_unique<DebugWasmScopeIterator>(isolate_, iterator_.frame(),
- inlined_frame_index_);
+ return std::make_unique<DebugWasmScopeIterator>(isolate_,
+ WasmFrame::cast(frame));
}
return std::make_unique<DebugScopeIterator>(isolate_, frame_inspector_.get());
}
@@ -186,5 +194,26 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
}
return Utils::ToLocal(value);
}
+
+v8::MaybeLocal<v8::String> DebugStackTraceIterator::EvaluateWasm(
+ internal::Vector<const internal::byte> source, int frame_index) {
+ DCHECK(!Done());
+ if (!i::FLAG_wasm_expose_debug_eval || !iterator_.is_wasm()) {
+ return v8::MaybeLocal<v8::String>();
+ }
+ Handle<String> value;
+ i::SafeForInterruptsScope safe_for_interrupt_scope(isolate_);
+
+ FrameSummary summary = FrameSummary::Get(iterator_.frame(), 0);
+ const FrameSummary::WasmFrameSummary& wasmSummary = summary.AsWasm();
+ Handle<WasmInstanceObject> instance = wasmSummary.wasm_instance();
+
+ if (!v8::internal::wasm::DebugEvaluate(source, instance, iterator_.frame())
+ .ToHandle(&value)) {
+ isolate_->OptionalRescheduleException(false);
+ return v8::MaybeLocal<v8::String>();
+ }
+ return Utils::ToLocal(value);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.h b/deps/v8/src/debug/debug-stack-trace-iterator.h
index 3319bc15f5..2317af35f5 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.h
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.h
@@ -34,6 +34,8 @@ class DebugStackTraceIterator final : public debug::StackTraceIterator {
bool Restart() override;
v8::MaybeLocal<v8::Value> Evaluate(v8::Local<v8::String> source,
bool throw_on_side_effect) override;
+ v8::MaybeLocal<v8::String> EvaluateWasm(
+ internal::Vector<const internal::byte> source, int frame_index) override;
private:
Isolate* isolate_;
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index a7a3d2fd81..627ccc7c56 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -622,6 +622,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<BreakPoint> break_point =
isolate_->factory()->NewBreakPoint(*id, condition);
if (script->type() == Script::TYPE_WASM) {
+ RecordWasmScriptWithBreakpoints(script);
return WasmScript::SetBreakPoint(script, source_position, break_point);
}
@@ -777,12 +778,53 @@ void Debug::RemoveBreakpointForWasmScript(Handle<Script> script, int id) {
}
}
+void Debug::RecordWasmScriptWithBreakpoints(Handle<Script> script) {
+ if (wasm_scripts_with_breakpoints_.is_null()) {
+ Handle<WeakArrayList> new_list = isolate_->factory()->NewWeakArrayList(4);
+ wasm_scripts_with_breakpoints_ =
+ isolate_->global_handles()->Create(*new_list);
+ }
+ {
+ DisallowHeapAllocation no_gc;
+ for (int idx = wasm_scripts_with_breakpoints_->length() - 1; idx >= 0;
+ --idx) {
+ HeapObject wasm_script;
+ if (wasm_scripts_with_breakpoints_->Get(idx).GetHeapObject(
+ &wasm_script) &&
+ wasm_script == *script) {
+ return;
+ }
+ }
+ }
+ Handle<WeakArrayList> new_list = WeakArrayList::Append(
+ isolate_, wasm_scripts_with_breakpoints_, MaybeObjectHandle{script});
+ if (*new_list != *wasm_scripts_with_breakpoints_) {
+ isolate_->global_handles()->Destroy(
+ wasm_scripts_with_breakpoints_.location());
+ wasm_scripts_with_breakpoints_ =
+ isolate_->global_handles()->Create(*new_list);
+ }
+}
+
// Clear out all the debug break code.
void Debug::ClearAllBreakPoints() {
ClearAllDebugInfos([=](Handle<DebugInfo> info) {
ClearBreakPoints(info);
info->ClearBreakInfo(isolate_);
});
+ // Clear all wasm breakpoints.
+ if (!wasm_scripts_with_breakpoints_.is_null()) {
+ DisallowHeapAllocation no_gc;
+ for (int idx = wasm_scripts_with_breakpoints_->length() - 1; idx >= 0;
+ --idx) {
+ HeapObject raw_wasm_script;
+ if (wasm_scripts_with_breakpoints_->Get(idx).GetHeapObject(
+ &raw_wasm_script)) {
+ WasmScript::ClearAllBreakpoints(Script::cast(raw_wasm_script));
+ }
+ }
+ wasm_scripts_with_breakpoints_ = Handle<WeakArrayList>{};
+ }
}
void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
@@ -872,20 +914,6 @@ void Debug::PrepareStepIn(Handle<JSFunction> function) {
if (in_debug_scope()) return;
if (break_disabled()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
- // If stepping from JS into Wasm, and we are using the wasm interpreter for
- // debugging, prepare the interpreter for step in.
- if (shared->HasWasmExportedFunctionData() && !FLAG_debug_in_liftoff) {
- auto imported_function = Handle<WasmExportedFunction>::cast(function);
- Handle<WasmInstanceObject> wasm_instance(imported_function->instance(),
- isolate_);
- Handle<WasmDebugInfo> wasm_debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(wasm_instance);
- int func_index = shared->wasm_exported_function_data().function_index();
- WasmDebugInfo::PrepareStepIn(wasm_debug_info, func_index);
- // We need to reset all of this since break would be
- // handled in Wasm Interpreter now. Otherwise it would be a loop here.
- ClearStepping();
- }
if (IsBlackboxed(shared)) return;
if (*function == thread_local_.ignore_step_into_function_) return;
thread_local_.ignore_step_into_function_ = Smi::zero();
@@ -1040,17 +1068,9 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_frame_count_ = current_frame_count;
// No longer perform the current async step.
clear_suspended_generator();
- } else if (frame->is_wasm_interpreter_entry()) {
- // Handle stepping in wasm functions via the wasm interpreter.
- WasmInterpreterEntryFrame* wasm_frame =
- WasmInterpreterEntryFrame::cast(frame);
- if (wasm_frame->NumberOfActiveFrames() > 0) {
- wasm_frame->debug_info().PrepareStep(step_action);
- return;
- }
- } else if (FLAG_debug_in_liftoff && frame->is_wasm_compiled()) {
+ } else if (frame->is_wasm()) {
// Handle stepping in Liftoff code.
- WasmCompiledFrame* wasm_frame = WasmCompiledFrame::cast(frame);
+ WasmFrame* wasm_frame = WasmFrame::cast(frame);
wasm::WasmCodeRefScope code_ref_scope;
wasm::WasmCode* code = wasm_frame->wasm_code();
if (code->is_liftoff()) {
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 4bb2008c4d..a8adb77697 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -248,6 +248,8 @@ class V8_EXPORT_PRIVATE Debug {
void RemoveBreakpoint(int id);
void RemoveBreakpointForWasmScript(Handle<Script> script, int id);
+ void RecordWasmScriptWithBreakpoints(Handle<Script> script);
+
// Find breakpoints from the debug info and the break location and check
// whether they are hit. Return an empty handle if not, or a FixedArray with
// hit BreakPoint objects.
@@ -546,6 +548,9 @@ class V8_EXPORT_PRIVATE Debug {
// Storage location for registers when handling debug break calls
ThreadLocal thread_local_;
+ // This is a global handle, lazily initialized.
+ Handle<WeakArrayList> wasm_scripts_with_breakpoints_;
+
Isolate* isolate_;
friend class Isolate;
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index aad5f5d9e6..cd40eae656 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -750,7 +750,6 @@ class CollectFunctionLiterals final
bool ParseScript(Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
bool compile_as_well, std::vector<FunctionLiteral*>* literals,
debug::LiveEditResult* result) {
- parse_info->set_eager();
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
Handle<SharedFunctionInfo> shared;
bool success = false;
@@ -759,10 +758,6 @@ bool ParseScript(Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
.ToHandle(&shared);
} else {
success = parsing::ParseProgram(parse_info, script, isolate);
- if (success) {
- success = Compiler::Analyze(parse_info);
- parse_info->ast_value_factory()->Internalize(isolate);
- }
}
if (!success) {
isolate->OptionalRescheduleException(false);
@@ -1058,15 +1053,23 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
return;
}
- ParseInfo parse_info(isolate, *script);
+ UnoptimizedCompileState compile_state(isolate);
+ UnoptimizedCompileFlags flags =
+ UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_eager(true);
+ ParseInfo parse_info(isolate, flags, &compile_state);
std::vector<FunctionLiteral*> literals;
if (!ParseScript(isolate, script, &parse_info, false, &literals, result))
return;
Handle<Script> new_script = isolate->factory()->CloneScript(script);
new_script->set_source(*new_source);
+ UnoptimizedCompileState new_compile_state(isolate);
+ UnoptimizedCompileFlags new_flags =
+ UnoptimizedCompileFlags::ForScriptCompile(isolate, *new_script);
+ new_flags.set_is_eager(true);
+ ParseInfo new_parse_info(isolate, new_flags, &new_compile_state);
std::vector<FunctionLiteral*> new_literals;
- ParseInfo new_parse_info(isolate, *new_script);
if (!ParseScript(isolate, new_script, &new_parse_info, true, &new_literals,
result)) {
return;
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index be32d3ba28..eeed2d8e3e 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -36,7 +36,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ mr(fp, r4);
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
- __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lhz(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mr(r5, r3);
diff --git a/deps/v8/src/debug/wasm/gdb-server/OWNERS b/deps/v8/src/debug/wasm/gdb-server/OWNERS
new file mode 100644
index 0000000000..4b8c1919e8
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/OWNERS
@@ -0,0 +1,3 @@
+paolosev@microsoft.com
+
+# COMPONENT: Blink>JavaScript>WebAssembly \ No newline at end of file
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.cc
new file mode 100644
index 0000000000..b4880ed1ff
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.cc
@@ -0,0 +1,104 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
+using std::string;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+// GDB expects lower case values.
+static const char kHexChars[] = "0123456789abcdef";
+
+void UInt8ToHex(uint8_t byte, char chars[2]) {
+ DCHECK(chars);
+ chars[0] = kHexChars[byte >> 4];
+ chars[1] = kHexChars[byte & 0xF];
+}
+
+bool HexToUInt8(const char chars[2], uint8_t* byte) {
+ uint8_t o1, o2;
+ if (NibbleToUInt8(chars[0], &o1) && NibbleToUInt8(chars[1], &o2)) {
+ *byte = (o1 << 4) + o2;
+ return true;
+ }
+
+ return false;
+}
+
+bool NibbleToUInt8(char ch, uint8_t* byte) {
+ DCHECK(byte);
+
+ // Check for nibble of a-f
+ if ((ch >= 'a') && (ch <= 'f')) {
+ *byte = (ch - 'a' + 10);
+ return true;
+ }
+
+ // Check for nibble of A-F
+ if ((ch >= 'A') && (ch <= 'F')) {
+ *byte = (ch - 'A' + 10);
+ return true;
+ }
+
+ // Check for nibble of 0-9
+ if ((ch >= '0') && (ch <= '9')) {
+ *byte = (ch - '0');
+ return true;
+ }
+
+ // Not a valid nibble representation
+ return false;
+}
+
+std::vector<std::string> StringSplit(const string& instr, const char* delim) {
+ std::vector<std::string> result;
+
+ const char* in = instr.data();
+ if (nullptr == in) return result;
+
+ // Check if we have nothing to do
+ if (nullptr == delim) {
+ result.push_back(string(in));
+ return result;
+ }
+
+ while (*in) {
+ // Toss all preceding delimiters
+ while (*in && strchr(delim, *in)) in++;
+
+ // If we still have something to process
+ if (*in) {
+ const char* start = in;
+ size_t len = 0;
+ // Keep moving forward for all valid chars
+ while (*in && (strchr(delim, *in) == nullptr)) {
+ len++;
+ in++;
+ }
+
+ // Build this token and add it to the array.
+ result.push_back(string{start, len});
+ }
+ }
+ return result;
+}
+
+std::string Mem2Hex(const uint8_t* mem, size_t count) {
+ std::vector<char> result(count * 2 + 1);
+ for (size_t i = 0; i < count; i++) UInt8ToHex(*mem++, &result[i * 2]);
+ result[count * 2] = '\0';
+ return result.data();
+}
+
+std::string Mem2Hex(const std::string& str) {
+ return Mem2Hex(reinterpret_cast<const uint8_t*>(str.data()), str.size());
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
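
The helpers above implement the lower-case hex encoding that GDB-remote uses on the wire for memory contents and strings. A minimal standalone sketch of the same encoding (plain C++ for illustration only, not part of this patch; the names are made up):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

// Encode a byte buffer as lower-case hex pairs, as Mem2Hex does above.
std::string ToHex(const uint8_t* mem, size_t count) {
  static const char kHexChars[] = "0123456789abcdef";
  std::string out;
  out.reserve(count * 2);
  for (size_t i = 0; i < count; i++) {
    out.push_back(kHexChars[mem[i] >> 4]);
    out.push_back(kHexChars[mem[i] & 0xF]);
  }
  return out;
}

int main() {
  std::string name = "wasm32";
  // Strings travel as ASCII hex pairs: "wasm32" -> "7761736d3332".
  std::printf("%s\n",
              ToHex(reinterpret_cast<const uint8_t*>(name.data()), name.size())
                  .c_str());
}
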
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.h b/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.h
new file mode 100644
index 0000000000..88a2715ff6
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-remote-util.h
@@ -0,0 +1,72 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_GDB_REMOTE_UTIL_H_
+#define V8_DEBUG_WASM_GDB_SERVER_GDB_REMOTE_UTIL_H_
+
+#include <string>
+#include <vector>
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+#define TRACE_GDB_REMOTE(...) \
+ do { \
+ if (FLAG_trace_wasm_gdb_remote) PrintF("[gdb-remote] " __VA_ARGS__); \
+ } while (false)
+
+// Convert from 0-255 to a pair of ASCII chars (0-9,a-f).
+void UInt8ToHex(uint8_t byte, char chars[2]);
+
+// Convert a pair of hex chars into a value 0-255 or return false if either
+// input character is not a valid nibble.
+bool HexToUInt8(const char chars[2], uint8_t* byte);
+
+// Convert from ASCII (0-9,a-f,A-F) to 4b unsigned or return false if the
+// input char is unexpected.
+bool NibbleToUInt8(char ch, uint8_t* byte);
+
+std::vector<std::string> V8_EXPORT_PRIVATE StringSplit(const std::string& instr,
+ const char* delim);
+
+// Convert the memory pointed to by {mem} into a hex string in GDB-remote
+// format.
+std::string Mem2Hex(const uint8_t* mem, size_t count);
+std::string Mem2Hex(const std::string& str);
+
+// For LLDB debugging, an address in a Wasm module code space is represented
+// with 64 bits, where the first 32 bits identify the module id:
+// +--------------------+--------------------+
+// | module_id | offset |
+// +--------------------+--------------------+
+// <----- 32 bit -----> <----- 32 bit ----->
+class wasm_addr_t {
+ public:
+ wasm_addr_t(uint32_t module_id, uint32_t offset)
+ : module_id_(module_id), offset_(offset) {}
+ explicit wasm_addr_t(uint64_t address)
+ : module_id_(address >> 32), offset_(address & 0xffffffff) {}
+
+ inline uint32_t ModuleId() const { return module_id_; }
+ inline uint32_t Offset() const { return offset_; }
+
+ inline operator uint64_t() const {
+ return static_cast<uint64_t>(module_id_) << 32 | offset_;
+ }
+
+ private:
+ uint32_t module_id_;
+ uint32_t offset_;
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_GDB_REMOTE_UTIL_H_
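
The wasm_addr_t comment above describes how a 32-bit module id and a 32-bit code offset are packed into a single 64-bit LLDB address. A small standalone illustration of that packing and unpacking (not V8 code; the values are hypothetical):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t module_id = 0x00030002;  // hypothetical module id
  uint32_t offset = 0x2a;           // hypothetical offset into the code space
  uint64_t address = static_cast<uint64_t>(module_id) << 32 | offset;

  // Recover the halves, as wasm_addr_t::ModuleId() / Offset() do.
  uint32_t recovered_module = static_cast<uint32_t>(address >> 32);
  uint32_t recovered_offset = static_cast<uint32_t>(address & 0xffffffff);

  std::printf("address=%016llx module=%08x offset=%08x\n",
              static_cast<unsigned long long>(address), recovered_module,
              recovered_offset);
}
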
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc
index a9f1b58f6c..03b9b8fd3e 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.cc
@@ -32,7 +32,7 @@ bool GdbServerThread::StartAndInitialize() {
// this operation happens at most once per process and only when the
// --wasm-gdb-remote flag is set.
start_semaphore_.Wait();
- return true;
+ return !!target_;
}
void GdbServerThread::CleanupThread() {
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h
index f31756cbb3..cca1e4a6de 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server-thread.h
@@ -34,6 +34,8 @@ class GdbServerThread : public v8::base::Thread {
// closes any active debugging session.
void Stop();
+ Target& GetTarget() { return *target_; }
+
private:
void CleanupThread();
@@ -47,7 +49,7 @@ class GdbServerThread : public v8::base::Thread {
base::Mutex mutex_;
// Protected by {mutex_}:
- std::unique_ptr<Transport> transport_;
+ std::unique_ptr<TransportBase> transport_;
std::unique_ptr<Target> target_;
DISALLOW_COPY_AND_ASSIGN(GdbServerThread);
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
index bad7f439eb..96e2308cee 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
@@ -4,34 +4,420 @@
#include "src/debug/wasm/gdb-server/gdb-server.h"
+#include <inttypes.h>
+#include <functional>
+#include "src/api/api-inl.h"
+#include "src/api/api.h"
+#include "src/debug/debug.h"
#include "src/debug/wasm/gdb-server/gdb-server-thread.h"
-#include "src/wasm/wasm-engine.h"
+#include "src/utils/locked-queue-inl.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace gdb_server {
-GdbServer::GdbServer() {
- DCHECK(!thread_);
+static const uint32_t kMaxWasmCallStack = 20;
+
+// A TaskRunner is an object that runs posted tasks (in the form of closure
+// objects). Tasks are queued and run, in order, in the thread where the
+// TaskRunner::RunMessageLoop() is called.
+class TaskRunner {
+ public:
+ // Class Task wraps a std::function with a semaphore to signal its completion.
+ // This logic would be neatly implemented with std::packaged_tasks but we
+ // cannot use <future> in V8.
+ class Task {
+ public:
+ Task(base::Semaphore* ready_semaphore, std::function<void()> func)
+ : ready_semaphore_(ready_semaphore), func_(func) {}
+
+ void Run() {
+ func_();
+ ready_semaphore_->Signal();
+ }
+
+ // A semaphore object passed by the thread that posts a task.
+ // The sender can Wait on this semaphore to block until the task has
+ // completed execution in the TaskRunner thread.
+ base::Semaphore* ready_semaphore_;
+
+ // The function to run.
+ std::function<void()> func_;
+ };
+
+ TaskRunner()
+ : process_queue_semaphore_(0),
+ nested_loop_count_(0),
+ is_terminated_(false) {}
+
+ // Starts the task runner. All tasks posted are run, in order, in the thread
+ // that calls this function.
+ void Run() {
+ is_terminated_ = false;
+ int loop_number = ++nested_loop_count_;
+ while (nested_loop_count_ == loop_number && !is_terminated_) {
+ std::shared_ptr<Task> task = GetNext();
+ if (task) {
+ task->Run();
+ }
+ }
+ }
+
+ // Terminates the task runner. Tasks that are still pending in the queue are
+ // not discarded and will be executed when the task runner is restarted.
+ void Terminate() {
+ DCHECK_LT(0, nested_loop_count_);
+ --nested_loop_count_;
+
+ is_terminated_ = true;
+ process_queue_semaphore_.Signal();
+ }
+
+ // Posts a task to the task runner, to be executed in the task runner thread.
+ template <typename Functor>
+ auto Append(base::Semaphore* ready_semaphore, Functor&& task) {
+ queue_.Enqueue(std::make_shared<Task>(ready_semaphore, task));
+ process_queue_semaphore_.Signal();
+ }
+
+ private:
+ std::shared_ptr<Task> GetNext() {
+ while (!is_terminated_) {
+ if (queue_.IsEmpty()) {
+ process_queue_semaphore_.Wait();
+ }
+
+ std::shared_ptr<Task> task;
+ if (queue_.Dequeue(&task)) {
+ return task;
+ }
+ }
+ return nullptr;
+ }
+
+ LockedQueue<std::shared_ptr<Task>> queue_;
+ v8::base::Semaphore process_queue_semaphore_;
+ int nested_loop_count_;
+ std::atomic<bool> is_terminated_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskRunner);
+};
+
+GdbServer::GdbServer() { task_runner_ = std::make_unique<TaskRunner>(); }
+
+template <typename Functor>
+auto GdbServer::RunSyncTask(Functor&& callback) const {
+ // Executed in the GDBServerThread.
+ v8::base::Semaphore ready_semaphore(0);
+ task_runner_->Append(&ready_semaphore, callback);
+ ready_semaphore.Wait();
+}
+
+// static
+std::unique_ptr<GdbServer> GdbServer::Create() {
DCHECK(FLAG_wasm_gdb_remote);
- thread_ = std::make_unique<GdbServerThread>(this);
- // TODO(paolosev): does StartSynchronously hurt performances?
- if (!thread_->StartAndInitialize()) {
+ std::unique_ptr<GdbServer> gdb_server(new GdbServer());
+
+ // Spawns the GDB-stub thread where all the communication with the debugger
+ // happens.
+ gdb_server->thread_ = std::make_unique<GdbServerThread>(gdb_server.get());
+ if (!gdb_server->thread_->StartAndInitialize()) {
TRACE_GDB_REMOTE(
"Cannot initialize thread, GDB-remote debugging will be disabled.\n");
- thread_ = nullptr;
+ return nullptr;
}
+ return gdb_server;
}
GdbServer::~GdbServer() {
+ // All Isolates have been deregistered.
+ DCHECK(isolate_delegates_.empty());
+
if (thread_) {
+ // Waits for the GDB-stub thread to terminate.
thread_->Stop();
thread_->Join();
}
}
+void GdbServer::RunMessageLoopOnPause() { task_runner_->Run(); }
+
+void GdbServer::QuitMessageLoopOnPause() { task_runner_->Terminate(); }
+
+std::vector<GdbServer::WasmModuleInfo> GdbServer::GetLoadedModules() {
+ // Executed in the GDBServerThread.
+ std::vector<GdbServer::WasmModuleInfo> modules;
+
+ RunSyncTask([this, &modules]() {
+ // Executed in the isolate thread.
+ for (const auto& pair : scripts_) {
+ uint32_t module_id = pair.first;
+ const WasmModuleDebug& module_debug = pair.second;
+ modules.push_back({module_id, module_debug.GetModuleName()});
+ }
+ });
+ return modules;
+}
+
+bool GdbServer::GetModuleDebugHandler(uint32_t module_id,
+ WasmModuleDebug** wasm_module_debug) {
+ // Always executed in the isolate thread.
+ ScriptsMap::iterator scriptIterator = scripts_.find(module_id);
+ if (scriptIterator != scripts_.end()) {
+ *wasm_module_debug = &scriptIterator->second;
+ return true;
+ }
+ wasm_module_debug = nullptr;
+ return false;
+}
+
+bool GdbServer::GetWasmGlobal(uint32_t frame_index, uint32_t index,
+ uint8_t* buffer, uint32_t buffer_size,
+ uint32_t* size) {
+ // Executed in the GDBServerThread.
+ bool result = false;
+ RunSyncTask([this, &result, frame_index, index, buffer, buffer_size, size]() {
+ // Executed in the isolate thread.
+ result = WasmModuleDebug::GetWasmGlobal(GetTarget().GetCurrentIsolate(),
+ frame_index, index, buffer,
+ buffer_size, size);
+ });
+ return result;
+}
+
+bool GdbServer::GetWasmLocal(uint32_t frame_index, uint32_t index,
+ uint8_t* buffer, uint32_t buffer_size,
+ uint32_t* size) {
+ // Executed in the GDBServerThread.
+ bool result = false;
+ RunSyncTask([this, &result, frame_index, index, buffer, buffer_size, size]() {
+ // Executed in the isolate thread.
+ result = WasmModuleDebug::GetWasmLocal(GetTarget().GetCurrentIsolate(),
+ frame_index, index, buffer,
+ buffer_size, size);
+ });
+ return result;
+}
+
+bool GdbServer::GetWasmStackValue(uint32_t frame_index, uint32_t index,
+ uint8_t* buffer, uint32_t buffer_size,
+ uint32_t* size) {
+ // Executed in the GDBServerThread.
+ bool result = false;
+ RunSyncTask([this, &result, frame_index, index, buffer, buffer_size, size]() {
+ // Executed in the isolate thread.
+ result = WasmModuleDebug::GetWasmStackValue(GetTarget().GetCurrentIsolate(),
+ frame_index, index, buffer,
+ buffer_size, size);
+ });
+ return result;
+}
+
+uint32_t GdbServer::GetWasmMemory(uint32_t frame_index, uint32_t offset,
+ uint8_t* buffer, uint32_t size) {
+ // Executed in the GDBServerThread.
+ uint32_t bytes_read = 0;
+ RunSyncTask([this, &bytes_read, frame_index, offset, buffer, size]() {
+ // Executed in the isolate thread.
+ bytes_read = WasmModuleDebug::GetWasmMemory(
+ GetTarget().GetCurrentIsolate(), frame_index, offset, buffer, size);
+ });
+ return bytes_read;
+}
+
+uint32_t GdbServer::GetWasmModuleBytes(wasm_addr_t wasm_addr, uint8_t* buffer,
+ uint32_t size) {
+ // Executed in the GDBServerThread.
+ uint32_t bytes_read = 0;
+ RunSyncTask([this, &bytes_read, wasm_addr, buffer, size]() {
+ // Executed in the isolate thread.
+ WasmModuleDebug* module_debug;
+ if (GetModuleDebugHandler(wasm_addr.ModuleId(), &module_debug)) {
+ bytes_read = module_debug->GetWasmModuleBytes(wasm_addr, buffer, size);
+ }
+ });
+ return bytes_read;
+}
+
+bool GdbServer::AddBreakpoint(uint32_t wasm_module_id, uint32_t offset) {
+ // Executed in the GDBServerThread.
+ bool result = false;
+ RunSyncTask([this, &result, wasm_module_id, offset]() {
+ // Executed in the isolate thread.
+ WasmModuleDebug* module_debug;
+ if (GetModuleDebugHandler(wasm_module_id, &module_debug)) {
+ int breakpoint_id = 0;
+ if (module_debug->AddBreakpoint(offset, &breakpoint_id)) {
+ breakpoints_[wasm_addr_t(wasm_module_id, offset)] = breakpoint_id;
+ result = true;
+ }
+ }
+ });
+ return result;
+}
+
+bool GdbServer::RemoveBreakpoint(uint32_t wasm_module_id, uint32_t offset) {
+ // Executed in the GDBServerThread.
+ bool result = false;
+ RunSyncTask([this, &result, wasm_module_id, offset]() {
+ // Executed in the isolate thread.
+ BreakpointsMap::iterator it =
+ breakpoints_.find(wasm_addr_t(wasm_module_id, offset));
+ if (it != breakpoints_.end()) {
+ int breakpoint_id = it->second;
+ breakpoints_.erase(it);
+
+ WasmModuleDebug* module_debug;
+ if (GetModuleDebugHandler(wasm_module_id, &module_debug)) {
+ module_debug->RemoveBreakpoint(offset, breakpoint_id);
+ result = true;
+ }
+ }
+ });
+ return result;
+}
+
+std::vector<wasm_addr_t> GdbServer::GetWasmCallStack() const {
+ // Executed in the GDBServerThread.
+ std::vector<wasm_addr_t> result;
+ RunSyncTask([this, &result]() {
+ // Executed in the isolate thread.
+ result = GetTarget().GetCallStack();
+ });
+ return result;
+}
+
+void GdbServer::AddIsolate(Isolate* isolate) {
+ // Executed in the isolate thread.
+ if (isolate_delegates_.find(isolate) == isolate_delegates_.end()) {
+ isolate_delegates_[isolate] =
+ std::make_unique<DebugDelegate>(isolate, this);
+ }
+}
+
+void GdbServer::RemoveIsolate(Isolate* isolate) {
+ // Executed in the isolate thread.
+ auto it = isolate_delegates_.find(isolate);
+ if (it != isolate_delegates_.end()) {
+ for (auto it = scripts_.begin(); it != scripts_.end();) {
+ if (it->second.GetIsolate() == isolate) {
+ it = scripts_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ isolate_delegates_.erase(it);
+ }
+}
+
+void GdbServer::Suspend() {
+ // Executed in the GDBServerThread.
+ auto it = isolate_delegates_.begin();
+ if (it != isolate_delegates_.end()) {
+ Isolate* isolate = it->first;
+ v8::Isolate* v8Isolate = (v8::Isolate*)isolate;
+ v8Isolate->RequestInterrupt(
+ // Executed in the isolate thread.
+ [](v8::Isolate* isolate, void*) {
+ if (v8::debug::AllFramesOnStackAreBlackboxed(isolate)) {
+ v8::debug::SetBreakOnNextFunctionCall(isolate);
+ } else {
+ v8::debug::BreakRightNow(isolate);
+ }
+ },
+ this);
+ }
+}
+
+void GdbServer::PrepareStep() {
+ // Executed in the GDBServerThread.
+ wasm_addr_t pc = GetTarget().GetCurrentPc();
+ RunSyncTask([this, pc]() {
+ // Executed in the isolate thread.
+ WasmModuleDebug* module_debug;
+ if (GetModuleDebugHandler(pc.ModuleId(), &module_debug)) {
+ module_debug->PrepareStep();
+ }
+ });
+}
+
+void GdbServer::AddWasmModule(uint32_t module_id,
+ Local<debug::WasmScript> wasm_script) {
+ // Executed in the isolate thread.
+ DCHECK_EQ(Script::TYPE_WASM, Utils::OpenHandle(*wasm_script)->type());
+ v8::Isolate* isolate = wasm_script->GetIsolate();
+ scripts_.insert(
+ std::make_pair(module_id, WasmModuleDebug(isolate, wasm_script)));
+
+ if (FLAG_wasm_pause_waiting_for_debugger && scripts_.size() == 1) {
+ TRACE_GDB_REMOTE("Paused, waiting for a debugger to attach...\n");
+ Suspend();
+ }
+}
+
+Target& GdbServer::GetTarget() const { return thread_->GetTarget(); }
+
+// static
+std::atomic<uint32_t> GdbServer::DebugDelegate::id_s;
+
+GdbServer::DebugDelegate::DebugDelegate(Isolate* isolate, GdbServer* gdb_server)
+ : isolate_(isolate), id_(id_s++), gdb_server_(gdb_server) {
+ isolate_->SetCaptureStackTraceForUncaughtExceptions(
+ true, kMaxWasmCallStack, v8::StackTrace::kOverview);
+
+ // Register the delegate
+ isolate_->debug()->SetDebugDelegate(this);
+ v8::debug::TierDownAllModulesPerIsolate((v8::Isolate*)isolate_);
+ v8::debug::ChangeBreakOnException((v8::Isolate*)isolate_,
+ v8::debug::BreakOnUncaughtException);
+}
+
+GdbServer::DebugDelegate::~DebugDelegate() {
+ // Deregister the delegate
+ isolate_->debug()->SetDebugDelegate(nullptr);
+}
+
+void GdbServer::DebugDelegate::ScriptCompiled(Local<debug::Script> script,
+ bool is_live_edited,
+ bool has_compile_error) {
+ // Executed in the isolate thread.
+ if (script->IsWasm()) {
+ DCHECK_EQ(reinterpret_cast<v8::Isolate*>(isolate_), script->GetIsolate());
+ gdb_server_->AddWasmModule(GetModuleId(script->Id()),
+ script.As<debug::WasmScript>());
+ }
+}
+
+void GdbServer::DebugDelegate::BreakProgramRequested(
+ // Executed in the isolate thread.
+ Local<v8::Context> paused_context,
+ const std::vector<debug::BreakpointId>& inspector_break_points_hit) {
+ gdb_server_->GetTarget().OnProgramBreak(
+ isolate_, WasmModuleDebug::GetCallStack(id_, isolate_));
+ gdb_server_->RunMessageLoopOnPause();
+}
+
+void GdbServer::DebugDelegate::ExceptionThrown(
+ // Executed in the isolate thread.
+ Local<v8::Context> paused_context, Local<Value> exception,
+ Local<Value> promise, bool is_uncaught,
+ debug::ExceptionType exception_type) {
+ if (exception_type == v8::debug::kException && is_uncaught) {
+ gdb_server_->GetTarget().OnException(
+ isolate_, WasmModuleDebug::GetCallStack(id_, isolate_));
+ gdb_server_->RunMessageLoopOnPause();
+ }
+}
+
+bool GdbServer::DebugDelegate::IsFunctionBlackboxed(
+ // Executed in the isolate thread.
+ Local<debug::Script> script, const debug::Location& start,
+ const debug::Location& end) {
+ return false;
+}
+
} // namespace gdb_server
} // namespace wasm
} // namespace internal
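
RunSyncTask above posts a closure from the GDB-stub thread and blocks on a semaphore until the isolate thread has drained it from the queue and run it. A rough standalone sketch of that post-and-wait pattern, using std::mutex and std::condition_variable in place of V8's base::Semaphore and LockedQueue (illustrative only, not the patch's implementation):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

class TaskRunner {
 public:
  // Runs queued closures in the calling thread (the "isolate" thread).
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return terminated_ || !queue_.empty(); });
        if (terminated_ && queue_.empty()) return;
        task = std::move(queue_.front());
        queue_.pop();
      }
      task();
    }
  }
  // Called from another thread (the "GDB-stub" thread) to post work.
  void Append(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::move(task));
    cv_.notify_one();
  }
  void Terminate() {
    std::lock_guard<std::mutex> lock(mutex_);
    terminated_ = true;
    cv_.notify_one();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> queue_;
  bool terminated_ = false;
};

int main() {
  TaskRunner runner;
  std::thread isolate_thread([&] { runner.Run(); });

  // RunSyncTask equivalent: post a closure, then block until it has executed.
  std::mutex m;
  std::condition_variable done_cv;
  bool done = false;
  runner.Append([&] {
    std::cout << "executed in the isolate thread\n";
    std::lock_guard<std::mutex> lock(m);
    done = true;
    done_cv.notify_one();
  });
  std::unique_lock<std::mutex> lock(m);
  done_cv.wait(lock, [&] { return done; });

  runner.Terminate();
  isolate_thread.join();
}
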
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.h b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
index 59d8a17375..50939af69c 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
@@ -5,14 +5,18 @@
#ifndef V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_H_
#define V8_DEBUG_WASM_GDB_SERVER_GDB_SERVER_H_
+#include <map>
#include <memory>
#include "src/debug/wasm/gdb-server/gdb-server-thread.h"
+#include "src/debug/wasm/gdb-server/wasm-module-debug.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace gdb_server {
+class TaskRunner;
+
// class GdbServer acts as a manager for the GDB-remote stub. It is instantiated
// as soon as the first Wasm module is loaded in the Wasm engine and spawns a
// separate thread to accept connections and exchange messages with a debugger.
@@ -20,18 +24,172 @@ namespace gdb_server {
// the Wasm engine.
class GdbServer {
public:
- // Spawns a "GDB-remote" thread that will be used to communicate with the
- // debugger. This should be called once, the first time a Wasm module is
- // loaded in the Wasm engine.
- GdbServer();
+ // Factory method: creates and returns a GdbServer. Spawns a "GDB-remote"
+ // thread that will be used to communicate with the debugger.
+ // May return null on failure.
+ // This should be called once, the first time a Wasm module is loaded in the
+ // Wasm engine.
+ static std::unique_ptr<GdbServer> Create();
// Stops the "GDB-remote" thread and waits for it to complete. This should be
// called once, when the Wasm engine shuts down.
~GdbServer();
+ // Queries the set of the Wasm modules currently loaded. Each module is
+ // identified by a unique integer module id.
+ struct WasmModuleInfo {
+ uint32_t module_id;
+ std::string module_name;
+ };
+ std::vector<WasmModuleInfo> GetLoadedModules();
+
+ // Queries the value of the {index} global value in the Wasm module identified
+ // by {frame_index}.
+ //
+ bool GetWasmGlobal(uint32_t frame_index, uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+ // Queries the value of the {index} local value in the {frame_index}th stack
+ // frame in the Wasm module identified by {frame_index}.
+ //
+ bool GetWasmLocal(uint32_t frame_index, uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+ // Queries the value of the {index} value in the operand stack.
+ //
+ bool GetWasmStackValue(uint32_t frame_index, uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+ // Reads {size} bytes, starting from {offset}, from the Memory instance
+ // associated with the Wasm module identified by {frame_index}.
+ // Returns the number of bytes copied to {buffer}, or 0 in case of error.
+ // Note: only one Memory per Module is currently supported.
+ //
+ uint32_t GetWasmMemory(uint32_t frame_index, uint32_t offset, uint8_t* buffer,
+ uint32_t size);
+
+ // Reads {size} bytes, starting from the low dword of {address}, from the Code
+ // space of the Wasm module identified by the high dword of {address}.
+ // Returns the number of bytes copied to {buffer}, or 0 in case of error.
+ uint32_t GetWasmModuleBytes(wasm_addr_t address, uint8_t* buffer,
+ uint32_t size);
+
+ // Inserts a breakpoint at the offset {offset} of the Wasm module identified
+ // by {wasm_module_id}.
+ // Returns true if the breakpoint was successfully added.
+ bool AddBreakpoint(uint32_t wasm_module_id, uint32_t offset);
+
+ // Removes a breakpoint at the offset {offset} of the Wasm module identified
+ // by {wasm_module_id}.
+ // Returns true if the breakpoint was successfully removed.
+ bool RemoveBreakpoint(uint32_t wasm_module_id, uint32_t offset);
+
+ // Returns the current call stack as a vector of program counters.
+ std::vector<wasm_addr_t> GetWasmCallStack() const;
+
+ // Manage the set of Isolates for this GdbServer.
+ void AddIsolate(Isolate* isolate);
+ void RemoveIsolate(Isolate* isolate);
+
+ // Requests that the thread suspend execution at the next Wasm instruction.
+ void Suspend();
+
+ // Handle stepping in wasm functions via the wasm interpreter.
+ void PrepareStep();
+
+ // Called when the target debuggee can resume execution (for example after
+ // having been suspended on a breakpoint). Terminates the task runner leaving
+ // all pending tasks in the queue.
+ void QuitMessageLoopOnPause();
+
private:
+ GdbServer();
+
+ // When the target debuggee is suspended for a breakpoint or exception, blocks
+ // the main (isolate) thread and enters a message loop. Here it waits on a
+ // queue of Task objects that are posted by the GDB-stub thread and that
+ // represent queries received from the debugger via the GDB-remote protocol.
+ void RunMessageLoopOnPause();
+
+ // Post a task to run a callback in the isolate thread.
+ template <typename Callback>
+ auto RunSyncTask(Callback&& callback) const;
+
+ void AddWasmModule(uint32_t module_id, Local<debug::WasmScript> wasm_script);
+
+ // Given a Wasm module id, retrieves the corresponding debugging WasmScript
+ // object.
+ bool GetModuleDebugHandler(uint32_t module_id,
+ WasmModuleDebug** wasm_module_debug);
+
+ // Returns the debugging target.
+ Target& GetTarget() const;
+
+ // Class DebugDelegate implements the debug::DebugDelegate interface to
+ // receive notifications when debug events happen in a given isolate, like a
+ // script being loaded, a breakpoint being hit, an exception being thrown.
+ class DebugDelegate : public debug::DebugDelegate {
+ public:
+ DebugDelegate(Isolate* isolate, GdbServer* gdb_server);
+ ~DebugDelegate();
+
+ // debug::DebugDelegate
+ void ScriptCompiled(Local<debug::Script> script, bool is_live_edited,
+ bool has_compile_error) override;
+ void BreakProgramRequested(Local<v8::Context> paused_context,
+ const std::vector<debug::BreakpointId>&
+ inspector_break_points_hit) override;
+ void ExceptionThrown(Local<v8::Context> paused_context,
+ Local<Value> exception, Local<Value> promise,
+ bool is_uncaught,
+ debug::ExceptionType exception_type) override;
+ bool IsFunctionBlackboxed(Local<debug::Script> script,
+ const debug::Location& start,
+ const debug::Location& end) override;
+
+ private:
+ // Calculates module_id as:
+ // +--------------------+------------------- +
+ // | DebugDelegate::id_ | Script::Id() |
+ // +--------------------+------------------- +
+ // <----- 16 bit -----> <----- 16 bit ----->
+ uint32_t GetModuleId(uint32_t script_id) const {
+ DCHECK_LT(script_id, 0x10000);
+ DCHECK_LT(id_, 0x10000);
+ return id_ << 16 | script_id;
+ }
+
+ Isolate* isolate_;
+ uint32_t id_;
+ GdbServer* gdb_server_;
+
+ static std::atomic<uint32_t> id_s;
+ };
+
+ // The GDB-stub thread where all the communication with the debugger happens.
std::unique_ptr<GdbServerThread> thread_;
+ // Used to transform the queries that arrive in the GDB-stub thread into
+ // tasks executed in the main (isolate) thread.
+ std::unique_ptr<TaskRunner> task_runner_;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Always accessed in the isolate thread.
+
+ // Set of breakpoints currently defines in Wasm code.
+ typedef std::map<uint64_t, int> BreakpointsMap;
+ BreakpointsMap breakpoints_;
+
+ typedef std::map<uint32_t, WasmModuleDebug> ScriptsMap;
+ ScriptsMap scripts_;
+
+ typedef std::map<Isolate*, std::unique_ptr<DebugDelegate>>
+ IsolateDebugDelegateMap;
+ IsolateDebugDelegateMap isolate_delegates_;
+
+ // End of fields always accessed in the isolate thread.
+ //////////////////////////////////////////////////////////////////////////////
+
DISALLOW_COPY_AND_ASSIGN(GdbServer);
};
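
GetModuleId above builds a 32-bit module id from a 16-bit per-isolate DebugDelegate counter and a 16-bit script id, and the breakpoints_ map keys entries by the packed 64-bit wasm address. A tiny standalone illustration of the 16/16 composition (hypothetical values, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t delegate_id = 3;   // hypothetical per-isolate DebugDelegate counter
  uint32_t script_id = 0x2f;  // hypothetical script id
  uint32_t module_id = delegate_id << 16 | script_id;

  std::printf("module_id=%08x -> delegate=%u script=%u\n", module_id,
              module_id >> 16, module_id & 0xffff);
}
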
diff --git a/deps/v8/src/debug/wasm/gdb-server/packet.cc b/deps/v8/src/debug/wasm/gdb-server/packet.cc
new file mode 100644
index 0000000000..f8306c4b1e
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/packet.cc
@@ -0,0 +1,364 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/packet.h"
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+Packet::Packet() {
+ seq_ = -1;
+ Clear();
+}
+
+void Packet::Clear() {
+ data_.clear();
+ read_index_ = 0;
+}
+
+void Packet::Rewind() { read_index_ = 0; }
+
+bool Packet::EndOfPacket() const { return (read_index_ >= GetPayloadSize()); }
+
+void Packet::AddRawChar(char ch) { data_.push_back(ch); }
+
+void Packet::AddWord8(uint8_t byte) {
+ char seq[2];
+ UInt8ToHex(byte, seq);
+ AddRawChar(seq[0]);
+ AddRawChar(seq[1]);
+}
+
+void Packet::AddBlock(const void* ptr, uint32_t len) {
+ DCHECK(ptr);
+
+ const char* p = (const char*)ptr;
+
+ for (uint32_t offs = 0; offs < len; offs++) {
+ AddWord8(p[offs]);
+ }
+}
+
+void Packet::AddString(const char* str) {
+ DCHECK(str);
+
+ while (*str) {
+ AddRawChar(*str);
+ str++;
+ }
+}
+
+void Packet::AddHexString(const char* str) {
+ DCHECK(str);
+
+ while (*str) {
+ AddWord8(*str);
+ str++;
+ }
+}
+
+void Packet::AddNumberSep(uint64_t val, char sep) {
+ char out[sizeof(val) * 2];
+ char temp[2];
+
+ // Check for -1 optimization
+ if (val == static_cast<uint64_t>(-1)) {
+ AddRawChar('-');
+ AddRawChar('1');
+ } else {
+ int nibbles = 0;
+
+ // In the GDB remote protocol numbers are formatted as big-endian hex
+ // strings. Leading zeros can be skipped.
+ // For example the value 0x00001234 is formatted as "1234".
+ for (size_t a = 0; a < sizeof(val); a++) {
+ uint8_t byte = static_cast<uint8_t>(val & 0xFF);
+
+ // Stream in with bytes reversed, starting with the least significant.
+ // So if we have the value 0x00001234, we store 4, then 3, 2, 1.
+ // Note that the characters are later reversed to be in big-endian order.
+ UInt8ToHex(byte, temp);
+ out[nibbles++] = temp[1];
+ out[nibbles++] = temp[0];
+
+ // Get the next 8 bits;
+ val >>= 8;
+
+ // Suppress leading zeros, so we are done when val hits zero
+ if (val == 0) {
+ break;
+ }
+ }
+
+ // Strip the high zero for this byte if present.
+ if ((nibbles > 1) && (out[nibbles - 1] == '0')) nibbles--;
+
+ // Now write it out reverse to correct the order
+ while (nibbles) {
+ nibbles--;
+ AddRawChar(out[nibbles]);
+ }
+ }
+
+ // If we asked for a separator, insert it
+ if (sep) AddRawChar(sep);
+}
+
+bool Packet::GetNumberSep(uint64_t* val, char* sep) {
+ uint64_t out = 0;
+ char ch;
+ if (!GetRawChar(&ch)) {
+ return false;
+ }
+
+ // Numbers are formatted as big-endian hex strings.
+ // The literals "0" and "-1" are treated as special cases.
+
+ // Check for -1
+ if (ch == '-') {
+ if (!GetRawChar(&ch)) {
+ return false;
+ }
+
+ if (ch == '1') {
+ *val = (uint64_t)-1;
+
+ ch = 0;
+ GetRawChar(&ch);
+ if (sep) {
+ *sep = ch;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ do {
+ uint8_t nib;
+
+ // Check for separator
+ if (!NibbleToUInt8(ch, &nib)) {
+ break;
+ }
+
+ // Add this nibble.
+ out = (out << 4) + nib;
+
+ // Get the next character (if available)
+ ch = 0;
+ if (!GetRawChar(&ch)) {
+ break;
+ }
+ } while (1);
+
+ // Set the value;
+ *val = out;
+
+ // Add the separator if the user wants it...
+ if (sep != nullptr) *sep = ch;
+
+ return true;
+}
+
+bool Packet::GetRawChar(char* ch) {
+ DCHECK(ch != nullptr);
+
+ if (read_index_ >= GetPayloadSize()) return false;
+
+ *ch = data_[read_index_++];
+
+ // Check for RLE X*N, where X is the value, N is the reps.
+ if (*ch == '*') {
+ if (read_index_ < 2) {
+ TRACE_GDB_REMOTE("Unexpected RLE at start of packet.\n");
+ return false;
+ }
+
+ if (read_index_ >= GetPayloadSize()) {
+ TRACE_GDB_REMOTE("Unexpected EoP during RLE.\n");
+ return false;
+ }
+
+ // GDB does not use "CTRL" characters in the stream, so the
+ // number of reps is encoded as the ASCII value beyond 28
+ // (which when you add a min rep size of 4, forces the rep
+ // character to be ' ' (32) or greater).
+ int32_t cnt = (data_[read_index_] - 28);
+ if (cnt < 3) {
+ TRACE_GDB_REMOTE("Unexpected RLE length.\n");
+ return false;
+ }
+
+ // We have just read '*' and incremented the read pointer,
+ // so here is the old state, and expected new state.
+ //
+ // Assume N = 5, we grow by N - size of encoding (3).
+ //
+ // OldP: R W
+ // OldD: 012X*N89 = 8 chars
+ // Size: 012X*N89__ = 10 chars
+ // Move: 012X*__N89 = 10 chars
+ // Fill: 012XXXXX89 = 10 chars
+ // NewP: R W (shifted 5 - 3)
+
+ // First, store the remaining characters to the right into a temp string.
+ std::string right = data_.substr(read_index_ + 1);
+ // Discard the '*' we just read
+ data_.erase(read_index_ - 1);
+ // Append (N-1) 'X' chars
+ *ch = data_[read_index_ - 2];
+ data_.append(cnt - 1, *ch);
+ // Finally, append the remaining characters
+ data_.append(right);
+ }
+ return true;
+}
+
+bool Packet::GetWord8(uint8_t* value) {
+ DCHECK(value);
+
+ // Get two ASCII hex values and convert them to ints
+ char seq[2];
+ if (!GetRawChar(&seq[0]) || !GetRawChar(&seq[1])) {
+ return false;
+ }
+ return HexToUInt8(seq, value);
+}
+
+bool Packet::GetBlock(void* ptr, uint32_t len) {
+ DCHECK(ptr);
+
+ uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
+ bool res = true;
+
+ for (uint32_t offs = 0; offs < len; offs++) {
+ res = GetWord8(&p[offs]);
+ if (false == res) {
+ break;
+ }
+ }
+
+ return res;
+}
+
+bool Packet::GetString(std::string* str) {
+ if (EndOfPacket()) {
+ return false;
+ }
+
+ *str = data_.substr(read_index_);
+ read_index_ = GetPayloadSize();
+ return true;
+}
+
+bool Packet::GetHexString(std::string* str) {
+ // Decode a string encoded as a series of 2-hex digit pairs.
+
+ if (EndOfPacket()) {
+ return false;
+ }
+
+ // Pull values until we hit a separator
+ str->clear();
+ char ch1;
+ while (GetRawChar(&ch1)) {
+ uint8_t nib1;
+ if (!NibbleToUInt8(ch1, &nib1)) {
+ read_index_--;
+ break;
+ }
+ char ch2;
+ uint8_t nib2;
+ if (!GetRawChar(&ch2) || !NibbleToUInt8(ch2, &nib2)) {
+ return false;
+ }
+ *str += static_cast<char>((nib1 << 4) + nib2);
+ }
+ return true;
+}
+
+const char* Packet::GetPayload() const { return data_.c_str(); }
+
+size_t Packet::GetPayloadSize() const { return data_.size(); }
+
+bool Packet::GetSequence(int32_t* ch) const {
+ DCHECK(ch);
+
+ if (seq_ != -1) {
+ *ch = seq_;
+ return true;
+ }
+
+ return false;
+}
+
+void Packet::ParseSequence() {
+ size_t saved_read_index = read_index_;
+ unsigned char seq;
+ char ch;
+ if (GetWord8(&seq) && GetRawChar(&ch)) {
+ if (ch == ':') {
+ SetSequence(seq);
+ return;
+ }
+ }
+ // No sequence number present, so reset to original position.
+ read_index_ = saved_read_index;
+}
+
+void Packet::SetSequence(int32_t val) { seq_ = val; }
+
+void Packet::SetError(ErrDef error) {
+ Clear();
+ AddRawChar('E');
+ AddWord8(static_cast<uint8_t>(error));
+}
+
+std::string Packet::GetPacketData() const {
+ char chars[2];
+ const char* ptr = GetPayload();
+ size_t size = GetPayloadSize();
+
+ std::stringstream outstr;
+
+ // Signal start of response
+ outstr << '$';
+
+ char run_xsum = 0;
+
+ // If there is a sequence, send as two nibble 8bit value + ':'
+ int32_t seq;
+ if (GetSequence(&seq)) {
+ UInt8ToHex(seq, chars);
+ outstr << chars[0];
+ run_xsum += chars[0];
+ outstr << chars[1];
+ run_xsum += chars[1];
+
+ outstr << ':';
+ run_xsum += ':';
+ }
+
+ // Send the main payload
+ for (size_t offs = 0; offs < size; ++offs) {
+ outstr << ptr[offs];
+ run_xsum += ptr[offs];
+ }
+
+ // Send XSUM as a two-nibble 8-bit value preceded by '#'
+ outstr << '#';
+ UInt8ToHex(run_xsum, chars);
+ outstr << chars[0];
+ outstr << chars[1];
+
+ return outstr.str();
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
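
AddNumberSep above writes numbers as big-endian hex with leading zeros stripped, with "-1" handled as a literal special case. A standalone sketch of the zero-stripping encoding (illustrative only; the -1 case is omitted):

#include <cstdint>
#include <cstdio>
#include <string>

// Big-endian hex with leading zeros stripped: 0x00001234 -> "1234".
std::string EncodeNumber(uint64_t val) {
  static const char kHexChars[] = "0123456789abcdef";
  if (val == 0) return "0";
  std::string out;
  for (int shift = 60; shift >= 0; shift -= 4) {
    unsigned nibble = static_cast<unsigned>((val >> shift) & 0xF);
    if (out.empty() && nibble == 0) continue;  // skip leading zeros
    out.push_back(kHexChars[nibble]);
  }
  return out;
}

int main() {
  std::printf("%s %s %s\n", EncodeNumber(0x1234).c_str(),
              EncodeNumber(0).c_str(), EncodeNumber(0xdeadbeef).c_str());
  // Expected output: 1234 0 deadbeef
}
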
diff --git a/deps/v8/src/debug/wasm/gdb-server/packet.h b/deps/v8/src/debug/wasm/gdb-server/packet.h
new file mode 100644
index 0000000000..4308081cad
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/packet.h
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_PACKET_H_
+#define V8_DEBUG_WASM_GDB_SERVER_PACKET_H_
+
+#include <string>
+#include <vector>
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+class V8_EXPORT_PRIVATE Packet {
+ public:
+ Packet();
+
+ // Empty the vector and reset the read/write pointers.
+ void Clear();
+
+ // Reset the read pointer, allowing the packet to be re-read.
+ void Rewind();
+
+ // Return true if the read pointer has reached the write pointer.
+ bool EndOfPacket() const;
+
+ // Store a single raw 8 bit value
+ void AddRawChar(char ch);
+
+ // Store a block of data as hex pairs per byte
+ void AddBlock(const void* ptr, uint32_t len);
+
+ // Store a byte as a 2 chars block.
+ void AddWord8(uint8_t val);
+
+ // Store a number up to 64 bits, formatted as a big-endian hex string with
+ // leading zeros removed. Since zeros can be removed, the width of this
+ // number is unknown, and the number is always followed by a NULL or a
+ // separator (non hex digit).
+ void AddNumberSep(uint64_t val, char sep);
+
+ // Add a raw string.
+ void AddString(const char* str);
+
+ // Add a string stored as a stream of ASCII hex digit pairs. It is safe
+ // to use any non-null character in this stream. If this does not terminate
+ // the packet, there should be a separator (non hex digit) immediately
+ // following.
+ void AddHexString(const char* str);
+
+ // Retrieve a single character if available
+ bool GetRawChar(char* ch);
+
+ // Retrieve "len" ASCII character pairs.
+ bool GetBlock(void* ptr, uint32_t len);
+
+ // Retrieve an 8, 16, 32, or 64 bit word as pairs of hex digits. These
+ // functions will always consume bits/4 characters from the stream.
+ bool GetWord8(uint8_t* val);
+
+ // Retrieve a number (formatted as a big-endian hex string) and a separator.
+ // If 'sep' is null, the separator is consumed but thrown away.
+ bool GetNumberSep(uint64_t* val, char* sep);
+
+ // Get a string from the stream
+ bool GetString(std::string* str);
+ bool GetHexString(std::string* str);
+
+ // Return a pointer to the entire packet payload
+ const char* GetPayload() const;
+ size_t GetPayloadSize() const;
+
+ // Returns true and the sequence number, or false if it is unset.
+ bool GetSequence(int32_t* seq) const;
+
+ // Parses the sequence number in the packet data and moves the read pointer past it.
+ void ParseSequence();
+
+ // Set the sequence number.
+ void SetSequence(int32_t seq);
+
+ enum class ErrDef { None = 0, BadFormat = 1, BadArgs = 2, Failed = 3 };
+ void SetError(ErrDef);
+
+ // Returns the full content of a GDB-remote packet, in the format:
+ // $payload#checksum
+ // where the two-digit checksum is computed as the modulo 256 sum of all
+ // characters between the leading '$' and the trailing '#'.
+ std::string GetPacketData() const;
+
+ private:
+ int32_t seq_;
+ std::string data_;
+ size_t read_index_;
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_PACKET_H_
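
GetPacketData above frames a reply as '$', the payload, '#', and a two-digit modulo-256 checksum of the payload characters. A standalone sketch of that framing (plain C++ for illustration, not the patch's code):

#include <cstdint>
#include <cstdio>
#include <string>

// Wrap a payload as "$payload#xx", where xx is the modulo-256 sum of the
// payload bytes written as two lower-case hex digits.
std::string Frame(const std::string& payload) {
  uint8_t checksum = 0;
  for (char c : payload) checksum += static_cast<uint8_t>(c);
  char trailer[4];
  std::snprintf(trailer, sizeof(trailer), "#%02x",
                static_cast<unsigned>(checksum));
  return "$" + payload + trailer;
}

int main() {
  // 'O' (0x4f) + 'K' (0x4b) = 0x9a, so an "OK" reply is framed as "$OK#9a".
  std::printf("%s\n", Frame("OK").c_str());
}
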
diff --git a/deps/v8/src/debug/wasm/gdb-server/session.cc b/deps/v8/src/debug/wasm/gdb-server/session.cc
index 3e98e093af..b052934071 100644
--- a/deps/v8/src/debug/wasm/gdb-server/session.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/session.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/debug/wasm/gdb-server/session.h"
+#include "src/debug/wasm/gdb-server/packet.h"
#include "src/debug/wasm/gdb-server/transport.h"
namespace v8 {
@@ -10,7 +11,8 @@ namespace internal {
namespace wasm {
namespace gdb_server {
-Session::Session(Transport* transport) : io_(transport), connected_(true) {}
+Session::Session(TransportBase* transport)
+ : io_(transport), connected_(true), ack_enabled_(true) {}
void Session::WaitForDebugStubEvent() { io_->WaitForDebugStubEvent(); }
@@ -34,14 +36,112 @@ bool Session::GetChar(char* ch) {
return true;
}
-bool Session::GetPacket() {
+bool Session::SendPacket(Packet* pkt, bool expect_ack) {
char ch;
- if (!GetChar(&ch)) return false;
+ do {
+ std::string data = pkt->GetPacketData();
+
+ TRACE_GDB_REMOTE("TX %s\n", data.size() < 160
+ ? data.c_str()
+ : (data.substr(0, 160) + "...").c_str());
+ if (!io_->Write(data.data(), static_cast<int32_t>(data.length()))) {
+ return false;
+ }
+
+ // If ACKs are off, we are done.
+ if (!expect_ack || !ack_enabled_) {
+ break;
+ }
+
+ // Otherwise, poll for '+'
+ if (!GetChar(&ch)) {
+ return false;
+ }
+
+ // Retry if we didn't get a '+'
+ } while (ch != '+');
- // discard the input
return true;
}
+bool Session::GetPayload(Packet* pkt, uint8_t* checksum) {
+ pkt->Clear();
+ *checksum = 0;
+
+ // Stream in the characters
+ char ch;
+ while (GetChar(&ch)) {
+ if (ch == '#') {
+ // If we see a '#' we must be done with the data.
+ return true;
+ } else if (ch == '$') {
+ // If we see a '$' we must have missed the last cmd, let's retry.
+ TRACE_GDB_REMOTE("RX Missing $, retry.\n");
+ *checksum = 0;
+ pkt->Clear();
+ } else {
+ // Keep a running XSUM.
+ *checksum += ch;
+ pkt->AddRawChar(ch);
+ }
+ }
+ return false;
+}
+
+bool Session::GetPacket(Packet* pkt) {
+ while (true) {
+ // Toss characters until we see a start of command
+ char ch;
+ do {
+ if (!GetChar(&ch)) {
+ return false;
+ }
+ } while (ch != '$');
+
+ uint8_t running_checksum = 0;
+ if (!GetPayload(pkt, &running_checksum)) {
+ return false;
+ }
+
+ // Get two nibble checksum
+ uint8_t trailing_checksum = 0;
+ char chars[2];
+ if (!GetChar(&chars[0]) || !GetChar(&chars[1]) ||
+ !HexToUInt8(chars, &trailing_checksum)) {
+ return false;
+ }
+
+ TRACE_GDB_REMOTE("RX $%s#%c%c\n", pkt->GetPayload(), chars[0], chars[1]);
+
+ pkt->ParseSequence();
+
+ // If ACKs are off, we are done.
+ if (!ack_enabled_) {
+ return true;
+ }
+
+ // If the XSUMs don't match, signal bad packet
+ if (trailing_checksum == running_checksum) {
+ char out[3] = {'+', 0, 0};
+
+ // If we have a sequence number
+ int32_t seq;
+ if (pkt->GetSequence(&seq)) {
+ // Respond with sequence number
+ UInt8ToHex(seq, &out[1]);
+ return io_->Write(out, 3);
+ } else {
+ return io_->Write(out, 1);
+ }
+ } else {
+ // Resend a bad XSUM and look for retransmit
+ TRACE_GDB_REMOTE("RX Bad XSUM, retry\n");
+ io_->Write("-", 1);
+ // retry...
+ }
+ }
+}
+
} // namespace gdb_server
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/debug/wasm/gdb-server/session.h b/deps/v8/src/debug/wasm/gdb-server/session.h
index a76bb4a3b4..d7c22638e6 100644
--- a/deps/v8/src/debug/wasm/gdb-server/session.h
+++ b/deps/v8/src/debug/wasm/gdb-server/session.h
@@ -12,22 +12,24 @@ namespace internal {
namespace wasm {
namespace gdb_server {
-class Transport;
+class Packet;
+class TransportBase;
// Represents a gdb-remote debugging session.
-class Session {
+class V8_EXPORT_PRIVATE Session {
public:
- explicit Session(Transport* transport);
+ explicit Session(TransportBase* transport);
+
+ // Attempt to send a packet and optionally wait for an ACK from the receiver.
+ bool SendPacket(Packet* packet, bool expect_ack = true);
// Attempt to receive a packet.
- // For the moment this method is only used to check whether the TCP connection
- // is still active; all bytes read are discarded.
- bool GetPacket();
+ bool GetPacket(Packet* packet);
// Return true if there is data to read.
bool IsDataAvailable() const;
- // Return true if the connection still valid.
+ // Return true if the connection is still valid.
bool IsConnected() const;
// Shutdown the connection.
@@ -42,11 +44,23 @@ class Session {
// Signal that the debuggee execution stopped because of a trap or breakpoint.
bool SignalThreadEvent();
+ // By default, when either the debugger or the GDB-stub sends a packet,
+ // the first response expected is an acknowledgment: either '+' (to indicate
+ // the packet was received correctly) or '-' (to request retransmission).
+ // When a transport is reliable, the debugger may request that acknowledgement
+ // be disabled by means of the 'QStartNoAckMode' packet.
+ void EnableAck(bool ack_enabled) { ack_enabled_ = ack_enabled; }
+
private:
+ // Read a single character from the transport.
bool GetChar(char* ch);
- Transport* io_; // Transport object not owned by the Session.
- bool connected_; // Is the connection still valid.
+ // Read the content of a packet, from a leading '$' to a trailing '#'.
+ bool GetPayload(Packet* pkt, uint8_t* checksum);
+
+ TransportBase* io_; // Transport object not owned by the Session.
+ bool connected_; // Is the connection still valid.
+ bool ack_enabled_; // If true, emit or wait for '+' from RSP stream.
DISALLOW_COPY_AND_ASSIGN(Session);
};
diff --git a/deps/v8/src/debug/wasm/gdb-server/target.cc b/deps/v8/src/debug/wasm/gdb-server/target.cc
index ac5bf10a0e..6992fd1192 100644
--- a/deps/v8/src/debug/wasm/gdb-server/target.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/target.cc
@@ -4,31 +4,96 @@
#include "src/debug/wasm/gdb-server/target.h"
+#include <inttypes.h>
#include "src/base/platform/time.h"
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
#include "src/debug/wasm/gdb-server/gdb-server.h"
+#include "src/debug/wasm/gdb-server/packet.h"
#include "src/debug/wasm/gdb-server/session.h"
#include "src/debug/wasm/gdb-server/transport.h"
-#include "src/debug/wasm/gdb-server/util.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace gdb_server {
+static const int kThreadId = 1;
+
+// Signals.
+static const int kSigTrace = 5;
+static const int kSigSegv = 11;
+
Target::Target(GdbServer* gdb_server)
- : status_(Status::Running), session_(nullptr) {}
+ : gdb_server_(gdb_server),
+ status_(Status::Running),
+ cur_signal_(0),
+ session_(nullptr),
+ debugger_initial_suspension_(true),
+ semaphore_(0),
+ current_isolate_(nullptr) {
+ InitQueryPropertyMap();
+}
+
+void Target::InitQueryPropertyMap() {
+ // Request LLDB to send packets up to 4000 bytes for bulk transfers.
+ query_properties_["Supported"] =
+ "PacketSize=1000;vContSupported-;qXfer:libraries:read+;";
+
+ query_properties_["Attached"] = "1";
+
+ // There is only one register, named 'pc', in this architecture
+ query_properties_["RegisterInfo0"] =
+ "name:pc;alt-name:pc;bitsize:64;offset:0;encoding:uint;format:hex;set:"
+ "General Purpose Registers;gcc:16;dwarf:16;generic:pc;";
+ query_properties_["RegisterInfo1"] = "E45";
+
+ // ProcessInfo for wasm32
+ query_properties_["ProcessInfo"] =
+ "pid:1;ppid:1;uid:1;gid:1;euid:1;egid:1;name:6c6c6462;triple:" +
+ Mem2Hex("wasm32-unknown-unknown-wasm") + ";ptrsize:4;";
+ query_properties_["Symbol"] = "OK";
+
+ // Current thread info
+ char buff[16];
+ snprintf(buff, sizeof(buff), "QC%x", kThreadId);
+ query_properties_["C"] = buff;
+}
void Target::Terminate() {
- // Executed in the Isolate thread.
- status_ = Status::Terminated;
+ // Executed in the Isolate thread, when the process shuts down.
+ SetStatus(Status::Terminated);
+}
+
+void Target::OnProgramBreak(Isolate* isolate,
+ const std::vector<wasm_addr_t>& call_frames) {
+ OnSuspended(isolate, kSigTrace, call_frames);
+}
+void Target::OnException(Isolate* isolate,
+ const std::vector<wasm_addr_t>& call_frames) {
+ OnSuspended(isolate, kSigSegv, call_frames);
+}
+void Target::OnSuspended(Isolate* isolate, int signal,
+ const std::vector<wasm_addr_t>& call_frames) {
+ // This function will be called in the isolate thread, when the wasm
+ // interpreter gets suspended.
+
+ bool isWaitingForSuspension = (status_ == Status::WaitingForSuspension);
+ SetStatus(Status::Suspended, signal, call_frames, isolate);
+ if (isWaitingForSuspension) {
+ // Wake the GdbServer thread that was blocked waiting for the Target
+ // to suspend.
+ semaphore_.Signal();
+ } else if (session_) {
+ session_->SignalThreadEvent();
+ }
}
void Target::Run(Session* session) {
// Executed in the GdbServer thread.
-
session_ = session;
do {
WaitForDebugEvent();
+ ProcessDebugEvent();
ProcessCommands();
} while (!IsTerminated() && session_->IsConnected());
session_ = nullptr;
@@ -37,7 +102,7 @@ void Target::Run(Session* session) {
void Target::WaitForDebugEvent() {
// Executed in the GdbServer thread.
- if (status_ != Status::Terminated) {
+ if (status_ == Status::Running) {
// Wait for either:
// * the thread to fault (or single-step)
// * an interrupt from LLDB
@@ -45,18 +110,567 @@ void Target::WaitForDebugEvent() {
}
}
+void Target::ProcessDebugEvent() {
+ // Executed in the GdbServer thread
+
+ if (status_ == Status::Running) {
+ // Blocks, waiting for the engine to suspend.
+ Suspend();
+ }
+
+ // Here, the wasm interpreter has suspended and we have updated the current
+ // thread info.
+
+ if (debugger_initial_suspension_) {
+    // On the first suspension after a new connection, don't send a stop-reply.
+    // On all later suspensions, report the signal that triggered the stop.
+ debugger_initial_suspension_ = false;
+ } else {
+ Packet pktOut;
+ SetStopReply(&pktOut);
+ session_->SendPacket(&pktOut, false);
+ }
+}
+
+void Target::Suspend() {
+ // Executed in the GdbServer thread
+ if (status_ == Status::Running) {
+ // TODO(paolosev) - this only suspends the wasm interpreter.
+ gdb_server_->Suspend();
+
+ status_ = Status::WaitingForSuspension;
+ }
+
+ while (status_ == Status::WaitingForSuspension) {
+ if (semaphore_.WaitFor(base::TimeDelta::FromMilliseconds(500))) {
+ // Here the wasm interpreter is suspended.
+ return;
+ }
+ }
+}
+
void Target::ProcessCommands() {
// GDB-remote messages are processed in the GDBServer thread.
if (IsTerminated()) {
return;
+ } else if (status_ != Status::Suspended) {
+ // Don't process commands if we haven't stopped.
+ return;
}
- // TODO(paolosev)
- // For the moment just discard any packet we receive from the debugger.
- do {
- if (!session_->GetPacket()) continue;
- } while (session_->IsConnected());
+ // Now we are ready to process commands.
+ // Loop through packets until we process a continue packet or a detach.
+ Packet recv, reply;
+ while (session_->IsConnected()) {
+ if (!session_->GetPacket(&recv)) {
+ continue;
+ }
+
+ reply.Clear();
+ ProcessPacketResult result = ProcessPacket(&recv, &reply);
+ switch (result) {
+ case ProcessPacketResult::Paused:
+ session_->SendPacket(&reply);
+ break;
+
+ case ProcessPacketResult::Continue:
+ DCHECK_EQ(status_, Status::Running);
+ // If this is a continue type command, break out of this loop.
+ gdb_server_->QuitMessageLoopOnPause();
+ return;
+
+ case ProcessPacketResult::Detach:
+ SetStatus(Status::Running);
+ session_->SendPacket(&reply);
+ session_->Disconnect();
+ gdb_server_->QuitMessageLoopOnPause();
+ return;
+
+ case ProcessPacketResult::Kill:
+ session_->SendPacket(&reply);
+ exit(-9);
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (!session_->IsConnected()) {
+ debugger_initial_suspension_ = true;
+ }
+}
+
+Target::ProcessPacketResult Target::ProcessPacket(Packet* pkt_in,
+ Packet* pkt_out) {
+ ErrorCode err = ErrorCode::None;
+
+ // Clear the outbound message.
+ pkt_out->Clear();
+
+ // Set the sequence number, if present.
+ int32_t seq = -1;
+ if (pkt_in->GetSequence(&seq)) {
+ pkt_out->SetSequence(seq);
+ }
+
+ // A GDB-remote packet begins with an upper- or lower-case letter, which
+ // generally represents a single command.
+  // The letters 'q' and 'Q' introduce "general query" packets, which are used
+ // to extend the protocol with custom commands.
+ // The format of GDB-remote commands is documented here:
+ // https://sourceware.org/gdb/onlinedocs/gdb/Overview.html#Overview.
+ char cmd;
+ pkt_in->GetRawChar(&cmd);
+
+ switch (cmd) {
+ // Queries the reason the target halted.
+ // IN : $?
+ // OUT: A Stop-reply packet
+ case '?':
+ SetStopReply(pkt_out);
+ break;
+
+ // Resumes execution
+ // IN : $c
+ // OUT: A Stop-reply packet is sent later, when the execution halts.
+ case 'c':
+ SetStatus(Status::Running);
+ return ProcessPacketResult::Continue;
+
+ // Detaches the debugger from this target
+ // IN : $D
+ // OUT: $OK
+ case 'D':
+ TRACE_GDB_REMOTE("Requested Detach.\n");
+ pkt_out->AddString("OK");
+ return ProcessPacketResult::Detach;
+
+    // Read general registers (we only support the 'pc' register, which holds
+    // the current instruction pointer).
+ // IN : $g
+ // OUT: $xx...xx
+ case 'g': {
+ uint64_t pc = GetCurrentPc();
+ pkt_out->AddBlock(&pc, sizeof(pc));
+ break;
+ }
+
+ // Write general registers - NOT SUPPORTED
+ // IN : $Gxx..xx
+ // OUT: $ (empty string)
+ case 'G': {
+ break;
+ }
+
+ // Set thread for subsequent operations. For Wasm targets, we currently
+ // assume that there is only one thread with id = kThreadId (= 1).
+ // IN : $H(c/g)(-1,0,xxxx)
+ // OUT: $OK
+ case 'H': {
+      // Type of the operation ('m', 'M', 'g', 'G', ...)
+ char operation;
+ if (!pkt_in->GetRawChar(&operation)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+
+ uint64_t thread_id;
+ if (!pkt_in->GetNumberSep(&thread_id, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+
+ // Ignore, only one thread supported for now.
+ pkt_out->AddString("OK");
+ break;
+ }
+
+ // Kills the debuggee.
+ // IN : $k
+ // OUT: $OK
+ case 'k':
+ TRACE_GDB_REMOTE("Requested Kill.\n");
+ pkt_out->AddString("OK");
+ return ProcessPacketResult::Kill;
+
+ // Reads {llll} addressable memory units starting at address {aaaa}.
+ // IN : $maaaa,llll
+ // OUT: $xx..xx
+ case 'm': {
+ uint64_t address;
+ if (!pkt_in->GetNumberSep(&address, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+ wasm_addr_t wasm_addr(address);
+
+ uint64_t len;
+ if (!pkt_in->GetNumberSep(&len, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+
+ if (len > Transport::kBufSize / 2) {
+ err = ErrorCode::BadArgs;
+ break;
+ }
+
+ uint32_t length = static_cast<uint32_t>(len);
+ uint8_t buff[Transport::kBufSize];
+ if (wasm_addr.ModuleId() > 0) {
+ uint32_t read =
+ gdb_server_->GetWasmModuleBytes(wasm_addr, buff, length);
+ if (read > 0) {
+ pkt_out->AddBlock(buff, read);
+ } else {
+ err = ErrorCode::Failed;
+ }
+ } else {
+ err = ErrorCode::BadArgs;
+ }
+ break;
+ }
+
+ // Writes {llll} addressable memory units starting at address {aaaa}.
+ // IN : $Maaaa,llll:xx..xx
+ // OUT: $OK
+ case 'M': {
+ // Writing to memory not supported for Wasm.
+ err = ErrorCode::Failed;
+ break;
+ }
+
+ // pN: Reads the value of register N.
+ // IN : $pxx
+ // OUT: $xx..xx
+ case 'p': {
+ uint64_t pc = GetCurrentPc();
+ pkt_out->AddBlock(&pc, sizeof(pc));
+ } break;
+
+ case 'q': {
+ err = ProcessQueryPacket(pkt_in, pkt_out);
+ break;
+ }
+
+ // Single step
+ // IN : $s
+ // OUT: A Stop-reply packet is sent later, when the execution halts.
+ case 's': {
+ if (status_ == Status::Suspended) {
+ gdb_server_->PrepareStep();
+ SetStatus(Status::Running);
+ }
+ return ProcessPacketResult::Continue;
+ }
+
+ // Find out if the thread 'id' is alive.
+    // IN : $T<tid>
+ // OUT: $OK if alive, $Enn if thread is dead.
+ case 'T': {
+ uint64_t id;
+ if (!pkt_in->GetNumberSep(&id, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+ if (id != kThreadId) {
+ err = ErrorCode::BadArgs;
+ break;
+ }
+ pkt_out->AddString("OK");
+ break;
+ }
+
+ // Z: Adds a breakpoint
+ // IN : $Z<type>,<addr>,<kind>
+ // <type>: 0: sw breakpoint, 1: hw breakpoint, 2: watchpoint
+ // OUT: $OK (success) or $Enn (error)
+ case 'Z': {
+ uint64_t breakpoint_type;
+ uint64_t breakpoint_address;
+ uint64_t breakpoint_kind;
+ // Only software breakpoints are supported.
+ if (!pkt_in->GetNumberSep(&breakpoint_type, 0) || breakpoint_type != 0 ||
+ !pkt_in->GetNumberSep(&breakpoint_address, 0) ||
+ !pkt_in->GetNumberSep(&breakpoint_kind, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+
+ wasm_addr_t wasm_breakpoint_addr(breakpoint_address);
+ if (!gdb_server_->AddBreakpoint(wasm_breakpoint_addr.ModuleId(),
+ wasm_breakpoint_addr.Offset())) {
+ err = ErrorCode::Failed;
+ break;
+ }
+
+ pkt_out->AddString("OK");
+ break;
+ }
+
+ // z: Removes a breakpoint
+ // IN : $z<type>,<addr>,<kind>
+ // <type>: 0: sw breakpoint, 1: hw breakpoint, 2: watchpoint
+ // OUT: $OK (success) or $Enn (error)
+ case 'z': {
+ uint64_t breakpoint_type;
+ uint64_t breakpoint_address;
+ uint64_t breakpoint_kind;
+ if (!pkt_in->GetNumberSep(&breakpoint_type, 0) || breakpoint_type != 0 ||
+ !pkt_in->GetNumberSep(&breakpoint_address, 0) ||
+ !pkt_in->GetNumberSep(&breakpoint_kind, 0)) {
+ err = ErrorCode::BadFormat;
+ break;
+ }
+
+ wasm_addr_t wasm_breakpoint_addr(breakpoint_address);
+ if (!gdb_server_->RemoveBreakpoint(wasm_breakpoint_addr.ModuleId(),
+ wasm_breakpoint_addr.Offset())) {
+ err = ErrorCode::Failed;
+ break;
+ }
+
+ pkt_out->AddString("OK");
+ break;
+ }
+
+ // If the command is not recognized, ignore it by sending an empty reply.
+ default: {
+ TRACE_GDB_REMOTE("Unknown command: %s\n", pkt_in->GetPayload());
+ }
+ }
+
+ // If there is an error, return the error code instead of a payload
+ if (err != ErrorCode::None) {
+ pkt_out->Clear();
+ pkt_out->AddRawChar('E');
+ pkt_out->AddWord8(static_cast<uint8_t>(err));
+ }
+ return ProcessPacketResult::Paused;
+}
+
+Target::ErrorCode Target::ProcessQueryPacket(const Packet* pkt_in,
+ Packet* pkt_out) {
+ const char* str = &pkt_in->GetPayload()[1];
+
+ // Get first thread query
+ // IN : $qfThreadInfo
+ // OUT: $m<tid>
+ //
+ // Get next thread query
+ // IN : $qsThreadInfo
+ // OUT: $m<tid> or l to denote end of list.
+ if (!strcmp(str, "fThreadInfo") || !strcmp(str, "sThreadInfo")) {
+ if (str[0] == 'f') {
+ pkt_out->AddString("m");
+ pkt_out->AddNumberSep(kThreadId, 0);
+ } else {
+ pkt_out->AddString("l");
+ }
+ return ErrorCode::None;
+ }
+
+ // Get a list of loaded libraries
+ // IN : $qXfer:libraries:read
+ // OUT: an XML document which lists loaded libraries, with this format:
+ // <library-list>
+ // <library name="foo.wasm">
+ // <section address="0x100000000"/>
+ // </library>
+ // <library name="bar.wasm">
+ // <section address="0x200000000"/>
+ // </library>
+ // </library-list>
+ // Note that LLDB must be compiled with libxml2 support to handle this packet.
+ std::string tmp = "Xfer:libraries:read";
+ if (!strncmp(str, tmp.data(), tmp.length())) {
+ std::vector<GdbServer::WasmModuleInfo> modules =
+ gdb_server_->GetLoadedModules();
+ std::string result("l<library-list>");
+ for (const auto& module : modules) {
+ wasm_addr_t address(module.module_id, 0);
+ char address_string[32];
+ snprintf(address_string, sizeof(address_string), "%" PRIu64,
+ static_cast<uint64_t>(address));
+ result += "<library name=\"";
+ result += module.module_name;
+ result += "\"><section address=\"";
+ result += address_string;
+ result += "\"/></library>";
+ }
+ result += "</library-list>";
+ pkt_out->AddString(result.c_str());
+ return ErrorCode::None;
+ }
+
+ // Get the current call stack.
+ // IN : $qWasmCallStack
+ // OUT: $xx..xxyy..yyzz..zz (A sequence of uint64_t values represented as
+  // consecutive 8-byte blocks).
+ std::vector<std::string> toks = StringSplit(str, ":;");
+ if (toks[0] == "WasmCallStack") {
+ std::vector<wasm_addr_t> call_stack_pcs = gdb_server_->GetWasmCallStack();
+ std::vector<uint64_t> buffer;
+ for (wasm_addr_t pc : call_stack_pcs) {
+ buffer.push_back(pc);
+ }
+ pkt_out->AddBlock(buffer.data(),
+ static_cast<uint32_t>(sizeof(uint64_t) * buffer.size()));
+ return ErrorCode::None;
+ }
+
+ // Get a Wasm global value in the Wasm module specified.
+ // IN : $qWasmGlobal:frame_index;index
+ // OUT: $xx..xx
+ if (toks[0] == "WasmGlobal") {
+ if (toks.size() == 3) {
+ uint32_t frame_index =
+ static_cast<uint32_t>(strtol(toks[1].data(), nullptr, 10));
+ uint32_t index =
+ static_cast<uint32_t>(strtol(toks[2].data(), nullptr, 10));
+ uint8_t buff[16];
+ uint32_t size = 0;
+ if (gdb_server_->GetWasmGlobal(frame_index, index, buff, 16, &size)) {
+ pkt_out->AddBlock(buff, size);
+ return ErrorCode::None;
+ } else {
+ return ErrorCode::Failed;
+ }
+ }
+ return ErrorCode::BadFormat;
+ }
+
+ // Get a Wasm local value in the stack frame specified.
+ // IN : $qWasmLocal:frame_index;index
+ // OUT: $xx..xx
+ if (toks[0] == "WasmLocal") {
+ if (toks.size() == 3) {
+ uint32_t frame_index =
+ static_cast<uint32_t>(strtol(toks[1].data(), nullptr, 10));
+ uint32_t index =
+ static_cast<uint32_t>(strtol(toks[2].data(), nullptr, 10));
+ uint8_t buff[16];
+ uint32_t size = 0;
+ if (gdb_server_->GetWasmLocal(frame_index, index, buff, 16, &size)) {
+ pkt_out->AddBlock(buff, size);
+ return ErrorCode::None;
+ } else {
+ return ErrorCode::Failed;
+ }
+ }
+ return ErrorCode::BadFormat;
+ }
+
+  // Get a Wasm value from the operand stack at the index specified.
+ // IN : qWasmStackValue:frame_index;index
+ // OUT: $xx..xx
+ if (toks[0] == "WasmStackValue") {
+ if (toks.size() == 3) {
+ uint32_t frame_index =
+ static_cast<uint32_t>(strtol(toks[1].data(), nullptr, 10));
+ uint32_t index =
+ static_cast<uint32_t>(strtol(toks[2].data(), nullptr, 10));
+ uint8_t buff[16];
+ uint32_t size = 0;
+ if (gdb_server_->GetWasmStackValue(frame_index, index, buff, 16, &size)) {
+ pkt_out->AddBlock(buff, size);
+ return ErrorCode::None;
+ } else {
+ return ErrorCode::Failed;
+ }
+ }
+ return ErrorCode::BadFormat;
+ }
+
+ // Read Wasm memory.
+ // IN : $qWasmMem:frame_index;addr;len
+ // OUT: $xx..xx
+ if (toks[0] == "WasmMem") {
+ if (toks.size() == 4) {
+ uint32_t frame_index =
+ static_cast<uint32_t>(strtol(toks[1].data(), nullptr, 10));
+ uint32_t address =
+ static_cast<uint32_t>(strtol(toks[2].data(), nullptr, 16));
+ uint32_t length =
+ static_cast<uint32_t>(strtol(toks[3].data(), nullptr, 16));
+ if (length > Transport::kBufSize / 2) {
+ return ErrorCode::BadArgs;
+ }
+ uint8_t buff[Transport::kBufSize];
+ uint32_t read =
+ gdb_server_->GetWasmMemory(frame_index, address, buff, length);
+ if (read > 0) {
+ pkt_out->AddBlock(buff, read);
+ return ErrorCode::None;
+ } else {
+ return ErrorCode::Failed;
+ }
+ }
+ return ErrorCode::BadFormat;
+ }
+
+ // No match so far, check the property cache.
+ QueryPropertyMap::const_iterator it = query_properties_.find(toks[0]);
+ if (it != query_properties_.end()) {
+ pkt_out->AddString(it->second.data());
+ }
+ // If not found, just send an empty response.
+ return ErrorCode::None;
+}
+
+// A Stop-reply packet has the format:
+// Sxx
+// or:
+// Txx<name1>:<value1>;...;<nameN>:<valueN>
+// where 'xx' is a two-digit hex number that represents the stop signal
+// and the <name>:<value> pairs are used to report additional information,
+// like the thread id.
+void Target::SetStopReply(Packet* pkt_out) const {
+ pkt_out->AddRawChar('T');
+ pkt_out->AddWord8(cur_signal_);
+
+  // Adds 'thread-pcs:<pc1>,...,<pcN>;', a list of pc values for all threads
+  // that currently exist in the process.
+ char buff[64];
+ snprintf(buff, sizeof(buff), "thread-pcs:%" PRIx64 ";",
+ static_cast<uint64_t>(GetCurrentPc()));
+ pkt_out->AddString(buff);
+
+ // Adds 'thread:<tid>;' pair. Note that a terminating ';' is required.
+ pkt_out->AddString("thread:");
+ pkt_out->AddNumberSep(kThreadId, ';');
+}
+
+void Target::SetStatus(Status status, int8_t signal,
+ std::vector<wasm_addr_t> call_frames, Isolate* isolate) {
+ v8::base::MutexGuard guard(&mutex_);
+
+ DCHECK((status == Status::Suspended && signal != 0 &&
+ call_frames.size() > 0 && isolate != nullptr) ||
+ (status != Status::Suspended && signal == 0 &&
+ call_frames.size() == 0 && isolate == nullptr));
+
+ current_isolate_ = isolate;
+ status_ = status;
+ cur_signal_ = signal;
+ call_frames_ = call_frames;
+}
+
+const std::vector<wasm_addr_t> Target::GetCallStack() const {
+ v8::base::MutexGuard guard(&mutex_);
+
+ return call_frames_;
+}
+
+wasm_addr_t Target::GetCurrentPc() const {
+ v8::base::MutexGuard guard(&mutex_);
+
+ wasm_addr_t pc{0};
+ if (call_frames_.size() > 0) {
+ pc = call_frames_[0];
+ }
+ return pc;
}
} // namespace gdb_server
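
The Packet and Session plumbing above speaks the GDB Remote Serial Protocol: every command and reply is a payload wrapped between '$' and '#', followed by a two-hex-digit checksum (the sum of the payload bytes modulo 256), and, when acks are enabled, acknowledged with '+' or '-'. A stop-reply built by SetStopReply() therefore travels as something like $T05thread-pcs:...;thread:1;#xx. The following stand-alone sketch only illustrates that framing; it does not use the Packet or Session classes from this patch:

#include <cstdint>
#include <cstdio>
#include <string>

// Wraps a GDB-remote payload into a frame: '$' + payload + '#' + checksum,
// where the checksum is the sum of the payload bytes modulo 256, in hex.
std::string FrameRspPacket(const std::string& payload) {
  uint8_t checksum = 0;
  for (char c : payload) checksum += static_cast<uint8_t>(c);
  char trailer[4];
  std::snprintf(trailer, sizeof(trailer), "#%02x", checksum);
  return "$" + payload + trailer;
}

// Example: FrameRspPacket("qSupported") yields "$qSupported#37".
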
diff --git a/deps/v8/src/debug/wasm/gdb-server/target.h b/deps/v8/src/debug/wasm/gdb-server/target.h
index 93d02489c5..1af81d3dbe 100644
--- a/deps/v8/src/debug/wasm/gdb-server/target.h
+++ b/deps/v8/src/debug/wasm/gdb-server/target.h
@@ -6,7 +6,9 @@
#define V8_DEBUG_WASM_GDB_SERVER_TARGET_H_
#include <atomic>
+#include <map>
#include "src/base/macros.h"
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
namespace v8 {
namespace internal {
@@ -14,6 +16,7 @@ namespace wasm {
namespace gdb_server {
class GdbServer;
+class Packet;
class Session;
// Class Target represents a debugging target. It contains the logic to decode
@@ -30,21 +33,101 @@ class Target {
void Terminate();
bool IsTerminated() const { return status_ == Status::Terminated; }
+ // Notifies that the debuggee thread suspended at a breakpoint.
+ void OnProgramBreak(Isolate* isolate,
+ const std::vector<wasm_addr_t>& call_frames);
+ // Notifies that the debuggee thread suspended because of an unhandled
+ // exception.
+ void OnException(Isolate* isolate,
+ const std::vector<wasm_addr_t>& call_frames);
+
+ // Returns the state at the moment of the thread suspension.
+ const std::vector<wasm_addr_t> GetCallStack() const;
+ wasm_addr_t GetCurrentPc() const;
+ Isolate* GetCurrentIsolate() const { return current_isolate_; }
+
private:
+ void OnSuspended(Isolate* isolate, int signal,
+ const std::vector<wasm_addr_t>& call_frames);
+
+ // Initializes a map used to make fast lookups when handling query packets
+ // that have a constant response.
+ void InitQueryPropertyMap();
+
// Blocks waiting for one of these two events to occur:
// - A network packet arrives from the debugger, or the debugger connection is
// closed;
// - The debuggee suspends execution because of a trap or breakpoint.
void WaitForDebugEvent();
+ void ProcessDebugEvent();
// Processes GDB-remote packets that arrive from the debugger.
// This method should be called when the debuggee has suspended its execution.
void ProcessCommands();
- enum class Status { Running, Terminated };
+ // Requests that the thread suspends execution at the next Wasm instruction.
+ void Suspend();
+
+ enum class ErrorCode { None = 0, BadFormat = 1, BadArgs = 2, Failed = 3 };
+
+ enum class ProcessPacketResult {
+ Paused, // The command was processed, debuggee still paused.
+ Continue, // The debuggee should resume execution.
+ Detach, // Request to detach from the debugger.
+ Kill // Request to terminate the debuggee process.
+ };
+  // This function always succeeds, since all errors are reported as an error
+  // string "Exx", where xx is a two-digit number.
+  // The return value indicates whether the target can resume execution or
+  // must remain paused.
+ ProcessPacketResult ProcessPacket(Packet* pkt_in, Packet* pkt_out);
+
+ // Processes a general query packet
+ ErrorCode ProcessQueryPacket(const Packet* pkt_in, Packet* pkt_out);
+
+  // Formats a 'Stop-reply' packet, which is sent in response to the 'c'
+  // (continue), 's' (step) and '?' (query halt reason) commands.
+ void SetStopReply(Packet* pkt_out) const;
+
+ enum class Status { Running, WaitingForSuspension, Suspended, Terminated };
+
+ void SetStatus(Status status, int8_t signal = 0,
+ std::vector<wasm_addr_t> call_frames_ = {},
+ Isolate* isolate = nullptr);
+
+ GdbServer* gdb_server_;
+
std::atomic<Status> status_;
- Session* session_; // Session object not owned by the Target.
+ // Signal being processed.
+ std::atomic<int8_t> cur_signal_;
+
+ // Session object not owned by the Target.
+ Session* session_;
+
+ // Map used to make fast lookups when handling query packets.
+ typedef std::map<std::string, std::string> QueryPropertyMap;
+ QueryPropertyMap query_properties_;
+
+ bool debugger_initial_suspension_;
+
+ // Used to block waiting for suspension
+ v8::base::Semaphore semaphore_;
+
+ mutable v8::base::Mutex mutex_;
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {mutex_}:
+
+  // Current isolate. This is non-null only when the target is in the Suspended
+  // state; it is the isolate associated with the current call stack and is
+  // used for all debugging activities.
+ Isolate* current_isolate_;
+
+ // Call stack when the execution is suspended.
+ std::vector<wasm_addr_t> call_frames_;
+
+ // End of fields protected by {mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(Target);
};
diff --git a/deps/v8/src/debug/wasm/gdb-server/transport.cc b/deps/v8/src/debug/wasm/gdb-server/transport.cc
index 385828ff69..f1aed96e5b 100644
--- a/deps/v8/src/debug/wasm/gdb-server/transport.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/transport.cc
@@ -73,8 +73,8 @@ SocketBinding SocketBinding::Bind(uint16_t tcp_port) {
return SocketBinding(socket_handle);
}
-std::unique_ptr<Transport> SocketBinding::CreateTransport() {
- return std::make_unique<Transport>(socket_handle_);
+std::unique_ptr<SocketTransport> SocketBinding::CreateTransport() {
+ return std::make_unique<SocketTransport>(socket_handle_);
}
uint16_t SocketBinding::GetBoundPort() {
@@ -101,20 +101,20 @@ void DisableNagleAlgorithm(SocketHandle socket) {
}
}
-TransportBase::TransportBase(SocketHandle s)
+Transport::Transport(SocketHandle s)
: buf_(new char[kBufSize]),
pos_(0),
size_(0),
handle_bind_(s),
handle_accept_(InvalidSocket) {}
-TransportBase::~TransportBase() {
+Transport::~Transport() {
if (handle_accept_ != InvalidSocket) {
CloseSocket(handle_accept_);
}
}
-void TransportBase::CopyFromBuffer(char** dst, int32_t* len) {
+void Transport::CopyFromBuffer(char** dst, int32_t* len) {
int32_t copy_bytes = std::min(*len, size_ - pos_);
memcpy(*dst, buf_.get() + pos_, copy_bytes);
pos_ += copy_bytes;
@@ -122,7 +122,7 @@ void TransportBase::CopyFromBuffer(char** dst, int32_t* len) {
*dst += copy_bytes;
}
-bool TransportBase::Read(char* dst, int32_t len) {
+bool Transport::Read(char* dst, int32_t len) {
if (pos_ < size_) {
CopyFromBuffer(&dst, &len);
}
@@ -137,7 +137,7 @@ bool TransportBase::Read(char* dst, int32_t len) {
return true;
}
-bool TransportBase::Write(const char* src, int32_t len) {
+bool Transport::Write(const char* src, int32_t len) {
while (len > 0) {
ssize_t result = ::send(handle_accept_, src, len, 0);
if (result > 0) {
@@ -156,7 +156,7 @@ bool TransportBase::Write(const char* src, int32_t len) {
}
// Return true if there is data to read.
-bool TransportBase::IsDataAvailable() const {
+bool Transport::IsDataAvailable() const {
if (pos_ < size_) {
return true;
}
@@ -180,18 +180,17 @@ bool TransportBase::IsDataAvailable() const {
return false;
}
-void TransportBase::Close() {
+void Transport::Close() {
::shutdown(handle_bind_, SD_BOTH);
CloseSocket(handle_bind_);
Disconnect();
}
-void TransportBase::Disconnect() {
+void Transport::Disconnect() {
if (handle_accept_ != InvalidSocket) {
// Shutdown the connection in both directions. This should
// always succeed, and nothing we can do if this fails.
::shutdown(handle_accept_, SD_BOTH);
-
CloseSocket(handle_accept_);
handle_accept_ = InvalidSocket;
}
@@ -199,29 +198,33 @@ void TransportBase::Disconnect() {
#if _WIN32
-Transport::Transport(SocketHandle s) : TransportBase(s) {
+SocketTransport::SocketTransport(SocketHandle s) : Transport(s) {
socket_event_ = WSA_INVALID_EVENT;
faulted_thread_event_ = ::CreateEvent(NULL, TRUE, FALSE, NULL);
if (faulted_thread_event_ == NULL) {
TRACE_GDB_REMOTE(
- "Transport::Transport: Failed to create event object for faulted"
- "thread\n");
+ "SocketTransport::SocketTransport: Failed to create event object for "
+ "faulted thread\n");
}
}
-Transport::~Transport() {
+SocketTransport::~SocketTransport() {
if (!CloseHandle(faulted_thread_event_)) {
- TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ TRACE_GDB_REMOTE(
+ "SocketTransport::~SocketTransport: Failed to close "
+ "event\n");
}
if (socket_event_) {
if (!::WSACloseEvent(socket_event_)) {
- TRACE_GDB_REMOTE("Transport::~Transport: Failed to close socket event\n");
+ TRACE_GDB_REMOTE(
+ "SocketTransport::~SocketTransport: Failed to close "
+ "socket event\n");
}
}
}
-bool Transport::AcceptConnection() {
+bool SocketTransport::AcceptConnection() {
CHECK(handle_accept_ == InvalidSocket);
handle_accept_ = ::accept(handle_bind_, NULL, 0);
if (handle_accept_ != InvalidSocket) {
@@ -231,7 +234,7 @@ bool Transport::AcceptConnection() {
socket_event_ = ::WSACreateEvent();
if (socket_event_ == WSA_INVALID_EVENT) {
TRACE_GDB_REMOTE(
- "Transport::AcceptConnection: Failed to create socket event\n");
+ "SocketTransport::AcceptConnection: Failed to create socket event\n");
}
// Listen for close events in order to handle them correctly.
@@ -241,14 +244,15 @@ bool Transport::AcceptConnection() {
if (::WSAEventSelect(handle_accept_, socket_event_, FD_CLOSE | FD_READ) ==
SOCKET_ERROR) {
TRACE_GDB_REMOTE(
- "Transport::AcceptConnection: Failed to bind event to socket\n");
+ "SocketTransport::AcceptConnection: Failed to bind event to "
+ "socket\n");
}
return true;
}
return false;
}
-bool Transport::ReadSomeData() {
+bool SocketTransport::ReadSomeData() {
while (true) {
ssize_t result =
::recv(handle_accept_, buf_.get() + size_, kBufSize - size_, 0);
@@ -259,7 +263,6 @@ bool Transport::ReadSomeData() {
if (result == 0) {
return false; // The connection was gracefully closed.
}
-
// WSAEventSelect sets socket to non-blocking mode. This is essential
// for socket event notification to work, there is no workaround.
// See remarks section at the page
@@ -267,11 +270,11 @@ bool Transport::ReadSomeData() {
if (SocketGetLastError() == WSAEWOULDBLOCK) {
if (::WaitForSingleObject(socket_event_, INFINITE) == WAIT_FAILED) {
TRACE_GDB_REMOTE(
- "Transport::ReadSomeData: Failed to wait on socket event\n");
+ "SocketTransport::ReadSomeData: Failed to wait on socket event\n");
}
if (!::ResetEvent(socket_event_)) {
TRACE_GDB_REMOTE(
- "Transport::ReadSomeData: Failed to reset socket event\n");
+ "SocketTransport::ReadSomeData: Failed to reset socket event\n");
}
continue;
}
@@ -282,7 +285,7 @@ bool Transport::ReadSomeData() {
}
}
-void Transport::WaitForDebugStubEvent() {
+void SocketTransport::WaitForDebugStubEvent() {
// Don't wait if we already have data to read.
bool wait = !(pos_ < size_);
@@ -295,34 +298,37 @@ void Transport::WaitForDebugStubEvent() {
if (result == WAIT_OBJECT_0 + 1) {
if (!ResetEvent(socket_event_)) {
TRACE_GDB_REMOTE(
- "Transport::WaitForDebugStubEvent: Failed to reset socket event\n");
+ "SocketTransport::WaitForDebugStubEvent: Failed to reset socket "
+ "event\n");
}
return;
} else if (result == WAIT_OBJECT_0) {
if (!ResetEvent(faulted_thread_event_)) {
TRACE_GDB_REMOTE(
- "Transport::WaitForDebugStubEvent: Failed to reset event\n");
+ "SocketTransport::WaitForDebugStubEvent: Failed to reset event\n");
}
return;
} else if (result == WAIT_TIMEOUT) {
return;
}
TRACE_GDB_REMOTE(
- "Transport::WaitForDebugStubEvent: Wait for events failed\n");
+ "SocketTransport::WaitForDebugStubEvent: Wait for events failed\n");
}
-bool Transport::SignalThreadEvent() {
+bool SocketTransport::SignalThreadEvent() {
if (!SetEvent(faulted_thread_event_)) {
return false;
}
return true;
}
-void Transport::Disconnect() {
- TransportBase::Disconnect();
+void SocketTransport::Disconnect() {
+ Transport::Disconnect();
if (socket_event_ != WSA_INVALID_EVENT && !::WSACloseEvent(socket_event_)) {
- TRACE_GDB_REMOTE("Transport::~Transport: Failed to close socket event\n");
+ TRACE_GDB_REMOTE(
+ "SocketTransport::~SocketTransport: Failed to close "
+ "socket event\n");
}
socket_event_ = WSA_INVALID_EVENT;
SignalThreadEvent();
@@ -330,7 +336,7 @@ void Transport::Disconnect() {
#else // _WIN32
-Transport::Transport(SocketHandle s) : TransportBase(s) {
+SocketTransport::SocketTransport(SocketHandle s) : Transport(s) {
int fds[2];
#if defined(__linux__)
int ret = pipe2(fds, O_CLOEXEC);
@@ -339,22 +345,27 @@ Transport::Transport(SocketHandle s) : TransportBase(s) {
#endif
if (ret < 0) {
TRACE_GDB_REMOTE(
- "Transport::Transport: Failed to allocate pipe for faulted thread\n");
+ "SocketTransport::SocketTransport: Failed to allocate pipe for faulted "
+ "thread\n");
}
faulted_thread_fd_read_ = fds[0];
faulted_thread_fd_write_ = fds[1];
}
-Transport::~Transport() {
+SocketTransport::~SocketTransport() {
if (close(faulted_thread_fd_read_) != 0) {
- TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ TRACE_GDB_REMOTE(
+ "SocketTransport::~SocketTransport: Failed to close "
+ "event\n");
}
if (close(faulted_thread_fd_write_) != 0) {
- TRACE_GDB_REMOTE("Transport::~Transport: Failed to close event\n");
+ TRACE_GDB_REMOTE(
+ "SocketTransport::~SocketTransport: Failed to close "
+ "event\n");
}
}
-bool Transport::AcceptConnection() {
+bool SocketTransport::AcceptConnection() {
CHECK(handle_accept_ == InvalidSocket);
handle_accept_ = ::accept(handle_bind_, NULL, 0);
if (handle_accept_ != InvalidSocket) {
@@ -364,7 +375,7 @@ bool Transport::AcceptConnection() {
return false;
}
-bool Transport::ReadSomeData() {
+bool SocketTransport::ReadSomeData() {
while (true) {
ssize_t result =
::recv(handle_accept_, buf_.get() + size_, kBufSize - size_, 0);
@@ -381,7 +392,7 @@ bool Transport::ReadSomeData() {
}
}
-void Transport::WaitForDebugStubEvent() {
+void SocketTransport::WaitForDebugStubEvent() {
// Don't wait if we already have data to read.
bool wait = !(pos_ < size_);
@@ -407,7 +418,7 @@ void Transport::WaitForDebugStubEvent() {
}
if (ret < 0) {
TRACE_GDB_REMOTE(
- "Transport::WaitForDebugStubEvent: Failed to wait for "
+ "SocketTransport::WaitForDebugStubEvent: Failed to wait for "
"debug stub event\n");
}
@@ -416,7 +427,7 @@ void Transport::WaitForDebugStubEvent() {
char buf[16];
if (read(faulted_thread_fd_read_, &buf, sizeof(buf)) < 0) {
TRACE_GDB_REMOTE(
- "Transport::WaitForDebugStubEvent: Failed to read from "
+ "SocketTransport::WaitForDebugStubEvent: Failed to read from "
"debug stub event pipe fd\n");
}
}
@@ -424,11 +435,13 @@ void Transport::WaitForDebugStubEvent() {
}
}
-bool Transport::SignalThreadEvent() {
+bool SocketTransport::SignalThreadEvent() {
// Notify the debug stub by marking the thread as faulted.
char buf = 0;
if (write(faulted_thread_fd_write_, &buf, sizeof(buf)) != sizeof(buf)) {
- TRACE_GDB_REMOTE("SignalThreadEvent: Can't send debug stub event\n");
+ TRACE_GDB_REMOTE(
+        "SocketTransport::SignalThreadEvent: Can't send debug stub "
+ "event\n");
return false;
}
return true;
diff --git a/deps/v8/src/debug/wasm/gdb-server/transport.h b/deps/v8/src/debug/wasm/gdb-server/transport.h
index 077b1d1097..42bf4383bb 100644
--- a/deps/v8/src/debug/wasm/gdb-server/transport.h
+++ b/deps/v8/src/debug/wasm/gdb-server/transport.h
@@ -8,7 +8,7 @@
#include <sstream>
#include <vector>
#include "src/base/macros.h"
-#include "src/debug/wasm/gdb-server/util.h"
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
#if _WIN32
#include <windows.h>
@@ -47,7 +47,7 @@ namespace internal {
namespace wasm {
namespace gdb_server {
-class Transport;
+class SocketTransport;
// Acts as a factory for Transport objects bound to a specified TCP port.
class SocketBinding {
@@ -61,7 +61,7 @@ class SocketBinding {
bool IsValid() const { return socket_handle_ != InvalidSocket; }
// Create a transport object from this socket binding
- std::unique_ptr<Transport> CreateTransport();
+ std::unique_ptr<SocketTransport> CreateTransport();
// Get port the socket is bound to.
uint16_t GetBoundPort();
@@ -70,10 +70,12 @@ class SocketBinding {
SocketHandle socket_handle_;
};
-class TransportBase {
+class V8_EXPORT_PRIVATE TransportBase {
public:
- explicit TransportBase(SocketHandle s);
- virtual ~TransportBase();
+ virtual ~TransportBase() {}
+
+ // Waits for an incoming connection on the bound port.
+ virtual bool AcceptConnection() = 0;
// Read {len} bytes from this transport, possibly blocking until enough data
// is available.
@@ -81,23 +83,48 @@ class TransportBase {
// Returns true on success.
// Returns false if the connection is closed; in that case the {dst} may have
// been partially overwritten.
- bool Read(char* dst, int32_t len);
+ virtual bool Read(char* dst, int32_t len) = 0;
// Write {len} bytes to this transport.
// Return true on success, false if the connection is closed.
- bool Write(const char* src, int32_t len);
+ virtual bool Write(const char* src, int32_t len) = 0;
// Return true if there is data to read.
- bool IsDataAvailable() const;
+ virtual bool IsDataAvailable() const = 0;
+
+ // If we are connected to a debugger, gracefully closes the connection.
+ // This should be called when a debugging session gets closed.
+ virtual void Disconnect() = 0;
// Shuts down this transport, gracefully closing the existing connection and
// also closing the listening socket. This should be called when the GDB stub
// shuts down, when the program terminates.
- void Close();
+ virtual void Close() = 0;
- // If a socket connection with a debugger is present, gracefully closes it.
- // This should be called when a debugging session gets closed.
- virtual void Disconnect();
+ // Blocks waiting for one of these two events to occur:
+ // - A network event (a new packet arrives, or the connection is dropped),
+ // - A thread event is signaled (the execution stopped because of a trap or
+ // breakpoint).
+ virtual void WaitForDebugStubEvent() = 0;
+
+ // Signal that this transport should leave an alertable wait state because
+ // the execution of the debuggee was stopped because of a trap or breakpoint.
+ virtual bool SignalThreadEvent() = 0;
+};
+
+class Transport : public TransportBase {
+ public:
+ explicit Transport(SocketHandle s);
+ ~Transport() override;
+
+ // TransportBase
+ bool Read(char* dst, int32_t len) override;
+ bool Write(const char* src, int32_t len) override;
+ bool IsDataAvailable() const override;
+ void Disconnect() override;
+ void Close() override;
+
+ static const int kBufSize = 4096;
protected:
// Copy buffered data to *dst up to len bytes and update dst and len.
@@ -106,7 +133,6 @@ class TransportBase {
// Read available data from the socket. Return false on EOF or error.
virtual bool ReadSomeData() = 0;
- static const int kBufSize = 4096;
std::unique_ptr<char[]> buf_;
int32_t pos_;
int32_t size_;
@@ -116,25 +142,16 @@ class TransportBase {
#if _WIN32
-class Transport : public TransportBase {
+class SocketTransport : public Transport {
public:
- explicit Transport(SocketHandle s);
- ~Transport() override;
-
- // Waits for an incoming connection on the bound port.
- bool AcceptConnection();
-
- // Blocks waiting for one of these two events to occur:
- // - A network event (a new packet arrives, or the connection is dropped),
- // - A thread event is signaled (the execution stopped because of a trap or
- // breakpoint).
- void WaitForDebugStubEvent();
-
- // Signal that this transport should leave an alertable wait state because
- // the execution of the debuggee was stopped because of a trap or breakpoint.
- bool SignalThreadEvent();
+ explicit SocketTransport(SocketHandle s);
+ ~SocketTransport() override;
+ // TransportBase
+ bool AcceptConnection() override;
void Disconnect() override;
+ void WaitForDebugStubEvent() override;
+ bool SignalThreadEvent() override;
private:
bool ReadSomeData() override;
@@ -142,27 +159,20 @@ class Transport : public TransportBase {
HANDLE socket_event_;
HANDLE faulted_thread_event_;
- DISALLOW_COPY_AND_ASSIGN(Transport);
+ DISALLOW_COPY_AND_ASSIGN(SocketTransport);
};
#else // _WIN32
-class Transport : public TransportBase {
+class SocketTransport : public Transport {
public:
- explicit Transport(SocketHandle s);
- ~Transport() override;
-
- // Waits for an incoming connection on the bound port.
- bool AcceptConnection();
+ explicit SocketTransport(SocketHandle s);
+ ~SocketTransport() override;
- // Blocks waiting for one of these two events to occur:
- // - A network event (a new packet arrives, or the connection is dropped),
- // - The debuggee suspends execution because of a trap or breakpoint.
- void WaitForDebugStubEvent();
-
- // Signal that this transport should leave an alertable wait state because
- // the execution of the debuggee was stopped because of a trap or breakpoint.
- bool SignalThreadEvent();
+ // TransportBase
+ bool AcceptConnection() override;
+ void WaitForDebugStubEvent() override;
+ bool SignalThreadEvent() override;
private:
bool ReadSomeData() override;
@@ -170,7 +180,7 @@ class Transport : public TransportBase {
int faulted_thread_fd_read_;
int faulted_thread_fd_write_;
- DISALLOW_COPY_AND_ASSIGN(Transport);
+ DISALLOW_COPY_AND_ASSIGN(SocketTransport);
};
#endif // _WIN32
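
With this refactoring, TransportBase is the pure-virtual interface (now exported with V8_EXPORT_PRIVATE, presumably so the GdbServer code can be exercised against a mock transport in tests), Transport carries the shared buffered-socket logic, and SocketTransport supplies the platform-specific pieces. A minimal usage sketch, assuming a single debugger connection and ignoring error handling (illustrative only, not code from the patch; the real classes live in the gdb_server namespace):

#include <cstdint>
#include <memory>

void ServeOneDebugger(uint16_t port) {
  SocketBinding binding = SocketBinding::Bind(port);
  if (!binding.IsValid()) return;
  std::unique_ptr<SocketTransport> transport = binding.CreateTransport();
  if (!transport->AcceptConnection()) return;  // Blocks until a debugger connects.
  char byte;
  while (transport->Read(&byte, 1)) {
    // Feed each byte to the packet parser (Session::GetPacket in this patch).
  }
  transport->Close();  // Closes both the connection and the listening socket.
}
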
diff --git a/deps/v8/src/debug/wasm/gdb-server/util.h b/deps/v8/src/debug/wasm/gdb-server/util.h
deleted file mode 100644
index 5e977741d8..0000000000
--- a/deps/v8/src/debug/wasm/gdb-server/util.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
-#define V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
-
-#include <string>
-#include "src/flags/flags.h"
-#include "src/utils/utils.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace gdb_server {
-
-#define TRACE_GDB_REMOTE(...) \
- do { \
- if (FLAG_trace_wasm_gdb_remote) PrintF("[gdb-remote] " __VA_ARGS__); \
- } while (false)
-
-} // namespace gdb_server
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_DEBUG_WASM_GDB_SERVER_UTIL_H_
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
new file mode 100644
index 0000000000..2bd9b1e5d4
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -0,0 +1,388 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/wasm/gdb-server/wasm-module-debug.h"
+
+#include "src/api/api-inl.h"
+#include "src/api/api.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
+#include "src/objects/script.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-value.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+WasmModuleDebug::WasmModuleDebug(v8::Isolate* isolate,
+ Local<debug::WasmScript> wasm_script) {
+ DCHECK_EQ(Script::TYPE_WASM, Utils::OpenHandle(*wasm_script)->type());
+
+ isolate_ = isolate;
+ wasm_script_ = Global<debug::WasmScript>(isolate, wasm_script);
+}
+
+std::string WasmModuleDebug::GetModuleName() const {
+ v8::Local<debug::WasmScript> wasm_script = wasm_script_.Get(isolate_);
+ v8::Local<v8::String> name;
+ std::string module_name;
+ if (wasm_script->Name().ToLocal(&name)) {
+ module_name = *(v8::String::Utf8Value(isolate_, name));
+ }
+ return module_name;
+}
+
+Handle<WasmInstanceObject> WasmModuleDebug::GetFirstWasmInstance() {
+ v8::Local<debug::WasmScript> wasm_script = wasm_script_.Get(isolate_);
+ Handle<Script> script = Utils::OpenHandle(*wasm_script);
+
+ Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
+ GetIsolate());
+ if (weak_instance_list->length() > 0) {
+ MaybeObject maybe_instance = weak_instance_list->Get(0);
+ if (maybe_instance->IsWeak()) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
+ GetIsolate());
+ return instance;
+ }
+ }
+ return Handle<WasmInstanceObject>::null();
+}
+
+int GetLEB128Size(Vector<const uint8_t> module_bytes, int offset) {
+ int index = offset;
+ while (module_bytes[index] & 0x80) index++;
+ return index + 1 - offset;
+}
+
+int ReturnPc(const NativeModule* native_module, int pc) {
+ Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ uint8_t opcode = wire_bytes[pc];
+ switch (opcode) {
+ case kExprCallFunction: {
+ // skip opcode
+ pc++;
+ // skip function index
+ return pc + GetLEB128Size(wire_bytes, pc);
+ }
+ case kExprCallIndirect: {
+ // skip opcode
+ pc++;
+ // skip signature index
+ pc += GetLEB128Size(wire_bytes, pc);
+ // skip table index
+ return pc + GetLEB128Size(wire_bytes, pc);
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// static
+std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
+ uint32_t debug_context_id, Isolate* isolate) {
+ std::vector<wasm_addr_t> call_stack;
+ for (StackFrameIterator frame_it(isolate); !frame_it.done();
+ frame_it.Advance()) {
+ StackFrame* const frame = frame_it.frame();
+ switch (frame->type()) {
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
+ case StackFrame::OPTIMIZED:
+ case StackFrame::INTERPRETED:
+ case StackFrame::BUILTIN:
+ case StackFrame::WASM: {
+ // A standard frame may include many summarized frames, due to inlining.
+ std::vector<FrameSummary> frames;
+ StandardFrame::cast(frame)->Summarize(&frames);
+ for (size_t i = frames.size(); i-- != 0;) {
+ int offset = 0;
+ Handle<Script> script;
+
+ auto& summary = frames[i];
+ if (summary.IsJavaScript()) {
+ FrameSummary::JavaScriptFrameSummary const& java_script =
+ summary.AsJavaScript();
+ offset = java_script.code_offset();
+ script = Handle<Script>::cast(java_script.script());
+ } else if (summary.IsWasm()) {
+ FrameSummary::WasmFrameSummary const& wasm = summary.AsWasm();
+ offset = GetWasmFunctionOffset(wasm.wasm_instance()->module(),
+ wasm.function_index()) +
+ wasm.byte_offset();
+ script = wasm.script();
+
+ bool zeroth_frame = call_stack.empty();
+ if (!zeroth_frame) {
+ const NativeModule* native_module =
+ wasm.wasm_instance()->module_object().native_module();
+ offset = ReturnPc(native_module, offset);
+ }
+ }
+
+ if (offset > 0) {
+ call_stack.push_back(
+ {debug_context_id << 16 | script->id(), uint32_t(offset)});
+ }
+ }
+ break;
+ }
+
+ case StackFrame::BUILTIN_EXIT:
+ default:
+ // ignore the frame.
+ break;
+ }
+ }
+ if (call_stack.empty()) call_stack.push_back({1, 0});
+ return call_stack;
+}
+
+// static
+std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
+ StackTraceFrameIterator* frame_it, uint32_t* frame_index) {
+ while (!frame_it->done()) {
+ StackFrame* const frame = frame_it->frame();
+ switch (frame->type()) {
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
+ case StackFrame::OPTIMIZED:
+ case StackFrame::INTERPRETED:
+ case StackFrame::BUILTIN:
+ case StackFrame::WASM: {
+ // A standard frame may include many summarized frames, due to inlining.
+ std::vector<FrameSummary> frames;
+ StandardFrame::cast(frame)->Summarize(&frames);
+ const size_t frame_count = frames.size();
+ DCHECK_GT(frame_count, 0);
+
+ if (frame_count > *frame_index) {
+ if (frame_it->is_wasm())
+ return frames;
+ else
+ return {};
+ } else {
+ *frame_index -= frame_count;
+ frame_it->Advance();
+ }
+ break;
+ }
+
+ case StackFrame::BUILTIN_EXIT:
+ default:
+ // ignore the frame.
+ break;
+ }
+ }
+ return {};
+}
+
+// static
+Handle<WasmInstanceObject> WasmModuleDebug::GetWasmInstance(
+ Isolate* isolate, uint32_t frame_index) {
+ StackTraceFrameIterator frame_it(isolate);
+ std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
+ if (frames.empty()) {
+ return Handle<WasmInstanceObject>::null();
+ }
+
+ int reversed_index = static_cast<int>(frames.size() - 1 - frame_index);
+ const FrameSummary::WasmFrameSummary& summary =
+ frames[reversed_index].AsWasm();
+ return summary.wasm_instance();
+}
+
+// static
+bool WasmModuleDebug::GetWasmGlobal(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size) {
+ HandleScope handles(isolate);
+
+ Handle<WasmInstanceObject> instance = GetWasmInstance(isolate, frame_index);
+ if (!instance.is_null()) {
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ const wasm::WasmModule* module = module_object->module();
+ if (index < module->globals.size()) {
+ wasm::WasmValue wasm_value =
+ WasmInstanceObject::GetGlobalValue(instance, module->globals[index]);
+ return GetWasmValue(wasm_value, buffer, buffer_size, size);
+ }
+ }
+ return false;
+}
+
+// static
+bool WasmModuleDebug::GetWasmLocal(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size) {
+ HandleScope handles(isolate);
+
+ StackTraceFrameIterator frame_it(isolate);
+ std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
+ if (frames.empty()) {
+ return false;
+ }
+
+ int reversed_index = static_cast<int>(frames.size() - 1 - frame_index);
+ const FrameSummary& summary = frames[reversed_index];
+ if (summary.IsWasm()) {
+ Handle<WasmInstanceObject> instance = summary.AsWasm().wasm_instance();
+ if (!instance.is_null()) {
+ Handle<WasmModuleObject> module_object(instance->module_object(),
+ isolate);
+ wasm::NativeModule* native_module = module_object->native_module();
+ DebugInfo* debug_info = native_module->GetDebugInfo();
+ if (static_cast<uint32_t>(debug_info->GetNumLocals(
+ isolate, frame_it.frame()->pc())) > index) {
+ wasm::WasmValue wasm_value = debug_info->GetLocalValue(
+ index, isolate, frame_it.frame()->pc(), frame_it.frame()->fp(),
+ frame_it.frame()->callee_fp());
+ return GetWasmValue(wasm_value, buffer, buffer_size, size);
+ }
+ }
+ }
+ return false;
+}
+
+// static
+bool WasmModuleDebug::GetWasmStackValue(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size) {
+ HandleScope handles(isolate);
+
+ StackTraceFrameIterator frame_it(isolate);
+ std::vector<FrameSummary> frames = FindWasmFrame(&frame_it, &frame_index);
+ if (frames.empty()) {
+ return false;
+ }
+
+ int reversed_index = static_cast<int>(frames.size() - 1 - frame_index);
+ const FrameSummary& summary = frames[reversed_index];
+ if (summary.IsWasm()) {
+ Handle<WasmInstanceObject> instance = summary.AsWasm().wasm_instance();
+ if (!instance.is_null()) {
+ Handle<WasmModuleObject> module_object(instance->module_object(),
+ isolate);
+ wasm::NativeModule* native_module = module_object->native_module();
+ DebugInfo* debug_info = native_module->GetDebugInfo();
+ if (static_cast<uint32_t>(debug_info->GetStackDepth(
+ isolate, frame_it.frame()->pc())) > index) {
+ WasmValue wasm_value = debug_info->GetStackValue(
+ index, isolate, frame_it.frame()->pc(), frame_it.frame()->fp(),
+ frame_it.frame()->callee_fp());
+ return GetWasmValue(wasm_value, buffer, buffer_size, size);
+ }
+ }
+ }
+ return false;
+}
+
+// static
+uint32_t WasmModuleDebug::GetWasmMemory(Isolate* isolate, uint32_t frame_index,
+ uint32_t offset, uint8_t* buffer,
+ uint32_t size) {
+ HandleScope handles(isolate);
+
+ uint32_t bytes_read = 0;
+ Handle<WasmInstanceObject> instance = GetWasmInstance(isolate, frame_index);
+ if (!instance.is_null()) {
+ uint8_t* mem_start = instance->memory_start();
+ size_t mem_size = instance->memory_size();
+ if (static_cast<uint64_t>(offset) + size <= mem_size) {
+ memcpy(buffer, mem_start + offset, size);
+ bytes_read = size;
+ } else if (offset < mem_size) {
+ bytes_read = static_cast<uint32_t>(mem_size) - offset;
+ memcpy(buffer, mem_start + offset, bytes_read);
+ }
+ }
+ return bytes_read;
+}
+
+uint32_t WasmModuleDebug::GetWasmModuleBytes(wasm_addr_t wasm_addr,
+ uint8_t* buffer, uint32_t size) {
+ uint32_t bytes_read = 0;
+ // Any instance will work.
+ Handle<WasmInstanceObject> instance = GetFirstWasmInstance();
+ if (!instance.is_null()) {
+ Handle<WasmModuleObject> module_object(instance->module_object(),
+ GetIsolate());
+ wasm::NativeModule* native_module = module_object->native_module();
+ const wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ uint32_t offset = wasm_addr.Offset();
+ if (offset < wire_bytes.length()) {
+ uint32_t module_size = static_cast<uint32_t>(wire_bytes.length());
+ bytes_read = module_size - offset >= size ? size : module_size - offset;
+ memcpy(buffer, wire_bytes.start() + offset, bytes_read);
+ }
+ }
+ return bytes_read;
+}
+
+bool WasmModuleDebug::AddBreakpoint(uint32_t offset, int* breakpoint_id) {
+ v8::Local<debug::WasmScript> wasm_script = wasm_script_.Get(isolate_);
+ Handle<Script> script = Utils::OpenHandle(*wasm_script);
+ Handle<String> condition = GetIsolate()->factory()->empty_string();
+ int breakpoint_address = static_cast<int>(offset);
+ return GetIsolate()->debug()->SetBreakPointForScript(
+ script, condition, &breakpoint_address, breakpoint_id);
+}
+
+void WasmModuleDebug::RemoveBreakpoint(uint32_t offset, int breakpoint_id) {
+ v8::Local<debug::WasmScript> wasm_script = wasm_script_.Get(isolate_);
+ Handle<Script> script = Utils::OpenHandle(*wasm_script);
+ GetIsolate()->debug()->RemoveBreakpointForWasmScript(script, breakpoint_id);
+}
+
+void WasmModuleDebug::PrepareStep() {
+ i::Isolate* isolate = GetIsolate();
+ DebugScope debug_scope(isolate->debug());
+ debug::PrepareStep(reinterpret_cast<v8::Isolate*>(isolate),
+ debug::StepAction::StepIn);
+}
+
+template <typename T>
+bool StoreValue(const T& value, uint8_t* buffer, uint32_t buffer_size,
+ uint32_t* size) {
+ *size = sizeof(value);
+ if (*size > buffer_size) return false;
+ memcpy(buffer, &value, *size);
+ return true;
+}
+
+// static
+bool WasmModuleDebug::GetWasmValue(const wasm::WasmValue& wasm_value,
+ uint8_t* buffer, uint32_t buffer_size,
+ uint32_t* size) {
+ switch (wasm_value.type().kind()) {
+ case wasm::kWasmI32.kind():
+ return StoreValue(wasm_value.to_i32(), buffer, buffer_size, size);
+ case wasm::kWasmI64.kind():
+ return StoreValue(wasm_value.to_i64(), buffer, buffer_size, size);
+ case wasm::kWasmF32.kind():
+ return StoreValue(wasm_value.to_f32(), buffer, buffer_size, size);
+ case wasm::kWasmF64.kind():
+ return StoreValue(wasm_value.to_f64(), buffer, buffer_size, size);
+ case wasm::kWasmS128.kind():
+ return StoreValue(wasm_value.to_s128(), buffer, buffer_size, size);
+
+ case wasm::kWasmStmt.kind():
+ case wasm::kWasmAnyRef.kind():
+ case wasm::kWasmFuncRef.kind():
+ case wasm::kWasmNullRef.kind():
+ case wasm::kWasmExnRef.kind():
+ case wasm::kWasmBottom.kind():
+ default:
+ // Not supported
+ return false;
+ }
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
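
ReturnPc() above computes the address of the instruction following a call by skipping the call opcode and its LEB128-encoded immediates, and GetLEB128Size() simply counts bytes until it finds one without the continuation bit. For reference, this is the unsigned LEB128 format those immediates use: each byte carries 7 value bits and the most significant bit says whether more bytes follow, so 300 encodes as {0xAC, 0x02} and GetLEB128Size() reports 2 for it. A stand-alone encoder sketch (not part of the patch):

#include <cstdint>
#include <vector>

// Minimal unsigned-LEB128 encoder, the inverse of what GetLEB128Size() walks.
std::vector<uint8_t> EncodeUnsignedLEB128(uint32_t value) {
  std::vector<uint8_t> bytes;
  do {
    uint8_t byte = value & 0x7F;   // Low 7 bits of the remaining value.
    value >>= 7;
    if (value != 0) byte |= 0x80;  // Continuation bit: more bytes follow.
    bytes.push_back(byte);
  } while (value != 0);
  return bytes;
}
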
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h
new file mode 100644
index 0000000000..10e6a5da16
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.h
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_WASM_GDB_SERVER_WASM_MODULE_DEBUG_H_
+#define V8_DEBUG_WASM_GDB_SERVER_WASM_MODULE_DEBUG_H_
+
+#include "src/debug/debug.h"
+#include "src/debug/wasm/gdb-server/gdb-remote-util.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmValue;
+
+namespace gdb_server {
+
+// Represents the interface to access the Wasm engine state for a given module.
+// For the moment it only works with interpreted functions; in the future it
+// could be extended to also support Liftoff.
+class WasmModuleDebug {
+ public:
+ WasmModuleDebug(v8::Isolate* isolate, Local<debug::WasmScript> script);
+
+ std::string GetModuleName() const;
+ i::Isolate* GetIsolate() const {
+ return reinterpret_cast<i::Isolate*>(isolate_);
+ }
+
+  // Gets the value of the {index}th global.
+ static bool GetWasmGlobal(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+  // Gets the value of the {index}th local in the {frame_index}th stack
+  // frame.
+ static bool GetWasmLocal(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+  // Gets the {index}th value on the operand stack.
+ static bool GetWasmStackValue(Isolate* isolate, uint32_t frame_index,
+ uint32_t index, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+ // Reads {size} bytes, starting from {offset}, from the Memory instance
+  // associated with this module.
+  // Returns the number of bytes copied to {buffer}, or 0 in case of error.
+  // Note: only one Memory per Module is currently supported.
+ static uint32_t GetWasmMemory(Isolate* isolate, uint32_t frame_index,
+ uint32_t offset, uint8_t* buffer,
+ uint32_t size);
+
+ // Gets {size} bytes, starting from {offset}, from the Code space of this
+ // module.
+  // Returns the number of bytes copied to {buffer}, or 0 in case of error.
+ uint32_t GetWasmModuleBytes(wasm_addr_t wasm_addr, uint8_t* buffer,
+ uint32_t size);
+
+ // Inserts a breakpoint at the offset {offset} of this module.
+ // Returns {true} if the breakpoint was successfully added.
+ bool AddBreakpoint(uint32_t offset, int* breakpoint_id);
+
+  // Removes a breakpoint at the offset {offset} of this module.
+ void RemoveBreakpoint(uint32_t offset, int breakpoint_id);
+
+ // Handle stepping in wasm functions via the wasm interpreter.
+ void PrepareStep();
+
+ // Returns the current stack trace as a vector of instruction pointers.
+ static std::vector<wasm_addr_t> GetCallStack(uint32_t debug_context_id,
+ Isolate* isolate);
+
+ private:
+  // Returns the module WasmInstance associated with the {frame_index}th frame
+ // in the call stack.
+ static Handle<WasmInstanceObject> GetWasmInstance(Isolate* isolate,
+ uint32_t frame_index);
+
+  // Returns the first WasmInstance for this Wasm module.
+ Handle<WasmInstanceObject> GetFirstWasmInstance();
+
+  // Iterates over the current stack frames and returns frame information for
+  // the {frame_index} specified.
+ // Returns an empty array if the frame specified does not correspond to a Wasm
+ // stack frame.
+ static std::vector<FrameSummary> FindWasmFrame(
+ StackTraceFrameIterator* frame_it, uint32_t* frame_index);
+
+ // Converts a WasmValue into an array of bytes.
+ static bool GetWasmValue(const wasm::WasmValue& wasm_value, uint8_t* buffer,
+ uint32_t buffer_size, uint32_t* size);
+
+ v8::Isolate* isolate_;
+ Global<debug::WasmScript> wasm_script_;
+};
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_WASM_GDB_SERVER_WASM_MODULE_DEBUG_H_
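
A note on wasm_addr_t, which appears throughout these interfaces: its definition lives in gdb-remote-util.h and is not shown in this patch. Judging from the section addresses such as 0x100000000 reported for module id 1 in the qXfer:libraries:read reply, and from constructor calls like wasm_addr_t(module_id, 0), it appears to pack a 32-bit module id in the upper half and a 32-bit code offset in the lower half of a 64-bit value. A hypothetical sketch of that packing (names and layout are assumptions, not the real header):

#include <cstdint>

struct WasmAddrSketch {
  // Assumed layout: high 32 bits = module id, low 32 bits = byte offset.
  uint64_t value;
  WasmAddrSketch(uint32_t module_id, uint32_t offset)
      : value((uint64_t{module_id} << 32) | offset) {}
  uint32_t ModuleId() const { return static_cast<uint32_t>(value >> 32); }
  uint32_t Offset() const { return static_cast<uint32_t>(value); }
};
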
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 9fcccd483c..44c92f5570 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -48,7 +48,6 @@ class FrameWriter {
void PushRawValue(intptr_t value, const char* debug_hint) {
PushValue(value);
-
if (trace_scope_ != nullptr) {
DebugPrintOutputValue(value, debug_hint);
}
@@ -83,13 +82,10 @@ class FrameWriter {
void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
const char* debug_hint = "") {
Object obj = iterator->GetRawValue();
-
PushRawObject(obj, debug_hint);
-
if (trace_scope_) {
PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
}
-
deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj,
iterator);
}
@@ -2428,6 +2424,11 @@ int TranslatedValue::object_index() const {
Object TranslatedValue::GetRawValue() const {
// If we have a value, return it.
if (materialization_state() == kFinished) {
+ int smi;
+ if (storage_->IsHeapNumber() &&
+ DoubleToSmiInteger(storage_->Number(), &smi)) {
+ return Smi::FromInt(smi);
+ }
return *storage_;
}
@@ -2470,6 +2471,22 @@ Object TranslatedValue::GetRawValue() const {
}
}
+ case kFloat: {
+ int smi;
+ if (DoubleToSmiInteger(float_value().get_scalar(), &smi)) {
+ return Smi::FromInt(smi);
+ }
+ break;
+ }
+
+ case kDouble: {
+ int smi;
+ if (DoubleToSmiInteger(double_value().get_scalar(), &smi)) {
+ return Smi::FromInt(smi);
+ }
+ break;
+ }
+
default:
break;
}
@@ -2479,106 +2496,76 @@ Object TranslatedValue::GetRawValue() const {
return ReadOnlyRoots(isolate()).arguments_marker();
}
-void TranslatedValue::set_initialized_storage(Handle<Object> storage) {
+void TranslatedValue::set_initialized_storage(Handle<HeapObject> storage) {
DCHECK_EQ(kUninitialized, materialization_state());
storage_ = storage;
materialization_state_ = kFinished;
}
Handle<Object> TranslatedValue::GetValue() {
- // If we already have a value, then get it.
- if (materialization_state() == kFinished) return storage_;
-
- // Otherwise we have to materialize.
- switch (kind()) {
- case TranslatedValue::kTagged:
- case TranslatedValue::kInt32:
- case TranslatedValue::kInt64:
- case TranslatedValue::kUInt32:
- case TranslatedValue::kBoolBit:
- case TranslatedValue::kFloat:
- case TranslatedValue::kDouble: {
- MaterializeSimple();
- return storage_;
- }
-
- case TranslatedValue::kCapturedObject:
- case TranslatedValue::kDuplicatedObject: {
- // We need to materialize the object (or possibly even object graphs).
- // To make the object verifier happy, we materialize in two steps.
-
- // 1. Allocate storage for reachable objects. This makes sure that for
- // each object we have allocated space on heap. The space will be
- // a byte array that will be later initialized, or a fully
- // initialized object if it is safe to allocate one that will
- // pass the verifier.
- container_->EnsureObjectAllocatedAt(this);
-
- // 2. Initialize the objects. If we have allocated only byte arrays
- // for some objects, we now overwrite the byte arrays with the
- // correct object fields. Note that this phase does not allocate
- // any new objects, so it does not trigger the object verifier.
- return container_->InitializeObjectAt(this);
- }
-
- case TranslatedValue::kInvalid:
- FATAL("unexpected case");
- return Handle<Object>::null();
+ Handle<Object> value(GetRawValue(), isolate());
+ if (materialization_state() == kFinished) return value;
+
+ if (value->IsSmi()) {
+ // Even though stored as a Smi, this number might instead be needed as a
+ // HeapNumber when materializing a JSObject with a field of HeapObject
+ // representation. Since we don't have this information available here, we
+ // just always allocate a HeapNumber and later extract the Smi again if we
+ // don't need a HeapObject.
+ set_initialized_storage(
+ isolate()->factory()->NewHeapNumber(value->Number()));
+ return value;
}
- FATAL("internal error: value missing");
- return Handle<Object>::null();
-}
-
-void TranslatedValue::MaterializeSimple() {
- // If we already have materialized, return.
- if (materialization_state() == kFinished) return;
-
- Object raw_value = GetRawValue();
- if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) {
- // We can get the value without allocation, just return it here.
- set_initialized_storage(Handle<Object>(raw_value, isolate()));
- return;
+ if (*value != ReadOnlyRoots(isolate()).arguments_marker()) {
+ set_initialized_storage(Handle<HeapObject>::cast(value));
+ return storage_;
}
- switch (kind()) {
- case kInt32:
- set_initialized_storage(
- Handle<Object>(isolate()->factory()->NewNumber(int32_value())));
- return;
+ // Otherwise we have to materialize.
- case kInt64:
- set_initialized_storage(Handle<Object>(
- isolate()->factory()->NewNumber(static_cast<double>(int64_value()))));
- return;
+ if (kind() == TranslatedValue::kCapturedObject ||
+ kind() == TranslatedValue::kDuplicatedObject) {
+ // We need to materialize the object (or possibly even object graphs).
+ // To make the object verifier happy, we materialize in two steps.
- case kUInt32:
- set_initialized_storage(
- Handle<Object>(isolate()->factory()->NewNumber(uint32_value())));
- return;
+ // 1. Allocate storage for reachable objects. This makes sure that for
+ // each object we have allocated space on heap. The space will be
+ // a byte array that will be later initialized, or a fully
+ // initialized object if it is safe to allocate one that will
+ // pass the verifier.
+ container_->EnsureObjectAllocatedAt(this);
- case kFloat: {
- double scalar_value = float_value().get_scalar();
- set_initialized_storage(
- Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
- return;
- }
-
- case kDouble: {
- double scalar_value = double_value().get_scalar();
- set_initialized_storage(
- Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
- return;
- }
+ // 2. Initialize the objects. If we have allocated only byte arrays
+ // for some objects, we now overwrite the byte arrays with the
+ // correct object fields. Note that this phase does not allocate
+ // any new objects, so it does not trigger the object verifier.
+ return container_->InitializeObjectAt(this);
+ }
- case kCapturedObject:
- case kDuplicatedObject:
- case kInvalid:
- case kTagged:
- case kBoolBit:
- FATAL("internal error: unexpected materialization.");
+ double number;
+ switch (kind()) {
+ case TranslatedValue::kInt32:
+ number = int32_value();
+ break;
+ case TranslatedValue::kInt64:
+ number = int64_value();
+ break;
+ case TranslatedValue::kUInt32:
+ number = uint32_value();
+ break;
+ case TranslatedValue::kFloat:
+ number = float_value().get_scalar();
break;
+ case TranslatedValue::kDouble:
+ number = double_value().get_scalar();
+ break;
+ default:
+ UNREACHABLE();
}
+ DCHECK(!IsSmiDouble(number));
+ set_initialized_storage(isolate()->factory()->NewHeapNumber(number));
+ return storage_;
}
bool TranslatedValue::IsMaterializedObject() const {
@@ -2634,8 +2621,9 @@ Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
}
void TranslatedValue::Handlify() {
- if (kind() == kTagged) {
- set_initialized_storage(Handle<Object>(raw_literal(), isolate()));
+ if (kind() == kTagged && raw_literal().IsHeapObject()) {
+ set_initialized_storage(
+ Handle<HeapObject>(HeapObject::cast(raw_literal()), isolate()));
raw_literal_ = Object();
}
}
@@ -3386,7 +3374,7 @@ TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
return &(frames_[pos.frame_index_].values_[pos.value_index_]);
}
-Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+Handle<HeapObject> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
slot = ResolveCapturedObject(slot);
DisallowHeapAllocation no_allocation;
@@ -3401,7 +3389,7 @@ Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
InitializeCapturedObjectAt(index, &worklist, no_allocation);
}
}
- return slot->GetStorage();
+ return slot->storage();
}
void TranslatedState::InitializeCapturedObjectAt(
@@ -3501,11 +3489,17 @@ void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
}
}
+int TranslatedValue::GetSmiValue() const {
+ Object value = GetRawValue();
+ CHECK(value.IsSmi());
+ return Smi::cast(value).value();
+}
+
void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
int* value_index,
TranslatedValue* slot,
Handle<Map> map) {
- int length = Smi::cast(frame->values_[*value_index].GetRawValue()).value();
+ int length = frame->values_[*value_index].GetSmiValue();
(*value_index)++;
Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
isolate()->factory()->NewFixedDoubleArray(length));
@@ -3539,10 +3533,10 @@ void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame,
namespace {
-enum DoubleStorageKind : uint8_t {
+enum StorageKind : uint8_t {
kStoreTagged,
kStoreUnboxedDouble,
- kStoreMutableHeapNumber,
+ kStoreHeapObject
};
} // namespace
@@ -3614,9 +3608,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case SIMPLE_NUMBER_DICTIONARY_TYPE:
case STRING_TABLE_TYPE: {
// Check we have the right size.
- int array_length =
- Smi::cast(frame->values_[value_index].GetRawValue()).value();
-
+ int array_length = frame->values_[value_index].GetSmiValue();
int instance_size = FixedArray::SizeFor(array_length);
CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
@@ -3635,13 +3627,13 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case PROPERTY_ARRAY_TYPE: {
// Check we have the right size.
- int length_or_hash =
- Smi::cast(frame->values_[value_index].GetRawValue()).value();
+ int length_or_hash = frame->values_[value_index].GetSmiValue();
int array_length = PropertyArray::LengthField::decode(length_or_hash);
int instance_size = PropertyArray::SizeFor(array_length);
CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
slot->set_storage(AllocateStorageFor(slot));
+
// Make sure all the remaining children (after the map) are allocated.
return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
&value_index, worklist);
@@ -3686,7 +3678,7 @@ void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
} else {
// Make sure the simple values (heap numbers, etc.) are properly
// initialized.
- child_slot->MaterializeSimple();
+ child_slot->GetValue();
}
SkipSlots(1, frame, value_index);
}
@@ -3701,16 +3693,17 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->mark_allocated();
properties_slot->set_storage(object_storage);
- // Set markers for the double properties.
+ // Set markers for out-of-object properties.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (descriptors->GetDetails(i).representation().IsDouble() &&
- !index.is_inobject()) {
+ Representation representation = descriptors->GetDetails(i).representation();
+ if (!index.is_inobject() &&
+ (representation.IsDouble() || representation.IsHeapObject())) {
CHECK(!map->IsUnboxedDoubleField(index));
int outobject_index = index.outobject_array_index();
int array_index = outobject_index * kTaggedSize;
- object_storage->set(array_index, kStoreMutableHeapNumber);
+ object_storage->set(array_index, kStoreHeapObject);
}
}
}
@@ -3736,31 +3729,44 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
// Now we handle the interesting (JSObject) case.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- // Set markers for the double properties.
+ // Set markers for in-object properties.
for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- if (descriptors->GetDetails(i).representation().IsDouble() &&
- index.is_inobject()) {
+ Representation representation = descriptors->GetDetails(i).representation();
+ if (index.is_inobject() &&
+ (representation.IsDouble() || representation.IsHeapObject())) {
CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
- uint8_t marker = map->IsUnboxedDoubleField(index)
- ? kStoreUnboxedDouble
- : kStoreMutableHeapNumber;
+ uint8_t marker = map->IsUnboxedDoubleField(index) ? kStoreUnboxedDouble
+ : kStoreHeapObject;
object_storage->set(array_index, marker);
}
}
slot->set_storage(object_storage);
}
-Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
- int* value_index) {
- TranslatedValue* slot = frame->ValueAt(*value_index);
- SkipSlots(1, frame, value_index);
+TranslatedValue* TranslatedState::GetResolvedSlot(TranslatedFrame* frame,
+ int value_index) {
+ TranslatedValue* slot = frame->ValueAt(value_index);
if (slot->kind() == TranslatedValue::kDuplicatedObject) {
slot = ResolveCapturedObject(slot);
}
- CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state());
- return slot->GetStorage();
+ CHECK_NE(slot->materialization_state(), TranslatedValue::kUninitialized);
+ return slot;
+}
+
+TranslatedValue* TranslatedState::GetResolvedSlotAndAdvance(
+ TranslatedFrame* frame, int* value_index) {
+ TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
+ SkipSlots(1, frame, value_index);
+ return slot;
+}
+
+Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
+ int* value_index) {
+ TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
+ SkipSlots(1, frame, value_index);
+ return slot->GetValue();
}
void TranslatedState::InitializeJSObjectAt(
@@ -3788,29 +3794,25 @@ void TranslatedState::InitializeJSObjectAt(
// marker to see if we store an unboxed double.
DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
for (int i = 2; i < slot->GetChildrenCount(); i++) {
- // Initialize and extract the value from its slot.
- Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
-
+ TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
// Read out the marker and ensure the field is consistent with
// what the markers in the storage say (note that all heap numbers
// should be fully initialized by now).
int offset = i * kTaggedSize;
uint8_t marker = object_storage->ReadField<uint8_t>(offset);
if (marker == kStoreUnboxedDouble) {
- double double_field_value;
- if (field_value->IsSmi()) {
- double_field_value = Smi::cast(*field_value).value();
- } else {
- CHECK(field_value->IsHeapNumber());
- double_field_value = HeapNumber::cast(*field_value).value();
- }
- object_storage->WriteField<double>(offset, double_field_value);
- } else if (marker == kStoreMutableHeapNumber) {
+ Handle<HeapObject> field_value = slot->storage();
CHECK(field_value->IsHeapNumber());
+ object_storage->WriteField<double>(offset, field_value->Number());
+ } else if (marker == kStoreHeapObject) {
+ Handle<HeapObject> field_value = slot->storage();
WRITE_FIELD(*object_storage, offset, *field_value);
WRITE_BARRIER(*object_storage, offset, *field_value);
} else {
CHECK_EQ(kStoreTagged, marker);
+ Handle<Object> field_value = slot->GetValue();
+ DCHECK_IMPLIES(field_value->IsHeapNumber(),
+ !IsSmiDouble(field_value->Number()));
WRITE_FIELD(*object_storage, offset, *field_value);
WRITE_BARRIER(*object_storage, offset, *field_value);
}
@@ -3836,15 +3838,18 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
// Write the fields to the object.
for (int i = 1; i < slot->GetChildrenCount(); i++) {
- Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+ TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
int offset = i * kTaggedSize;
uint8_t marker = object_storage->ReadField<uint8_t>(offset);
- if (i > 1 && marker == kStoreMutableHeapNumber) {
- CHECK(field_value->IsHeapNumber());
+ Handle<Object> field_value;
+ if (i > 1 && marker == kStoreHeapObject) {
+ field_value = slot->storage();
} else {
CHECK(marker == kStoreTagged || i == 1);
+ field_value = slot->GetValue();
+ DCHECK_IMPLIES(field_value->IsHeapNumber(),
+ !IsSmiDouble(field_value->Number()));
}
-
WRITE_FIELD(*object_storage, offset, *field_value);
WRITE_BARRIER(*object_storage, offset, *field_value);
}
@@ -3911,10 +3916,7 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
// argument (the receiver).
static constexpr int kTheContext = 1;
const int height = frames_[i].height() + kTheContext;
- Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
- CHECK(argc_object.IsSmi());
- *args_count = Smi::ToInt(argc_object);
-
+ *args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
DCHECK_EQ(*args_count, 1);
} else {
*args_count = InternalFormalParameterCountWithReceiver(
@@ -3956,21 +3958,30 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
CHECK(value_info->IsMaterializedObject());
- // Skip duplicate objects (i.e., those that point to some
- // other object id).
+ // Skip duplicate objects (i.e., those that point to some other object id).
if (value_info->object_index() != i) continue;
+ Handle<Object> previous_value(previously_materialized_objects->get(i),
+ isolate_);
Handle<Object> value(value_info->GetRawValue(), isolate_);
- if (!value.is_identical_to(marker)) {
- if (previously_materialized_objects->get(i) == *marker) {
+ if (value.is_identical_to(marker)) {
+ DCHECK_EQ(*previous_value, *marker);
+ } else {
+ if (*previous_value == *marker) {
+ if (value->IsSmi()) {
+ value = isolate()->factory()->NewHeapNumber(value->Number());
+ }
previously_materialized_objects->set(i, *value);
value_changed = true;
} else {
- CHECK(previously_materialized_objects->get(i) == *value);
+ CHECK(*previous_value == *value ||
+ (previous_value->IsHeapNumber() && value->IsSmi() &&
+ previous_value->Number() == value->Number()));
}
}
}
+
if (new_store && value_changed) {
materialized_store->Set(stack_frame_pointer_,
previously_materialized_objects);
@@ -4004,8 +4015,10 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
CHECK(value_info->IsMaterializedObject());
if (value_info->kind() == TranslatedValue::kCapturedObject) {
- value_info->set_initialized_storage(
- Handle<Object>(previously_materialized_objects->get(i), isolate_));
+ Handle<Object> object(previously_materialized_objects->get(i),
+ isolate_);
+ CHECK(object->IsHeapObject());
+ value_info->set_initialized_storage(Handle<HeapObject>::cast(object));
}
}
}
@@ -4019,7 +4032,7 @@ void TranslatedState::VerifyMaterializedObjects() {
if (slot->kind() == TranslatedValue::kCapturedObject) {
CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
if (slot->materialization_state() == TranslatedValue::kFinished) {
- slot->GetStorage()->ObjectVerify(isolate());
+ slot->storage()->ObjectVerify(isolate());
} else {
CHECK_EQ(slot->materialization_state(),
TranslatedValue::kUninitialized);
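The GetRawValue change above returns a Smi whenever the stored HeapNumber (or float/double payload) holds a Smi-representable integer, and GetValue re-boxes Smi results as HeapNumbers because a materialized JSObject field may require HeapObject representation. A minimal sketch of the "fits in a Smi" test that DoubleToSmiInteger performs; the helper name and the 31-bit range are assumptions for illustration (the actual Smi width depends on the build configuration):

#include <cmath>
#include <cstdint>

// Returns true if |value| is an integral double, not -0, and within the
// assumed 31-bit Smi range; on success writes the integer to |out|.
bool FitsInSmi(double value, int* out) {
  constexpr int64_t kSmiMax = (int64_t{1} << 30) - 1;
  constexpr int64_t kSmiMin = -(int64_t{1} << 30);
  if (std::trunc(value) != value) return false;          // not an integer (or NaN)
  if (value == 0.0 && std::signbit(value)) return false;  // -0 needs a HeapNumber
  if (value < kSmiMin || value > kSmiMax) return false;
  *out = static_cast<int>(value);
  return true;
}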
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 41ef7d2336..ee6978e629 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -39,13 +39,17 @@ enum class BuiltinContinuationMode;
class TranslatedValue {
public:
- // Allocation-less getter of the value.
+ // Allocation-free getter of the value.
// Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
- // to get the value.
+ // to get the value. In the case of numbers, returns a Smi if possible.
Object GetRawValue() const;
- // Getter for the value, takes care of materializing the subgraph
- // reachable from this value.
+ // Convenience wrapper around GetRawValue (checked).
+ int GetSmiValue() const;
+
+ // Returns the value, possibly materializing it first (and the whole subgraph
+ // reachable from this value). In the case of numbers, returns a Smi if
+ // possible.
Handle<Object> GetValue();
bool IsMaterializedObject() const;
@@ -102,15 +106,14 @@ class TranslatedValue {
static TranslatedValue NewInvalid(TranslatedState* container);
Isolate* isolate() const;
- void MaterializeSimple();
void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
- void set_initialized_storage(Handle<Object> storage);
+ void set_initialized_storage(Handle<HeapObject> storage);
void mark_finished() { materialization_state_ = kFinished; }
void mark_allocated() { materialization_state_ = kAllocated; }
- Handle<Object> GetStorage() {
- DCHECK_NE(kUninitialized, materialization_state());
+ Handle<HeapObject> storage() {
+ DCHECK_NE(materialization_state(), kUninitialized);
return storage_;
}
@@ -120,9 +123,9 @@ class TranslatedValue {
// objects and constructing handles (to get
// to the isolate).
- Handle<Object> storage_; // Contains the materialized value or the
- // byte-array that will be later morphed into
- // the materialized object.
+ Handle<HeapObject> storage_; // Contains the materialized value or the
+ // byte-array that will be later morphed into
+ // the materialized object.
struct MaterializedObjectInfo {
int id_;
@@ -376,7 +379,7 @@ class TranslatedState {
int* value_index, std::stack<int>* worklist);
void EnsureCapturedObjectAllocatedAt(int object_index,
std::stack<int>* worklist);
- Handle<Object> InitializeObjectAt(TranslatedValue* slot);
+ Handle<HeapObject> InitializeObjectAt(TranslatedValue* slot);
void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
const DisallowHeapAllocation& no_allocation);
void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
@@ -392,6 +395,9 @@ class TranslatedState {
TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
TranslatedValue* GetValueByObjectIndex(int object_index);
Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
+ TranslatedValue* GetResolvedSlot(TranslatedFrame* frame, int value_index);
+ TranslatedValue* GetResolvedSlotAndAdvance(TranslatedFrame* frame,
+ int* value_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
static uint64_t GetUInt64Slot(Address fp, int slot_index);
@@ -773,7 +779,7 @@ class FrameDescription {
intptr_t continuation_;
// This must be at the end of the object as the object is allocated larger
- // than it's definition indicate to extend this array.
+ // than its definition indicates to extend this array.
intptr_t frame_content_[1];
intptr_t* GetFrameSlotPointer(unsigned offset) {
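The corrected comment above describes the trailing-array trick used by FrameDescription: frame_content_[1] is declared with one element, but the object is allocated with extra space so the array covers every frame slot. A minimal sketch of that over-allocation pattern, with illustrative names and assuming at least one slot:

#include <cstdint>
#include <cstdlib>
#include <new>

// The struct ends with a one-element array; AllocateFrameLayout reserves
// room for slot_count entries so later code can index past the declared size.
struct FrameLayout {
  uint32_t slot_count;
  intptr_t frame_content[1];  // extended by over-allocation
};

FrameLayout* AllocateFrameLayout(uint32_t slot_count) {
  size_t bytes = sizeof(FrameLayout) + (slot_count - 1) * sizeof(intptr_t);
  void* memory = std::malloc(bytes);
  return new (memory) FrameLayout{slot_count, {0}};
}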
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 0a8798dcc8..f8959752b7 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -45,10 +45,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
- __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
+ __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
- __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
}
}
{
@@ -59,7 +59,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
  // The bailout id is passed in r29 by the caller.
__ mr(r5, r29);
@@ -98,8 +98,9 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r5, MemOperand(sp, i * kPointerSize));
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r5, MemOperand(r4, offset));
}
@@ -109,7 +110,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
@@ -143,7 +145,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ bind(&pop_loop);
__ pop(r7);
__ StoreP(r7, MemOperand(r6, 0));
- __ addi(r6, r6, Operand(kPointerSize));
+ __ addi(r6, r6, Operand(kSystemPointerSize));
__ bind(&pop_loop_header);
__ cmp(r5, sp);
__ bne(&pop_loop);
@@ -167,7 +169,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
__ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
- __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
@@ -187,7 +189,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ cmpi(r6, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
- __ addi(r7, r7, Operand(kPointerSize));
+ __ addi(r7, r7, Operand(kSystemPointerSize));
__ bind(&outer_loop_header);
__ cmp(r7, r4);
__ blt(&outer_push_loop);
@@ -213,7 +215,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
DCHECK(!(scratch.bit() & restored_regs));
__ mr(scratch, r5);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(scratch, offset));
}
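The hunks above replace kPointerSize with kSystemPointerSize for stack slots and saved registers: those are always machine-word sized, whereas a tagged slot can be narrower when pointer compression is enabled. A small sketch of the distinction; the constants are illustrative and the V8_COMPRESS_POINTERS define is used here only to mirror the build flag:

#include <cstddef>

constexpr int kSystemPointerSize = sizeof(void*);  // 8 on 64-bit targets
#ifdef V8_COMPRESS_POINTERS
constexpr int kTaggedSize = 4;                     // compressed tagged values
#else
constexpr int kTaggedSize = kSystemPointerSize;
#endif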
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index c2a82a5837..3f8f40a244 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -2252,10 +2252,10 @@ void DisassemblingDecoder::VisitNEONExtract(Instruction* instr) {
void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
const char* mnemonic = nullptr;
const char* form = nullptr;
- const char* form_1v = "{'Vt.%1$s}, ['Xns]";
- const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
- const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
- const char* form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ const char* form_1v = "{'Vt.%s}, ['Xns]";
+ const char* form_2v = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+ const char* form_3v = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+ const char* form_4v = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s, 'Vt4.%s}, ['Xns]";
NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
@@ -2349,11 +2349,10 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
Instruction* instr) {
const char* mnemonic = nullptr;
const char* form = nullptr;
- const char* form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
- const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
- const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
- const char* form_4v =
- "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+ const char* form_1v = "{'Vt.%s}, ['Xns], 'Xmr1";
+ const char* form_2v = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmr2";
+ const char* form_3v = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmr3";
+ const char* form_4v = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s, 'Vt4.%s}, ['Xns], 'Xmr4";
NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
@@ -2561,7 +2560,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
break;
case NEON_LD4R:
mnemonic = "ld4r";
- form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s, 'Vt4.%s}, ['Xns]";
break;
default:
break;
@@ -2722,7 +2721,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
break;
case NEON_LD4R_post:
mnemonic = "ld4r";
- form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s, 'Vt4.%s}, ['Xns], 'Xmz4";
break;
default:
break;
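The format-string changes above drop POSIX positional specifiers: "%1$s" reuses the first substitution for every lane, while plain "%s" consumes one substitution per placeholder, so the formatter must now be handed the lane format once per operand. The sketch below illustrates the difference with C printf (the positional form is a POSIX extension, used here only to show the argument-consumption difference; V8's disassembler performs its own substitution):

#include <cstdio>

int main() {
  const char* lane = "8b";
  // Positional form: one argument reused for every placeholder.
  printf("{v0.%1$s, v1.%1$s}\n", lane);
  // Plain form: one argument per placeholder.
  printf("{v0.%s, v1.%s}\n", lane, lane);
  return 0;
}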
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index 785c535e0a..2b3c9ede00 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -12,6 +12,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/code-reference.h"
+#include "src/codegen/external-reference-encoder.h"
#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -20,7 +21,6 @@
#include "src/ic/ic.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-common.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-code-manager.h"
@@ -222,7 +222,11 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
} else if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- relocinfo->target_object().ShortPrint(&accumulator);
+ if (relocinfo->host().is_null()) {
+ relocinfo->target_object_no_host(isolate).ShortPrint(&accumulator);
+ } else {
+ relocinfo->target_object().ShortPrint(&accumulator);
+ }
std::unique_ptr<char[]> obj_name = accumulator.ToCString();
const bool is_compressed = RelocInfo::IsCompressedEmbeddedObject(rmode);
out->AddFormatted(" ;; %sobject: %s",
@@ -245,9 +249,9 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
}
} else if (RelocInfo::IsWasmStubCall(rmode) && host.is_wasm_code()) {
// Host is isolate-independent, try wasm native module instead.
- const char* runtime_stub_name =
- host.as_wasm_code()->native_module()->GetRuntimeStubName(
- relocinfo->wasm_stub_call_address());
+ const char* runtime_stub_name = GetRuntimeStubName(
+ host.as_wasm_code()->native_module()->GetRuntimeStubId(
+ relocinfo->wasm_stub_call_address()));
out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
} else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
isolate->deoptimizer_data() != nullptr) {
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 129c2d72e1..5e0c5c65e2 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -1066,6 +1066,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x50:
+ AppendToBuffer("vmovmskps %s,%s", NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ current++;
+ break;
case 0x51:
AppendToBuffer("vsqrtps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1266,6 +1271,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", Imm8(current));
current++;
break;
+ case 0xD7:
+ AppendToBuffer("vpmovmskb %s,%s", NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ current++;
+ break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
@@ -2347,6 +2357,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0xB1) {
data++;
data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
+ } else if (*data == 0xD7) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pmovmskb %s,%s", NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
byte op = *data;
data++;
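The new pmovmskb/vpmovmskb/vmovmskps cases above decode instructions that gather the sign bit of each packed element into a general-purpose register, which is why the destination is printed as a CPU register rather than an XMM register. A small sketch of the operation using the standard SSE2 intrinsic (x86-only, for illustration):

#include <cstdio>
#include <emmintrin.h>  // SSE2

int main() {
  // pmovmskb: collect the top bit of each of the 16 bytes into a 16-bit mask.
  __m128i bytes = _mm_set_epi8(-1, 0, -1, 0, -1, 0, -1, 0,
                               -1, 0, -1, 0, -1, 0, -1, 0);
  int mask = _mm_movemask_epi8(bytes);
  printf("pmovmskb mask: 0x%04x\n", mask);  // 0xaaaa: every other byte is negative
  return 0;
}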
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 4a9d029a05..32caba2da8 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -29,6 +29,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-aggregate-error-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
@@ -72,6 +73,7 @@
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers-tq.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
@@ -279,8 +281,6 @@ void Symbol::SymbolVerify(Isolate* isolate) {
CHECK_IMPLIES(IsPrivateBrand(), IsPrivateName());
}
-USE_TORQUE_VERIFIER(ByteArray)
-
void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
// TODO(oth): Walk bytecodes and immediate values to validate sanity.
// - All bytecodes are known and well formed.
@@ -296,10 +296,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
CHECK(handler_table().IsByteArray());
}
-USE_TORQUE_VERIFIER(FreeSpace)
-
-USE_TORQUE_VERIFIER(HeapNumber)
-
USE_TORQUE_VERIFIER(FeedbackVector)
USE_TORQUE_VERIFIER(JSReceiver)
@@ -519,10 +515,6 @@ void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(FixedArrayBase)
-
-USE_TORQUE_VERIFIER(FixedArray)
-
void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::WeakFixedArrayVerify(*this, isolate);
for (int i = 0; i < length(); i++) {
@@ -530,8 +522,6 @@ void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(WeakArrayList)
-
void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::PropertyArrayVerify(*this, isolate);
if (length() == 0) {
@@ -1097,13 +1087,6 @@ void JSFinalizationRegistry::JSFinalizationRegistryVerify(Isolate* isolate) {
next_dirty().IsJSFinalizationRegistry());
}
-void JSFinalizationRegistryCleanupIterator::
- JSFinalizationRegistryCleanupIteratorVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationRegistryCleanupIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, finalization_registry());
-}
-
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSWeakMapVerify(*this, isolate);
CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
@@ -1317,8 +1300,6 @@ void JSDataView::JSDataViewVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(Foreign)
-
void AsyncGeneratorRequest::AsyncGeneratorRequestVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::AsyncGeneratorRequestVerify(*this, isolate);
CHECK_GE(resume_mode(), JSGeneratorObject::kNext);
@@ -1501,8 +1482,6 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
// TODO(ishell): check handler integrity
}
-USE_TORQUE_VERIFIER(AccessorInfo)
-
void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate);
CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
@@ -1526,8 +1505,6 @@ void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(nested_site().IsAllocationSite() || nested_site() == Smi::zero());
}
-USE_TORQUE_VERIFIER(AllocationMemento)
-
void Script::ScriptVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
for (int i = 0; i < shared_function_infos().length(); ++i) {
@@ -1571,32 +1548,6 @@ void PreparseData::PreparseDataVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(InterpreterData)
-#ifdef V8_INTL_SUPPORT
-
-USE_TORQUE_VERIFIER(JSV8BreakIterator)
-
-USE_TORQUE_VERIFIER(JSCollator)
-
-USE_TORQUE_VERIFIER(JSDateTimeFormat)
-
-USE_TORQUE_VERIFIER(JSDisplayNames)
-
-USE_TORQUE_VERIFIER(JSListFormat)
-
-USE_TORQUE_VERIFIER(JSLocale)
-
-USE_TORQUE_VERIFIER(JSNumberFormat)
-
-USE_TORQUE_VERIFIER(JSPluralRules)
-
-USE_TORQUE_VERIFIER(JSRelativeTimeFormat)
-
-USE_TORQUE_VERIFIER(JSSegmentIterator)
-
-USE_TORQUE_VERIFIER(JSSegmenter)
-
-#endif // V8_INTL_SUPPORT
-
#endif // VERIFY_HEAP
#ifdef DEBUG
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index e36171edfd..00ef81f56a 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -24,6 +24,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-aggregate-error-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/objects-inl.h"
@@ -69,6 +70,7 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
@@ -457,12 +459,13 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
-void PrintEmbedderData(std::ostream& os, EmbedderDataSlot slot) {
+void PrintEmbedderData(const Isolate* isolate, std::ostream& os,
+ EmbedderDataSlot slot) {
DisallowHeapAllocation no_gc;
Object value = slot.load_tagged();
os << Brief(value);
void* raw_pointer;
- if (slot.ToAlignedPointer(&raw_pointer)) {
+ if (slot.ToAlignedPointer(isolate, &raw_pointer)) {
os << ", aligned pointer: " << raw_pointer;
}
}
@@ -566,10 +569,11 @@ static void JSObjectPrintBody(std::ostream& os,
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
+ const Isolate* isolate = GetIsolateForPtrCompr(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
- PrintEmbedderData(os, EmbedderDataSlot(obj, i));
+ PrintEmbedderData(isolate, os, EmbedderDataSlot(obj, i));
}
os << "\n }\n";
}
@@ -639,6 +643,12 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
+void JSAggregateError::JSAggregateErrorPrint(std::ostream& os) {
+ JSObjectPrintHeader(os, *this, "JSAggregateError");
+ os << "\n - errors: " << Brief(errors());
+ JSObjectPrintBody(os, *this);
+}
+
void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSArray");
os << "\n - length: " << Brief(this->length());
@@ -705,12 +715,6 @@ void DescriptorArray::DescriptorArrayPrint(std::ostream& os) {
PrintDescriptors(os);
}
-void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
- std::ostream& os) { // NOLINT
- PrintHeader(os, "AliasedArgumentsEntry");
- os << "\n - aliased_context_slot: " << aliased_context_slot();
-}
-
namespace {
void PrintFixedArrayWithHeader(std::ostream& os, FixedArray array,
const char* type) {
@@ -765,13 +769,14 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
} // namespace
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
+ const Isolate* isolate = GetIsolateForPtrCompr(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
EmbedderDataSlot end(*this, length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
os << "\n ";
- PrintEmbedderData(os, slot);
+ PrintEmbedderData(isolate, os, slot);
}
os << "\n";
}
@@ -1187,13 +1192,6 @@ void JSFinalizationRegistry::JSFinalizationRegistryPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSFinalizationRegistryCleanupIterator::
- JSFinalizationRegistryCleanupIteratorPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSFinalizationRegistryCleanupIterator");
- os << "\n - finalization_registry: " << Brief(finalization_registry());
- JSObjectPrintBody(os, *this);
-}
-
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
@@ -1384,7 +1382,12 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data());
- os << "\n - code (from data): " << Brief(GetCode());
+ os << "\n - code (from data): ";
+ if (Heap::InOffThreadSpace(*this)) {
+ os << "<not available off-thread>";
+ } else {
+ os << Brief(GetCode());
+ }
PrintSourceCode(os);
// Script files are often large, thus only print their {Brief} representation.
os << "\n - script: " << Brief(script());
@@ -1427,12 +1430,6 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void Cell::CellPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "Cell");
- os << "\n - value: " << Brief(value());
- os << "\n";
-}
-
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "PropertyCell");
os << "\n - name: ";
@@ -1505,17 +1502,6 @@ void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "AccessorInfo");
- os << "\n - name: " << Brief(name());
- os << "\n - flags: " << flags();
- os << "\n - getter: " << Brief(getter());
- os << "\n - setter: " << Brief(setter());
- os << "\n - js_getter: " << Brief(js_getter());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
void CallbackTask::CallbackTaskPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "CallbackTask");
os << "\n - callback: " << Brief(callback());
@@ -1684,6 +1670,85 @@ void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmStruct");
+ wasm::StructType* struct_type = type();
+ os << "\n - fields (" << struct_type->field_count() << "):";
+ for (uint32_t i = 0; i < struct_type->field_count(); i++) {
+ wasm::ValueType field = struct_type->field(i);
+ os << "\n - " << field.short_name() << ": ";
+ uint32_t field_offset = struct_type->field_offset(i);
+ Address field_address = RawField(field_offset).address();
+ switch (field.kind()) {
+ case wasm::ValueType::kI32:
+ os << base::ReadUnalignedValue<int32_t>(field_address);
+ break;
+ case wasm::ValueType::kI64:
+ os << base::ReadUnalignedValue<int64_t>(field_address);
+ break;
+ case wasm::ValueType::kF32:
+ os << base::ReadUnalignedValue<float>(field_address);
+ break;
+ case wasm::ValueType::kF64:
+ os << base::ReadUnalignedValue<double>(field_address);
+ break;
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
+ UNIMPLEMENTED(); // TODO(7748): Implement.
+ break;
+ }
+ }
+ os << "\n";
+}
+
+void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmArray");
+ wasm::ArrayType* array_type = type();
+ uint32_t len = length();
+ os << "\n - type: " << array_type->element_type().type_name();
+ os << "\n - length: " << len;
+ Address data_ptr = ptr() + WasmArray::kHeaderSize - kHeapObjectTag;
+ switch (array_type->element_type().kind()) {
+ case wasm::ValueType::kI32:
+ PrintTypedArrayElements(os, reinterpret_cast<int32_t*>(data_ptr), len,
+ true);
+ break;
+ case wasm::ValueType::kI64:
+ PrintTypedArrayElements(os, reinterpret_cast<int64_t*>(data_ptr), len,
+ true);
+ break;
+ case wasm::ValueType::kF32:
+ PrintTypedArrayElements(os, reinterpret_cast<float*>(data_ptr), len,
+ true);
+ break;
+ case wasm::ValueType::kF64:
+ PrintTypedArrayElements(os, reinterpret_cast<double*>(data_ptr), len,
+ true);
+ break;
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kAnyRef:
+ case wasm::ValueType::kFuncRef:
+ case wasm::ValueType::kNullRef:
+ case wasm::ValueType::kExnRef:
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
+ UNIMPLEMENTED(); // TODO(7748): Implement.
+ break;
+ }
+ os << "\n";
+}
+
void WasmDebugInfo::WasmDebugInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmDebugInfo");
os << "\n - wasm_instance: " << Brief(wasm_instance());
@@ -1850,15 +1915,6 @@ void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "AccessCheckInfo");
- os << "\n - callback: " << Brief(callback());
- os << "\n - named_interceptor: " << Brief(named_interceptor());
- os << "\n - indexed_interceptor: " << Brief(indexed_interceptor());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "CallHandlerInfo");
os << "\n - callback: " << Brief(callback());
@@ -1869,17 +1925,6 @@ void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "InterceptorInfo");
- os << "\n - getter: " << Brief(getter());
- os << "\n - setter: " << Brief(setter());
- os << "\n - query: " << Brief(query());
- os << "\n - deleter: " << Brief(deleter());
- os << "\n - enumerator: " << Brief(enumerator());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
void FunctionTemplateInfo::FunctionTemplateInfoPrint(
std::ostream& os) { // NOLINT
PrintHeader(os, "FunctionTemplateInfo");
@@ -2186,24 +2231,12 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "DebugInfo");
- os << "\n - flags: " << flags();
- os << "\n - debugger_hints: " << debugger_hints();
- os << "\n - shared: " << Brief(shared());
- os << "\n - script: " << Brief(script());
- os << "\n - original bytecode array: " << Brief(original_bytecode_array());
- os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
- os << "\n - break_points: ";
- break_points().FixedArrayPrint(os);
- os << "\n - coverage_info: " << Brief(coverage_info());
-}
-
void StackTraceFrame::StackTraceFramePrint(std::ostream& os) { // NOLINT
PrintHeader(os, "StackTraceFrame");
os << "\n - frame_index: " << frame_index();
os << "\n - id: " << id();
os << "\n - frame_info: " << Brief(frame_info());
+ os << "\n";
}
void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
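The new WasmStruct/WasmArray printers above read field payloads with base::ReadUnalignedValue at offsets computed from the wasm type, since struct fields are packed without regard to host alignment. A minimal sketch of such an unaligned read, assuming a trivially copyable T (illustrative helper, not the V8 API):

#include <cstring>

// Copy sizeof(T) bytes from an arbitrary address instead of dereferencing,
// avoiding alignment faults on strict targets.
template <typename T>
T ReadUnaligned(const void* address) {
  T result;
  std::memcpy(&result, address, sizeof(T));
  return result;
}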
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index 99767f17dc..b682bb8c5a 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -74,6 +74,7 @@ class Decoder {
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
+ void DecodeExt0(Instruction* instr);
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
void DecodeExt3(Instruction* instr);
@@ -219,6 +220,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 6;
}
+ case 'U': { // UIM
+ int32_t value = instr->Bits(20, 16);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 3;
+ }
case 'l': {
// Link (LK) Bit 0
if (instr->Bit(0) == 1) {
@@ -351,6 +357,31 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
Format(instr, buffer);
}
+void Decoder::DecodeExt0(Instruction* instr) {
+ switch (EXT0 | (instr->BitField(10, 0))) {
+ case VSPLTB: {
+ Format(instr, "vspltb 'Dt, 'Db, 'UIM");
+ break;
+ }
+ case VSPLTW: {
+ Format(instr, "vspltw 'Dt, 'Db, 'UIM");
+ break;
+ }
+ case VSPLTH: {
+ Format(instr, "vsplth 'Dt, 'Db, 'UIM");
+ break;
+ }
+ case VSRO: {
+ Format(instr, "vsro 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VOR: {
+ Format(instr, "vor 'Dt, 'Da, 'Db");
+ break;
+ }
+ }
+}
+
void Decoder::DecodeExt1(Instruction* instr) {
switch (EXT1 | (instr->BitField(10, 1))) {
case MCRF: {
@@ -832,6 +863,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "sthux 'rs, 'ra, 'rb");
return;
}
+ case STVX: {
+ Format(instr, "stvx 'Dt, 'ra, 'rb");
+ return;
+ }
case LWZX: {
Format(instr, "lwzx 'rt, 'ra, 'rb");
return;
@@ -876,6 +911,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "lwarx 'rt, 'ra, 'rb");
return;
}
+ case LVX: {
+ Format(instr, "lvx 'Dt, 'ra, 'rb");
+ return;
+ }
#if V8_TARGET_ARCH_PPC64
case LDX: {
Format(instr, "ldx 'rt, 'ra, 'rb");
@@ -1268,6 +1307,10 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
Format(instr, "b'l'a 'target26");
break;
}
+ case EXT0: {
+ DecodeExt0(instr);
+ break;
+ }
case EXT1: {
DecodeExt1(instr);
break;
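The new 'U' (UIM) format case above extracts instr->Bits(20, 16), i.e. a five-bit field of the instruction word. A minimal sketch of that kind of field extraction (free-standing helper with an arbitrary encoding, for illustration only):

#include <cstdint>
#include <cstdio>

// Shift the instruction right by the low bit and mask off the field width.
uint32_t Bits(uint32_t instr, int hi, int lo) {
  int width = hi - lo + 1;
  return (instr >> lo) & ((1u << width) - 1u);
}

int main() {
  uint32_t instr = 0x10010a8c;               // arbitrary word for illustration
  printf("UIM = %u\n", Bits(instr, 20, 16));  // prints 1
  return 0;
}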
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index c39adcf710..f3b9a753af 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -16,37 +16,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index d603e6169c..4d0760b17c 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -1490,6 +1490,10 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
+ case 0xD7:
+ AppendToBuffer("vpmovmskb %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
@@ -2124,7 +2128,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
UnimplementedInstruction();
}
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ // Not every opcode here has an XMM register as the dst operand.
+ const char* regop_reg = opcode == 0xD7 ? NameOfCPURegister(regop)
+ : NameOfXMMRegister(regop);
+ AppendToBuffer("%s %s,", mnemonic, regop_reg);
current += PrintRightXMMOperand(current);
if (opcode == 0xC2) {
const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index a409dc97d2..019542b12d 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -4589,7 +4589,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case 0: {
// vmov.i32 Qd, #<imm>
int vd = instr->VFPDRegValue(kSimd128Precision);
- uint64_t imm = instr->Bit(24, 24) << 7; // i
+ uint64_t imm = instr->Bit(24) << 7; // i
imm |= instr->Bits(18, 16) << 4; // imm3
imm |= instr->Bits(3, 0); // imm4
imm |= imm << 32;
@@ -5405,7 +5405,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
bool dst_unsigned = instr->Bit(6) != 0;
- bool src_unsigned = instr->Bit(7, 6) == 0b11;
+ bool src_unsigned = instr->Bits(7, 6) == 0b11;
DCHECK_IMPLIES(src_unsigned, dst_unsigned);
switch (size) {
case Neon8: {
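The simulator fix above (Bit(24) instead of Bit(24, 24)) feeds the vmov.i32 immediate assembly: one 'i' bit, three 'imm3' bits and four 'imm4' bits form an 8-bit constant that the quoted code then replicates into the upper 32 bits. A small sketch of that assembly step, with arbitrary field values and mirroring only what the snippet itself does:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t i = 1, imm3 = 0b010, imm4 = 0b1100;
  uint64_t imm = (i << 7) | (imm3 << 4) | imm4;  // 0xac
  imm |= imm << 32;                              // replicate into the upper half
  printf("replicated immediate: 0x%016llx\n", (unsigned long long)imm);
  return 0;
}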
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 8c3f774319..4809eeca80 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -258,14 +258,14 @@ class CWasmEntryFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
};
-class WasmCompiledFrameConstants : public TypedFrameConstants {
+class WasmFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
DEFINE_TYPED_FRAME_SIZES(1);
};
-class WasmExitFrameConstants : public WasmCompiledFrameConstants {
+class WasmExitFrameConstants : public WasmFrameConstants {
public:
// FP-relative.
static const int kCallingPCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index e73cca4f05..ecd45abeb1 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -236,15 +236,11 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
-inline WasmCompiledFrame::WasmCompiledFrame(StackFrameIteratorBase* iterator)
+inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
inline WasmExitFrame::WasmExitFrame(StackFrameIteratorBase* iterator)
- : WasmCompiledFrame(iterator) {}
-
-inline WasmInterpreterEntryFrame::WasmInterpreterEntryFrame(
- StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : WasmFrame(iterator) {}
inline WasmDebugBreakFrame::WasmDebugBreakFrame(
StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 7d405efa5e..b6fc4cb754 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -21,7 +21,7 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/visitors.h"
-#include "src/snapshot/snapshot.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/strings/string-stream.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
@@ -553,13 +553,11 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (wasm_code != nullptr) {
switch (wasm_code->kind()) {
case wasm::WasmCode::kFunction:
- return WASM_COMPILED;
+ return WASM;
case wasm::WasmCode::kWasmToCapiWrapper:
return WASM_EXIT;
case wasm::WasmCode::kWasmToJsWrapper:
return WASM_TO_JS;
- case wasm::WasmCode::kInterpreterEntry:
- return WASM_INTERPRETER_ENTRY;
default:
UNREACHABLE();
}
@@ -592,7 +590,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case Code::WASM_FUNCTION:
case Code::WASM_TO_CAPI_FUNCTION:
case Code::WASM_TO_JS_FUNCTION:
- case Code::WASM_INTERPRETER_ENTRY:
// Never appear as on-heap {Code} objects.
UNREACHABLE();
default:
@@ -619,7 +616,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case CONSTRUCT:
case ARGUMENTS_ADAPTOR:
case WASM_TO_JS:
- case WASM_COMPILED:
+ case WASM:
case WASM_COMPILE_LAZY:
case WASM_EXIT:
case WASM_DEBUG_BREAK:
@@ -974,18 +971,17 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
break;
case WASM_TO_JS:
- case WASM_COMPILED:
- case WASM_INTERPRETER_ENTRY:
+ case WASM:
case WASM_COMPILE_LAZY:
- frame_header_size = WasmCompiledFrameConstants::kFixedFrameSizeFromFp;
+ frame_header_size = WasmFrameConstants::kFixedFrameSizeFromFp;
break;
case WASM_EXIT:
// The last value in the frame header is the calling PC, which should
// not be visited.
static_assert(WasmExitFrameConstants::kFixedSlotCountFromFp ==
- WasmCompiledFrameConstants::kFixedSlotCountFromFp + 1,
- "WasmExitFrame has one slot more than WasmCompiledFrame");
- frame_header_size = WasmCompiledFrameConstants::kFixedFrameSizeFromFp;
+ WasmFrameConstants::kFixedSlotCountFromFp + 1,
+ "WasmExitFrame has one slot more than WasmFrame");
+ frame_header_size = WasmFrameConstants::kFixedFrameSizeFromFp;
break;
case OPTIMIZED:
case INTERPRETED:
@@ -1396,29 +1392,25 @@ Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
}
FrameSummary::WasmFrameSummary::WasmFrameSummary(
- Isolate* isolate, FrameSummary::Kind kind,
- Handle<WasmInstanceObject> instance, bool at_to_number_conversion)
- : FrameSummaryBase(isolate, kind),
+ Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
+ int code_offset, bool at_to_number_conversion)
+ : FrameSummaryBase(isolate, WASM),
wasm_instance_(instance),
- at_to_number_conversion_(at_to_number_conversion) {}
+ at_to_number_conversion_(at_to_number_conversion),
+ code_(code),
+ code_offset_(code_offset) {}
Handle<Object> FrameSummary::WasmFrameSummary::receiver() const {
return wasm_instance_->GetIsolate()->global_proxy();
}
-#define WASM_SUMMARY_DISPATCH(type, name) \
- type FrameSummary::WasmFrameSummary::name() const { \
- DCHECK(kind() == Kind::WASM_COMPILED || kind() == Kind::WASM_INTERPRETED); \
- return kind() == Kind::WASM_COMPILED \
- ? static_cast<const WasmCompiledFrameSummary*>(this)->name() \
- : static_cast<const WasmInterpretedFrameSummary*>(this) \
- ->name(); \
- }
-
-WASM_SUMMARY_DISPATCH(uint32_t, function_index)
-WASM_SUMMARY_DISPATCH(int, byte_offset)
+uint32_t FrameSummary::WasmFrameSummary::function_index() const {
+ return code()->index();
+}
-#undef WASM_SUMMARY_DISPATCH
+int FrameSummary::WasmFrameSummary::byte_offset() const {
+ return code_->GetSourcePositionBefore(code_offset());
+}
int FrameSummary::WasmFrameSummary::SourcePosition() const {
const wasm::WasmModule* module = wasm_instance()->module_object().module();
@@ -1442,42 +1434,6 @@ Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
return handle(wasm_instance()->native_context(), isolate());
}
-FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
- Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
- int code_offset, bool at_to_number_conversion)
- : WasmFrameSummary(isolate, WASM_COMPILED, instance,
- at_to_number_conversion),
- code_(code),
- code_offset_(code_offset) {}
-
-uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
- return code()->index();
-}
-
-int FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- const wasm::WasmCode* code, int offset) {
- int position = 0;
- // Subtract one because the current PC is one instruction after the call site.
- offset--;
- for (SourcePositionTableIterator iterator(code->source_positions());
- !iterator.done() && iterator.code_offset() <= offset;
- iterator.Advance()) {
- position = iterator.source_position().ScriptOffset();
- }
- return position;
-}
-
-int FrameSummary::WasmCompiledFrameSummary::byte_offset() const {
- return GetWasmSourcePosition(code_, code_offset());
-}
-
-FrameSummary::WasmInterpretedFrameSummary::WasmInterpretedFrameSummary(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t function_index, int byte_offset)
- : WasmFrameSummary(isolate, WASM_INTERPRETED, instance, false),
- function_index_(function_index),
- byte_offset_(byte_offset) {}
-
FrameSummary::~FrameSummary() {
#define FRAME_SUMMARY_DESTR(kind, type, field, desc) \
case kind: \
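The removed GetWasmSourcePosition above scanned the source position table for the last entry at or before the call's return address minus one; the replacement byte_offset() delegates the same lookup to WasmCode::GetSourcePositionBefore. A minimal sketch of that lookup over a sorted (code offset, source position) table, with illustrative types:

#include <cstdio>
#include <utility>
#include <vector>

// Keep the last entry whose code offset precedes the given offset; the PC
// points one instruction after the call site, hence the decrement.
int SourcePositionBefore(const std::vector<std::pair<int, int>>& table,
                         int code_offset) {
  int position = 0;
  code_offset--;
  for (const auto& entry : table) {
    if (entry.first > code_offset) break;
    position = entry.second;
  }
  return position;
}

int main() {
  std::vector<std::pair<int, int>> table = {{0, 10}, {8, 42}, {24, 99}};
  printf("%d\n", SourcePositionBefore(table, 12));  // prints 42
  return 0;
}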
@@ -1517,19 +1473,16 @@ FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
return frames[index];
}
-#define FRAME_SUMMARY_DISPATCH(ret, name) \
- ret FrameSummary::name() const { \
- switch (base_.kind()) { \
- case JAVA_SCRIPT: \
- return java_script_summary_.name(); \
- case WASM_COMPILED: \
- return wasm_compiled_summary_.name(); \
- case WASM_INTERPRETED: \
- return wasm_interpreted_summary_.name(); \
- default: \
- UNREACHABLE(); \
- return ret{}; \
- } \
+#define FRAME_SUMMARY_DISPATCH(ret, name) \
+ ret FrameSummary::name() const { \
+ switch (base_.kind()) { \
+ case JAVA_SCRIPT: \
+ return java_script_summary_.name(); \
+ case WASM: \
+ return wasm_summary_.name(); \
+ default: \
+ UNREACHABLE(); \
+ } \
}
FRAME_SUMMARY_DISPATCH(Handle<Object>, receiver)
@@ -1857,8 +1810,8 @@ Address InternalFrame::GetCallerStackPointer() const {
Code InternalFrame::unchecked_code() const { return Code(); }
-void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
- int index) const {
+void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM [");
accumulator->PrintName(script().name());
@@ -1885,51 +1838,61 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-Code WasmCompiledFrame::unchecked_code() const {
+Code WasmFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-void WasmCompiledFrame::Iterate(RootVisitor* v) const {
- IterateCompiledFrame(v);
-}
+void WasmFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-Address WasmCompiledFrame::GetCallerStackPointer() const {
+Address WasmFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-wasm::WasmCode* WasmCompiledFrame::wasm_code() const {
+wasm::WasmCode* WasmFrame::wasm_code() const {
return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
-WasmInstanceObject WasmCompiledFrame::wasm_instance() const {
- const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
+WasmInstanceObject WasmFrame::wasm_instance() const {
+ const int offset = WasmFrameConstants::kWasmInstanceOffset;
Object instance(Memory<Address>(fp() + offset));
return WasmInstanceObject::cast(instance);
}
-wasm::NativeModule* WasmCompiledFrame::native_module() const {
+wasm::NativeModule* WasmFrame::native_module() const {
return module_object().native_module();
}
-WasmModuleObject WasmCompiledFrame::module_object() const {
+WasmModuleObject WasmFrame::module_object() const {
return wasm_instance().module_object();
}
-uint32_t WasmCompiledFrame::function_index() const {
- return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
+uint32_t WasmFrame::function_index() const {
+ return FrameSummary::GetSingle(this).AsWasm().function_index();
}
-Script WasmCompiledFrame::script() const { return module_object().script(); }
+Script WasmFrame::script() const { return module_object().script(); }
-int WasmCompiledFrame::position() const {
- return FrameSummary::GetSingle(this).SourcePosition();
+int WasmFrame::position() const {
+ wasm::WasmCodeRefScope code_ref_scope;
+ const wasm::WasmModule* module = wasm_instance().module_object().module();
+ return GetSourcePosition(module, function_index(), byte_offset(),
+ at_to_number_conversion());
+}
+
+int WasmFrame::byte_offset() const {
+ wasm::WasmCode* code = wasm_code();
+ int offset = static_cast<int>(pc() - code->instruction_start());
+ return code->GetSourcePositionBefore(offset);
}
-Object WasmCompiledFrame::context() const {
- return wasm_instance().native_context();
+bool WasmFrame::is_inspectable() const {
+ wasm::WasmCodeRefScope code_ref_scope;
+ return wasm_code()->is_inspectable();
}
-void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
+Object WasmFrame::context() const { return wasm_instance().native_context(); }
+
+void WasmFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
// The {WasmCode*} escapes this scope via the {FrameSummary}, which is fine,
// since this code object is part of our stack.
@@ -1937,12 +1900,12 @@ void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
wasm::WasmCode* code = wasm_code();
int offset = static_cast<int>(pc() - code->instruction_start());
Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
- FrameSummary::WasmCompiledFrameSummary summary(
- isolate(), instance, code, offset, at_to_number_conversion());
+ FrameSummary::WasmFrameSummary summary(isolate(), instance, code, offset,
+ at_to_number_conversion());
functions->push_back(summary);
}
-bool WasmCompiledFrame::at_to_number_conversion() const {
+bool WasmFrame::at_to_number_conversion() const {
// Check whether our callee is a WASM_TO_JS frame, and this frame is at the
// ToNumber conversion call.
wasm::WasmCode* code =
@@ -1951,14 +1914,14 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
: nullptr;
if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
int offset = static_cast<int>(callee_pc() - code->instruction_start());
- int pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- code, offset);
- DCHECK(pos == 0 || pos == 1);
+ int pos = code->GetSourcePositionBefore(offset);
// The imported call has position 0, ToNumber has position 1.
- return !!pos;
+ // If there is no source position available, this is also not a ToNumber call.
+ DCHECK(pos == wasm::kNoCodePosition || pos == 0 || pos == 1);
+ return pos == 1;
}
-int WasmCompiledFrame::LookupExceptionHandlerInTable() {
+int WasmFrame::LookupExceptionHandlerInTable() {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
@@ -1970,70 +1933,6 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable() {
return -1;
}
-void WasmInterpreterEntryFrame::Iterate(RootVisitor* v) const {
- IterateCompiledFrame(v);
-}
-
-void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
- int index) const {
- PrintIndex(accumulator, mode, index);
- accumulator->Add("WASM INTERPRETER ENTRY [");
- Script script = this->script();
- accumulator->PrintName(script.name());
- accumulator->Add("]");
- if (mode != OVERVIEW) accumulator->Add("\n");
-}
-
-void WasmInterpreterEntryFrame::Summarize(
- std::vector<FrameSummary>* functions) const {
- Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
- std::vector<std::pair<uint32_t, int>> interpreted_stack =
- instance->debug_info().GetInterpretedStack(fp());
-
- for (auto& e : interpreted_stack) {
- FrameSummary::WasmInterpretedFrameSummary summary(isolate(), instance,
- e.first, e.second);
- functions->push_back(summary);
- }
-}
-
-Code WasmInterpreterEntryFrame::unchecked_code() const { return Code(); }
-
-int WasmInterpreterEntryFrame::NumberOfActiveFrames() const {
- Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
- return instance->debug_info().NumberOfActiveFrames(fp());
-}
-
-WasmInstanceObject WasmInterpreterEntryFrame::wasm_instance() const {
- const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
- Object instance(Memory<Address>(fp() + offset));
- return WasmInstanceObject::cast(instance);
-}
-
-WasmDebugInfo WasmInterpreterEntryFrame::debug_info() const {
- return wasm_instance().debug_info();
-}
-
-WasmModuleObject WasmInterpreterEntryFrame::module_object() const {
- return wasm_instance().module_object();
-}
-
-Script WasmInterpreterEntryFrame::script() const {
- return module_object().script();
-}
-
-int WasmInterpreterEntryFrame::position() const {
- return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
-}
-
-Object WasmInterpreterEntryFrame::context() const {
- return wasm_instance().native_context();
-}
-
-Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
// Nothing to iterate here. This will change once we support references in
// Liftoff.
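The hunks above replace the deleted FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition() helper with calls to wasm::WasmCode::GetSourcePositionBefore(). As a reminder of what that lookup does, here is a minimal standalone model based on the deleted helper: keep the last source-position entry at or below the adjusted code offset. The container type is a stand-in, not V8's SourcePositionTableIterator, and the WasmCode member is only assumed to behave equivalently (additionally reporting wasm::kNoCodePosition when no entry matches, per the new DCHECK above).

#include <utility>
#include <vector>

// Entries are {code offset, source position}, sorted by code offset.
int SourcePositionBefore(const std::vector<std::pair<int, int>>& table,
                         int code_offset) {
  int position = 0;
  // Subtract one because the current pc is one instruction after the call site.
  code_offset--;
  for (const auto& entry : table) {
    if (entry.first > code_offset) break;
    position = entry.second;
  }
  return position;
}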
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index bd50cda8f8..cd0156a887 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -66,10 +66,9 @@ class StackHandler {
V(CONSTRUCT_ENTRY, ConstructEntryFrame) \
V(EXIT, ExitFrame) \
V(OPTIMIZED, OptimizedFrame) \
- V(WASM_COMPILED, WasmCompiledFrame) \
+ V(WASM, WasmFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
- V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
V(WASM_EXIT, WasmExitFrame) \
@@ -181,12 +180,9 @@ class StackFrame {
bool is_exit() const { return type() == EXIT; }
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
- bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
+ bool is_wasm() const { return this->type() == WASM; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
bool is_wasm_debug_break() const { return type() == WASM_DEBUG_BREAK; }
- bool is_wasm_interpreter_entry() const {
- return type() == WASM_INTERPRETER_ENTRY;
- }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
@@ -209,10 +205,6 @@ class StackFrame {
(type == JAVA_SCRIPT_BUILTIN_CONTINUATION) ||
(type == JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH);
}
- bool is_wasm() const {
- Type type = this->type();
- return type == WASM_COMPILED || type == WASM_INTERPRETER_ENTRY;
- }
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
// Accessors.
@@ -458,12 +450,9 @@ class StandardFrame;
class V8_EXPORT_PRIVATE FrameSummary {
public:
// Subclasses for the different summary kinds:
-#define FRAME_SUMMARY_VARIANTS(F) \
- F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript) \
- F(WASM_COMPILED, WasmCompiledFrameSummary, wasm_compiled_summary_, \
- WasmCompiled) \
- F(WASM_INTERPRETED, WasmInterpretedFrameSummary, wasm_interpreted_summary_, \
- WasmInterpreted)
+#define FRAME_SUMMARY_VARIANTS(F) \
+ F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript) \
+ F(WASM, WasmFrameSummary, wasm_summary_, Wasm)
#define FRAME_SUMMARY_KIND(kind, type, field, desc) kind,
enum Kind { FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_KIND) };
@@ -514,14 +503,15 @@ class V8_EXPORT_PRIVATE FrameSummary {
};
class WasmFrameSummary : public FrameSummaryBase {
- protected:
- WasmFrameSummary(Isolate*, Kind, Handle<WasmInstanceObject>,
- bool at_to_number_conversion);
-
public:
+ WasmFrameSummary(Isolate*, Handle<WasmInstanceObject>, wasm::WasmCode*,
+ int code_offset, bool at_to_number_conversion);
+
Handle<Object> receiver() const;
uint32_t function_index() const;
- int byte_offset() const;
+ wasm::WasmCode* code() const { return code_; }
+ int code_offset() const { return code_offset_; }
+ V8_EXPORT_PRIVATE int byte_offset() const;
bool is_constructor() const { return false; }
bool is_subject_to_debugging() const { return true; }
int SourcePosition() const;
@@ -535,37 +525,10 @@ class V8_EXPORT_PRIVATE FrameSummary {
private:
Handle<WasmInstanceObject> wasm_instance_;
bool at_to_number_conversion_;
- };
-
- class WasmCompiledFrameSummary : public WasmFrameSummary {
- public:
- WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>,
- wasm::WasmCode*, int code_offset,
- bool at_to_number_conversion);
- uint32_t function_index() const;
- wasm::WasmCode* code() const { return code_; }
- int code_offset() const { return code_offset_; }
- int byte_offset() const;
- static int GetWasmSourcePosition(const wasm::WasmCode* code, int offset);
-
- private:
wasm::WasmCode* const code_;
int code_offset_;
};
- class WasmInterpretedFrameSummary : public WasmFrameSummary {
- public:
- WasmInterpretedFrameSummary(Isolate*, Handle<WasmInstanceObject>,
- uint32_t function_index, int byte_offset);
- uint32_t function_index() const { return function_index_; }
- int code_offset() const { return byte_offset_; }
- int byte_offset() const { return byte_offset_; }
-
- private:
- uint32_t function_index_;
- int byte_offset_;
- };
-
#define FRAME_SUMMARY_CONS(kind, type, field, desc) \
FrameSummary(type summ) : field(summ) {} // NOLINT
FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CONS)
@@ -601,12 +564,6 @@ class V8_EXPORT_PRIVATE FrameSummary {
FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CAST)
#undef FRAME_SUMMARY_CAST
- bool IsWasm() const { return IsWasmCompiled() || IsWasmInterpreted(); }
- const WasmFrameSummary& AsWasm() const {
- if (IsWasmCompiled()) return AsWasmCompiled();
- return AsWasmInterpreted();
- }
-
private:
#define FRAME_SUMMARY_FIELD(kind, type, field, desc) type field;
union {
@@ -938,9 +895,9 @@ class BuiltinFrame final : public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
-class WasmCompiledFrame : public StandardFrame {
+class WasmFrame : public StandardFrame {
public:
- Type type() const override { return WASM_COMPILED; }
+ Type type() const override { return WASM; }
// GC support.
void Iterate(RootVisitor* v) const override;
@@ -956,24 +913,28 @@ class WasmCompiledFrame : public StandardFrame {
Code unchecked_code() const override;
// Accessors.
- WasmInstanceObject wasm_instance() const;
- wasm::NativeModule* native_module() const;
+ V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const;
+ V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
wasm::WasmCode* wasm_code() const;
uint32_t function_index() const;
Script script() const override;
+ // Byte position in the module, or asm.js source position.
int position() const override;
Object context() const override;
bool at_to_number_conversion() const;
+ // Byte offset in the function.
+ int byte_offset() const;
+ bool is_inspectable() const;
void Summarize(std::vector<FrameSummary>* frames) const override;
- static WasmCompiledFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_wasm_compiled());
- return static_cast<WasmCompiledFrame*>(frame);
+ static WasmFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm());
+ return static_cast<WasmFrame*>(frame);
}
protected:
- inline explicit WasmCompiledFrame(StackFrameIteratorBase* iterator);
+ inline explicit WasmFrame(StackFrameIteratorBase* iterator);
Address GetCallerStackPointer() const override;
@@ -982,7 +943,7 @@ class WasmCompiledFrame : public StandardFrame {
WasmModuleObject module_object() const;
};
-class WasmExitFrame : public WasmCompiledFrame {
+class WasmExitFrame : public WasmFrame {
public:
Type type() const override { return WASM_EXIT; }
static Address ComputeStackPointer(Address fp);
@@ -994,46 +955,6 @@ class WasmExitFrame : public WasmCompiledFrame {
friend class StackFrameIteratorBase;
};
-class WasmInterpreterEntryFrame final : public StandardFrame {
- public:
- Type type() const override { return WASM_INTERPRETER_ENTRY; }
-
- // GC support.
- void Iterate(RootVisitor* v) const override;
-
- // Printing support.
- void Print(StringStream* accumulator, PrintMode mode,
- int index) const override;
-
- void Summarize(std::vector<FrameSummary>* frames) const override;
-
- // Determine the code for the frame.
- Code unchecked_code() const override;
-
- // Accessors.
- int NumberOfActiveFrames() const;
- WasmDebugInfo debug_info() const;
- WasmInstanceObject wasm_instance() const;
-
- Script script() const override;
- int position() const override;
- Object context() const override;
-
- static WasmInterpreterEntryFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_wasm_interpreter_entry());
- return static_cast<WasmInterpreterEntryFrame*>(frame);
- }
-
- protected:
- inline explicit WasmInterpreterEntryFrame(StackFrameIteratorBase* iterator);
-
- Address GetCallerStackPointer() const override;
-
- private:
- friend class StackFrameIteratorBase;
- WasmModuleObject module_object() const;
-};
-
class WasmDebugBreakFrame final : public StandardFrame {
public:
Type type() const override { return WASM_DEBUG_BREAK; }
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 9861454d8c..3f815e24ca 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -188,7 +188,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
return isolate->PromoteScheduledException();
}
- Object result;
+ Handle<Object> result;
AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;
do { // Not really a loop, just makes it easier to break out early.
@@ -206,7 +206,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
T* p = reinterpret_cast<T*>(static_cast<int8_t*>(backing_store) + addr);
if (*p != value) {
- result = Smi::FromInt(WaitReturnValue::kNotEqual);
+ result = handle(Smi::FromInt(WaitReturnValue::kNotEqual), isolate);
callback_result = AtomicsWaitEvent::kNotEqual;
break;
}
@@ -244,7 +244,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
if (interrupted) {
Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
if (interrupt_object.IsException(isolate)) {
- result = interrupt_object;
+ result = handle(interrupt_object, isolate);
callback_result = AtomicsWaitEvent::kTerminatedExecution;
mutex_.Pointer()->Lock();
break;
@@ -264,7 +264,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
}
if (!node->waiting_) {
- result = Smi::FromInt(WaitReturnValue::kOk);
+ result = handle(Smi::FromInt(WaitReturnValue::kOk), isolate);
break;
}
@@ -272,7 +272,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
if (use_timeout) {
current_time = base::TimeTicks::Now();
if (current_time >= timeout_time) {
- result = Smi::FromInt(WaitReturnValue::kTimedOut);
+ result = handle(Smi::FromInt(WaitReturnValue::kTimedOut), isolate);
callback_result = AtomicsWaitEvent::kTimedOut;
break;
}
@@ -297,10 +297,10 @@ Object FutexEmulation::Wait(Isolate* isolate,
if (isolate->has_scheduled_exception()) {
CHECK_NE(callback_result, AtomicsWaitEvent::kTerminatedExecution);
- result = isolate->PromoteScheduledException();
+ result = handle(isolate->PromoteScheduledException(), isolate);
}
- return result;
+ return *result;
}
Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
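The futex-emulation.cc change above turns the local `result` from a raw Object into a Handle<Object>, presumably because the wait loop runs code that can allocate and trigger GC (HandleInterrupts(), PromoteScheduledException(), the AtomicsWait callback); a raw tagged pointer held across such calls could be left stale by a moving collector, whereas a handle is updated by the GC. A minimal sketch of the pattern (the function name is made up for illustration):

Object SomeRuntimeEntry(Isolate* isolate) {
  HandleScope scope(isolate);  // roots the handles created below
  Handle<Object> result = handle(Smi::FromInt(0), isolate);
  // ... work that may allocate or trigger GC, updating `result` via handle() ...
  return *result;  // unwrap the handle only once that work is done
}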
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 033d23d85b..bea08a16b8 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -42,6 +42,7 @@
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/stub-cache.h"
@@ -79,6 +80,7 @@
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/utils/address-map.h"
#include "src/utils/ostreams.h"
#include "src/utils/version.h"
#include "src/wasm/wasm-code-manager.h"
@@ -620,16 +622,16 @@ class FrameArrayBuilder {
if (is_constructor) flags |= FrameArray::kIsConstructor;
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
- if (V8_UNLIKELY(FLAG_detailed_error_stack_trace))
+ if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
parameters = summary.parameters();
+ }
elements_ = FrameArray::AppendJSFrame(
elements_, TheHoleToUndefined(isolate_, summary.receiver()), function,
abstract_code, offset, flags, parameters);
}
- void AppendWasmCompiledFrame(
- FrameSummary::WasmCompiledFrameSummary const& summary) {
+ void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
@@ -639,7 +641,7 @@ class FrameArrayBuilder {
flags |= FrameArray::kAsmJsAtNumberConversion;
}
} else {
- flags |= FrameArray::kIsWasmCompiledFrame;
+ flags |= FrameArray::kIsWasmFrame;
}
elements_ = FrameArray::AppendWasmFrame(
@@ -647,16 +649,6 @@ class FrameArrayBuilder {
summary.code_offset(), flags);
}
- void AppendWasmInterpretedFrame(
- FrameSummary::WasmInterpretedFrameSummary const& summary) {
- Handle<WasmInstanceObject> instance = summary.wasm_instance();
- int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->module_object().is_asm_js());
- elements_ = FrameArray::AppendWasmFrame(elements_, instance,
- summary.function_index(), {},
- summary.byte_offset(), flags);
- }
-
void AppendBuiltinExitFrame(BuiltinExitFrame* exit_frame) {
Handle<JSFunction> function = handle(exit_frame->function(), isolate_);
@@ -949,8 +941,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN:
- case StackFrame::WASM_COMPILED:
- case StackFrame::WASM_INTERPRETER_ENTRY: {
+ case StackFrame::WASM: {
// A standard frame may include many summarized frames (due to
// inlining).
std::vector<FrameSummary> frames;
@@ -968,18 +959,12 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
//=========================================================
auto const& java_script = summary.AsJavaScript();
builder.AppendJavaScriptFrame(java_script);
- } else if (summary.IsWasmCompiled()) {
- //=========================================================
- // Handle a Wasm compiled frame.
- //=========================================================
- auto const& wasm_compiled = summary.AsWasmCompiled();
- builder.AppendWasmCompiledFrame(wasm_compiled);
- } else if (summary.IsWasmInterpreted()) {
+ } else if (summary.IsWasm()) {
//=========================================================
- // Handle a Wasm interpreted frame.
+ // Handle a Wasm frame.
//=========================================================
- auto const& wasm_interpreted = summary.AsWasmInterpreted();
- builder.AppendWasmInterpretedFrame(wasm_interpreted);
+ auto const& wasm = summary.AsWasm();
+ builder.AppendWasmFrame(wasm);
}
}
break;
@@ -1061,7 +1046,6 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
}
- // TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
return builder.GetElementsAsStackTraceFrameArray();
}
@@ -1668,7 +1652,7 @@ Object Isolate::UnwindAndFindHandler() {
code.constant_pool(), return_sp, frame->fp());
}
- case StackFrame::WASM_COMPILED: {
+ case StackFrame::WASM: {
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
}
@@ -1680,7 +1664,7 @@ Object Isolate::UnwindAndFindHandler() {
// the code. It's not actually necessary to keep the code alive as it's
// currently being executed.
wasm::WasmCodeRefScope code_ref_scope;
- WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
+ WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
wasm::WasmCode* wasm_code =
wasm_engine()->code_manager()->LookupCode(frame->pc());
int offset = wasm_frame->LookupExceptionHandlerInTable();
@@ -1806,12 +1790,6 @@ Object Isolate::UnwindAndFindHandler() {
}
break;
- case StackFrame::WASM_INTERPRETER_ENTRY: {
- if (trap_handler::IsThreadInWasm()) {
- trap_handler::ClearThreadInWasm();
- }
- } break;
-
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
// Builtin continuation frames with catch can handle exceptions.
if (!catchable_by_js) break;
@@ -2114,14 +2092,13 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i).value() & FrameArray::kAsmJsAtNumberConversion;
- if (elements->IsWasmCompiledFrame(i) || elements->IsAsmJsWasmFrame(i)) {
+ if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
// WasmCode* held alive by the {GlobalWasmCodeRef}.
wasm::WasmCode* code =
Managed<wasm::GlobalWasmCodeRef>::cast(elements->WasmCodeObject(i))
.get()
->code();
- offset = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- code, offset);
+ offset = code->GetSourcePositionBefore(offset);
}
Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
const wasm::WasmModule* module = elements->WasmInstance(i).module();
@@ -2527,6 +2504,14 @@ bool Isolate::AreWasmThreadsEnabled(Handle<Context> context) {
return FLAG_experimental_wasm_threads;
}
+bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
+ if (wasm_simd_enabled_callback()) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ return wasm_simd_enabled_callback()(api_context);
+ }
+ return FLAG_experimental_wasm_simd;
+}
+
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
@@ -2777,7 +2762,11 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(saved_isolate, saved_data);
}
-void Isolate::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
+void Isolate::SetUpFromReadOnlyArtifacts(
+ std::shared_ptr<ReadOnlyArtifacts> artifacts) {
+ artifacts_ = artifacts;
+ DCHECK_NOT_NULL(artifacts);
+ ReadOnlyHeap* ro_heap = artifacts->read_only_heap();
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
read_only_heap_ = ro_heap;
@@ -2798,6 +2787,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
+ persistent_handles_list_(new PersistentHandlesList(this)),
jitless_(FLAG_jitless),
#if V8_SFI_HAS_UNIQUE_ID
next_unique_sfi_id_(0),
@@ -3354,8 +3344,8 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
}
if (create_heap_objects) {
- // Terminate the partial snapshot cache so we can iterate.
- partial_snapshot_cache_.push_back(ReadOnlyRoots(this).undefined_value());
+ // Terminate the startup object cache so we can iterate.
+ startup_object_cache_.push_back(ReadOnlyRoots(this).undefined_value());
}
InitializeThreadLocal();
@@ -3595,6 +3585,10 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
}
}
+std::unique_ptr<PersistentHandles> Isolate::NewPersistentHandles() {
+ return std::make_unique<PersistentHandles>(this);
+}
+
void Isolate::DumpAndResetStats() {
if (turbo_statistics() != nullptr) {
DCHECK(FLAG_turbo_stats || FLAG_turbo_stats_nvp);
@@ -3958,21 +3952,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
-void Isolate::SetHostCleanupFinalizationGroupCallback(
- HostCleanupFinalizationGroupCallback callback) {
- host_cleanup_finalization_group_callback_ = callback;
-}
-
-void Isolate::RunHostCleanupFinalizationGroupCallback(
- Handle<JSFinalizationRegistry> fr) {
- if (host_cleanup_finalization_group_callback_ != nullptr) {
- v8::Local<v8::Context> api_context =
- v8::Utils::ToLocal(handle(Context::cast(fr->native_context()), this));
- host_cleanup_finalization_group_callback_(api_context,
- v8::Utils::ToLocal(fr));
- }
-}
-
void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback) {
host_import_module_dynamically_callback_ = callback;
@@ -3980,19 +3959,17 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module) {
- Handle<HeapObject> host_meta(module->import_meta(), this);
- if (host_meta->IsTheHole(this)) {
- host_meta = factory()->NewJSObjectWithNullProto();
- if (host_initialize_import_meta_object_callback_ != nullptr) {
- v8::Local<v8::Context> api_context =
- v8::Utils::ToLocal(Handle<Context>(native_context()));
- host_initialize_import_meta_object_callback_(
- api_context, Utils::ToLocal(Handle<Module>::cast(module)),
- v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
- }
- module->set_import_meta(*host_meta);
+ CHECK(module->import_meta().IsTheHole(this));
+ Handle<JSObject> import_meta = factory()->NewJSObjectWithNullProto();
+ if (host_initialize_import_meta_object_callback_ != nullptr) {
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(Handle<Context>(native_context()));
+ host_initialize_import_meta_object_callback_(
+ api_context, Utils::ToLocal(Handle<Module>::cast(module)),
+ v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(import_meta)));
+ CHECK(!has_scheduled_exception());
}
- return Handle<JSObject>::cast(host_meta);
+ return import_meta;
}
void Isolate::SetHostInitializeImportMetaObjectCallback(
@@ -4183,9 +4160,15 @@ void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
}
void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
- // The counter callback may cause the embedder to call into V8, which is not
- // generally possible during GC.
- if (heap_.gc_state() == Heap::NOT_IN_GC) {
+ // The counter callback
+ // - may cause the embedder to call into V8, which is not generally possible
+ // during GC.
+ // - requires a current native context, which may not always exist.
+ // TODO(jgruber): Consider either removing the native context requirement in
+ // blink, or passing it to the callback explicitly.
+ if (heap_.gc_state() == Heap::NOT_IN_GC && !context().is_null()) {
+ DCHECK(context().IsContext());
+ DCHECK(context().native_context().IsNativeContext());
if (use_counter_callback_) {
HandleScope handle_scope(this);
use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature);
@@ -4356,6 +4339,9 @@ void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
void Isolate::clear_cached_icu_object(ICUObjectCacheType cache_type) {
icu_object_cache_.erase(cache_type);
}
+
+void Isolate::ClearCachedIcuObjects() { icu_object_cache_.clear(); }
+
#endif // V8_INTL_SUPPORT
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
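The new Isolate::IsWasmSimdEnabled() above mirrors AreWasmThreadsEnabled(): if the embedder registered a wasm_simd_enabled_callback it decides per context, otherwise the --experimental-wasm-simd flag applies. A hedged sketch of such a predicate on the embedder side (the function name is illustrative, and the registration call is not part of this patch):

bool WasmSimdEnabledForContext(v8::Local<v8::Context> context) {
  // e.g. gate SIMD on an origin trial or a per-context embedder setting.
  return true;
}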
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index edf9a1a95a..de00d862a3 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -85,6 +85,9 @@ class MaterializedObjectStore;
class Microtask;
class MicrotaskQueue;
class OptimizingCompileDispatcher;
+class PersistentHandles;
+class PersistentHandlesList;
+class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class RegExpStack;
class RootVisitor;
@@ -405,6 +408,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
+ V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -522,7 +526,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
- void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+ void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
@@ -618,6 +622,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
inline void clear_pending_exception();
bool AreWasmThreadsEnabled(Handle<Context> context);
+ bool IsWasmSimdEnabled(Handle<Context> context);
THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
@@ -1169,6 +1174,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void set_icu_object_in_cache(ICUObjectCacheType cache_type,
std::shared_ptr<icu::UMemory> obj);
void clear_cached_icu_object(ICUObjectCacheType cache_type);
+ void ClearCachedIcuObjects();
#endif // V8_INTL_SUPPORT
@@ -1198,6 +1204,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+ std::unique_ptr<PersistentHandles> NewPersistentHandles();
+
+ PersistentHandlesList* persistent_handles_list() {
+ return persistent_handles_list_.get();
+ }
+
#ifdef DEBUG
bool IsDeferredHandle(Address* location);
#endif // DEBUG
@@ -1334,9 +1346,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
- std::vector<Object>* partial_snapshot_cache() {
- return &partial_snapshot_cache_;
- }
+ std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }
bool IsGeneratingEmbeddedBuiltins() const {
return builtins_constants_table_builder() != nullptr;
@@ -1406,14 +1416,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsInAnyContext(Object object, uint32_t index);
void ClearKeptObjects();
- void SetHostCleanupFinalizationGroupCallback(
- HostCleanupFinalizationGroupCallback callback);
- HostCleanupFinalizationGroupCallback
- host_cleanup_finalization_group_callback() const {
- return host_cleanup_finalization_group_callback_;
- }
- void RunHostCleanupFinalizationGroupCallback(
- Handle<JSFinalizationRegistry> fr);
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
@@ -1621,6 +1623,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
ReadOnlyHeap* read_only_heap_ = nullptr;
+ std::shared_ptr<ReadOnlyArtifacts> artifacts_;
const int id_;
EntryStackItem* entry_stack_ = nullptr;
@@ -1669,8 +1672,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
- HostCleanupFinalizationGroupCallback
- host_cleanup_finalization_group_callback_ = nullptr;
HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
nullptr;
HostInitializeImportMetaObjectCallback
@@ -1762,6 +1763,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
DeferredHandles* deferred_handles_head_ = nullptr;
OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;
+ std::unique_ptr<PersistentHandlesList> persistent_handles_list_;
+
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_ = 0;
@@ -1783,7 +1786,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;
- std::vector<Object> partial_snapshot_cache_;
+ std::vector<Object> startup_object_cache_;
// Used during builtins compilation to build the builtins constants table,
// which is stored on the root list prior to serialization.
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index d1a76f654c..33a2fa99ba 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -87,7 +87,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
int bytecode_offset = -1;
Handle<Script> script_handle = isolate->factory()->empty_script();
Handle<SharedFunctionInfo> shared_info;
- if (location != nullptr) {
+ if (location != nullptr && !FLAG_correctness_fuzzer_suppressions) {
start = location->start_pos();
end = location->end_pos();
script_handle = location->script();
@@ -547,19 +547,14 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
// This function is called for compiled and interpreted wasm frames, and for
// asm.js->wasm frames.
DCHECK(array->IsWasmFrame(frame_ix) ||
- array->IsWasmInterpretedFrame(frame_ix) ||
array->IsAsmJsWasmFrame(frame_ix));
isolate_ = isolate;
wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
wasm_func_index_ = array->WasmFunctionIndex(frame_ix).value();
- if (array->IsWasmInterpretedFrame(frame_ix)) {
- code_ = nullptr;
- } else {
- // The {WasmCode*} is held alive by the {GlobalWasmCodeRef}.
- auto global_wasm_code_ref =
- Managed<wasm::GlobalWasmCodeRef>::cast(array->WasmCodeObject(frame_ix));
- code_ = global_wasm_code_ref.get()->code();
- }
+ // The {WasmCode*} is held alive by the {GlobalWasmCodeRef}.
+ auto global_wasm_code_ref =
+ Managed<wasm::GlobalWasmCodeRef>::cast(array->WasmCodeObject(frame_ix));
+ code_ = global_wasm_code_ref.get()->code();
offset_ = array->Offset(frame_ix).value();
}
@@ -601,10 +596,7 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
Handle<Object> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
int WasmStackFrame::GetPosition() const {
- return IsInterpreted()
- ? offset_
- : FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- code_, offset_);
+ return IsInterpreted() ? offset_ : code_->GetSourcePositionBefore(offset_);
}
int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
@@ -657,9 +649,7 @@ Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
int AsmJsWasmStackFrame::GetPosition() const {
DCHECK_LE(0, offset_);
- int byte_offset =
- FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code_,
- offset_);
+ int byte_offset = code_->GetSourcePositionBefore(offset_);
const wasm::WasmModule* module = wasm_instance_->module();
return GetSourcePosition(module, wasm_func_index_, byte_offset,
is_at_number_conversion_);
@@ -692,21 +682,15 @@ void FrameArrayIterator::Advance() { frame_ix_++; }
StackFrameBase* FrameArrayIterator::Frame() {
DCHECK(HasFrame());
const int flags = array_->Flags(frame_ix_).value();
- int flag_mask = FrameArray::kIsWasmCompiledFrame |
- FrameArray::kIsWasmInterpretedFrame |
- FrameArray::kIsAsmJsWasmFrame;
+ int flag_mask = FrameArray::kIsWasmFrame | FrameArray::kIsAsmJsWasmFrame;
switch (flags & flag_mask) {
case 0:
- // JavaScript Frame.
js_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &js_frame_;
- case FrameArray::kIsWasmCompiledFrame:
- case FrameArray::kIsWasmInterpretedFrame:
- // Wasm Frame:
+ case FrameArray::kIsWasmFrame:
wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &wasm_frame_;
case FrameArray::kIsAsmJsWasmFrame:
- // Asm.js Wasm Frame:
asm_wasm_frame_.FromFrameArray(isolate_, array_, frame_ix_);
return &asm_wasm_frame_;
default:
@@ -930,12 +914,25 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
}
Handle<String> MessageFormatter::Format(Isolate* isolate, MessageTemplate index,
- Handle<Object> arg) {
+ Handle<Object> arg0,
+ Handle<Object> arg1,
+ Handle<Object> arg2) {
Factory* factory = isolate->factory();
- Handle<String> result_string = Object::NoSideEffectsToString(isolate, arg);
+ Handle<String> arg0_string = factory->empty_string();
+ if (!arg0.is_null()) {
+ arg0_string = Object::NoSideEffectsToString(isolate, arg0);
+ }
+ Handle<String> arg1_string = factory->empty_string();
+ if (!arg1.is_null()) {
+ arg1_string = Object::NoSideEffectsToString(isolate, arg1);
+ }
+ Handle<String> arg2_string = factory->empty_string();
+ if (!arg2.is_null()) {
+ arg2_string = Object::NoSideEffectsToString(isolate, arg2);
+ }
MaybeHandle<String> maybe_result_string = MessageFormatter::Format(
- isolate, index, result_string, factory->empty_string(),
- factory->empty_string());
+ isolate, index, arg0_string, arg1_string, arg2_string);
+ Handle<String> result_string;
if (!maybe_result_string.ToHandle(&result_string)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
@@ -996,6 +993,26 @@ MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
return builder.Finish();
}
+MaybeHandle<JSObject> ErrorUtils::Construct(Isolate* isolate,
+ Handle<JSFunction> target,
+ Handle<Object> new_target,
+ Handle<Object> message) {
+ FrameSkipMode mode = SKIP_FIRST;
+ Handle<Object> caller;
+
+ // When we're passed a JSFunction as new target, we can skip frames until that
+ // specific function is seen instead of unconditionally skipping the first
+ // frame.
+ if (new_target->IsJSFunction()) {
+ mode = SKIP_UNTIL_SEEN;
+ caller = new_target;
+ }
+
+ return ErrorUtils::Construct(isolate, target, new_target, message, mode,
+ caller,
+ ErrorUtils::StackTraceCollection::kDetailed);
+}
+
MaybeHandle<JSObject> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
@@ -1237,7 +1254,10 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
MessageLocation* location,
CallPrinter::ErrorHint* hint) {
if (ComputeLocation(isolate, location)) {
- ParseInfo info(isolate, *location->shared());
+ UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
+ isolate, *location->shared());
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state);
if (parsing::ParseAny(&info, location->shared(), isolate)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location->shared()->IsUserJavaScript());
@@ -1290,12 +1310,15 @@ Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
return isolate->factory()->NewTypeError(id, callsite);
}
-Object ErrorUtils::ThrowSpreadArgIsNullOrUndefinedError(Isolate* isolate,
- Handle<Object> object) {
+Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
+ Handle<Object> object) {
MessageLocation location;
Handle<String> callsite;
if (ComputeLocation(isolate, &location)) {
- ParseInfo info(isolate, *location.shared());
+ UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
+ isolate, *location.shared());
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state);
if (parsing::ParseAny(&info, location.shared(), isolate)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript(),
@@ -1316,7 +1339,6 @@ Object ErrorUtils::ThrowSpreadArgIsNullOrUndefinedError(Isolate* isolate,
}
}
- MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
Handle<Object> exception =
isolate->factory()->NewTypeError(id, callsite, object);
return isolate->Throw(*exception, &location);
@@ -1370,7 +1392,10 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
if (ComputeLocation(isolate, &location)) {
location_computed = true;
- ParseInfo info(isolate, *location.shared());
+ UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
+ isolate, *location.shared());
+ UnoptimizedCompileState compile_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state);
if (parsing::ParseAny(&info, location.shared(), isolate)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
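The widened MessageFormatter::Format() overload above keeps existing single-argument callers working (arg1 and arg2 default to empty handles, which the body converts to empty strings) while letting new callers pass up to three arguments. An illustrative call, assuming `callsite` and `object` handles are in scope as in the ThrowSpreadArgError() path above:

Handle<String> formatted = MessageFormatter::Format(
    isolate, MessageTemplate::kNotIterableNoSymbolLoad, callsite, object);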
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index cf54cac852..963796c7fe 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -269,6 +269,10 @@ class ErrorUtils : public AllStatic {
// |kNone| is useful when you don't need the stack information at all, for
// example when creating a deserialized error.
enum class StackTraceCollection { kDetailed, kSimple, kNone };
+ static MaybeHandle<JSObject> Construct(Isolate* isolate,
+ Handle<JSFunction> target,
+ Handle<Object> new_target,
+ Handle<Object> message);
static MaybeHandle<JSObject> Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
@@ -293,8 +297,8 @@ class ErrorUtils : public AllStatic {
Handle<Object> source);
static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
Handle<Object> source);
- static Object ThrowSpreadArgIsNullOrUndefinedError(Isolate* isolate,
- Handle<Object> object);
+ static Object ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
+ Handle<Object> object);
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object);
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
@@ -313,7 +317,9 @@ class MessageFormatter {
Handle<String> arg2);
static Handle<String> Format(Isolate* isolate, MessageTemplate index,
- Handle<Object> arg);
+ Handle<Object> arg0,
+ Handle<Object> arg1 = Handle<Object>(),
+ Handle<Object> arg2 = Handle<Object>());
};
// A message handler is a convenience interface for accessing the list
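The new four-argument ErrorUtils::Construct() declared above is a thin convenience wrapper; per its definition in messages.cc it reduces to the full overload with a frame-skip mode derived from new_target and detailed stack trace collection:

FrameSkipMode mode = new_target->IsJSFunction() ? SKIP_UNTIL_SEEN : SKIP_FIRST;
Handle<Object> caller =
    new_target->IsJSFunction() ? new_target : Handle<Object>();
MaybeHandle<JSObject> error = ErrorUtils::Construct(
    isolate, target, new_target, message, mode, caller,
    ErrorUtils::StackTraceCollection::kDetailed);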
diff --git a/deps/v8/src/execution/off-thread-isolate-inl.h b/deps/v8/src/execution/off-thread-isolate-inl.h
new file mode 100644
index 0000000000..13dfebd47f
--- /dev/null
+++ b/deps/v8/src/execution/off-thread-isolate-inl.h
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_OFF_THREAD_ISOLATE_INL_H_
+#define V8_EXECUTION_OFF_THREAD_ISOLATE_INL_H_
+
+#include "src/execution/isolate.h"
+#include "src/execution/off-thread-isolate.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Address OffThreadIsolate::isolate_root() const {
+ return isolate_->isolate_root();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_OFF_THREAD_ISOLATE_INL_H_
diff --git a/deps/v8/src/execution/off-thread-isolate.cc b/deps/v8/src/execution/off-thread-isolate.cc
index c08d51d7b4..3a4c39052f 100644
--- a/deps/v8/src/execution/off-thread-isolate.cc
+++ b/deps/v8/src/execution/off-thread-isolate.cc
@@ -6,17 +6,126 @@
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
+#include "src/handles/handles-inl.h"
#include "src/logging/off-thread-logger.h"
namespace v8 {
namespace internal {
+class OffThreadTransferHandleStorage {
+ public:
+ enum State { kOffThreadHandle, kRawObject, kHandle };
+
+ explicit OffThreadTransferHandleStorage(
+ Address* off_thread_handle_location,
+ std::unique_ptr<OffThreadTransferHandleStorage> next)
+ : handle_location_(off_thread_handle_location),
+ next_(std::move(next)),
+ state_(kOffThreadHandle) {
+ CheckValid();
+ }
+
+ void ConvertFromOffThreadHandleOnFinish() {
+ CheckValid();
+ DCHECK_EQ(state_, kOffThreadHandle);
+ raw_obj_ptr_ = *handle_location_;
+ state_ = kRawObject;
+ CheckValid();
+ }
+
+ void ConvertToHandleOnPublish(Isolate* isolate) {
+ CheckValid();
+ DCHECK_EQ(state_, kRawObject);
+ handle_location_ = handle(Object(raw_obj_ptr_), isolate).location();
+ state_ = kHandle;
+ CheckValid();
+ }
+
+ Address* handle_location() const {
+ DCHECK_EQ(state_, kHandle);
+ DCHECK(
+ Object(*handle_location_).IsSmi() ||
+ !Heap::InOffThreadSpace(HeapObject::cast(Object(*handle_location_))));
+ return handle_location_;
+ }
+
+ OffThreadTransferHandleStorage* next() { return next_.get(); }
+
+ State state() const { return state_; }
+
+ private:
+ void CheckValid() {
+#ifdef DEBUG
+ Object obj;
+
+ switch (state_) {
+ case kHandle:
+ case kOffThreadHandle:
+ DCHECK_NOT_NULL(handle_location_);
+ obj = Object(*handle_location_);
+ break;
+ case kRawObject:
+ obj = Object(raw_obj_ptr_);
+ break;
+ }
+
+ // Smis are always fine.
+ if (obj.IsSmi()) return;
+
+ // The object that is not yet in a main-thread handle should be in
+ // off-thread space. Main-thread handles can still point to off-thread space
+ // during Publish, so that invariant is taken care of on main-thread handle
+ // access.
+ DCHECK_IMPLIES(state_ != kHandle,
+ Heap::InOffThreadSpace(HeapObject::cast(obj)));
+#endif
+ }
+
+ union {
+ Address* handle_location_;
+ Address raw_obj_ptr_;
+ };
+ std::unique_ptr<OffThreadTransferHandleStorage> next_;
+ State state_;
+};
+
+Address* OffThreadTransferHandleBase::ToHandleLocation() const {
+ return storage_ == nullptr ? nullptr : storage_->handle_location();
+}
+
OffThreadIsolate::OffThreadIsolate(Isolate* isolate, Zone* zone)
: HiddenOffThreadFactory(isolate),
+ heap_(isolate->heap()),
isolate_(isolate),
logger_(new OffThreadLogger()),
- handle_zone_(zone) {}
-OffThreadIsolate::~OffThreadIsolate() { delete logger_; }
+ handle_zone_(zone),
+ off_thread_transfer_handles_head_(nullptr) {}
+
+OffThreadIsolate::~OffThreadIsolate() = default;
+
+void OffThreadIsolate::FinishOffThread() {
+ heap()->FinishOffThread();
+
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertFromOffThreadHandleOnFinish();
+ storage = storage->next();
+ }
+
+ handle_zone_ = nullptr;
+}
+
+void OffThreadIsolate::Publish(Isolate* isolate) {
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertToHandleOnPublish(isolate);
+ storage = storage->next();
+ }
+
+ heap()->Publish(isolate->heap());
+}
int OffThreadIsolate::GetNextScriptId() { return isolate_->GetNextScriptId(); }
@@ -26,11 +135,6 @@ int OffThreadIsolate::GetNextUniqueSharedFunctionInfoId() {
}
#endif // V8_SFI_HAS_UNIQUE_ID
-bool OffThreadIsolate::NeedsSourcePositionsForProfiling() {
- // TODO(leszeks): Figure out if it makes sense to check this asynchronously.
- return isolate_->NeedsSourcePositionsForProfiling();
-}
-
bool OffThreadIsolate::is_collecting_type_profile() {
// TODO(leszeks): Figure out if it makes sense to check this asynchronously.
return isolate_->is_collecting_type_profile();
@@ -41,5 +145,16 @@ void OffThreadIsolate::PinToCurrentThread() {
thread_id_ = ThreadId::Current();
}
+OffThreadTransferHandleStorage* OffThreadIsolate::AddTransferHandleStorage(
+ HandleBase handle) {
+ DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
+ off_thread_transfer_handles_head_->state() ==
+ OffThreadTransferHandleStorage::kOffThreadHandle);
+ off_thread_transfer_handles_head_ =
+ std::make_unique<OffThreadTransferHandleStorage>(
+ handle.location(), std::move(off_thread_transfer_handles_head_));
+ return off_thread_transfer_handles_head_.get();
+}
+
} // namespace internal
} // namespace v8
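OffThreadTransferHandleStorage above gives each transferred handle a three-stage life: an off-thread handle while work runs on the background thread, a raw Address after FinishOffThread(), and a main-thread handle after Publish(). A hedged usage sketch (variable names are placeholders, not from this patch):

OffThreadTransferHandle<String> transfer =
    off_thread_isolate->TransferHandle(str);  // still on the background thread
off_thread_isolate->FinishOffThread();        // background: handle -> raw Address
// ... hand the results over to the main thread ...
off_thread_isolate->Publish(main_isolate);    // main thread: raw -> Handle
Handle<String> result = transfer.ToHandle();  // now usable as an ordinary Handle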
diff --git a/deps/v8/src/execution/off-thread-isolate.h b/deps/v8/src/execution/off-thread-isolate.h
index 9a75c32859..80fea9bc4c 100644
--- a/deps/v8/src/execution/off-thread-isolate.h
+++ b/deps/v8/src/execution/off-thread-isolate.h
@@ -5,16 +5,57 @@
#ifndef V8_EXECUTION_OFF_THREAD_ISOLATE_H_
#define V8_EXECUTION_OFF_THREAD_ISOLATE_H_
-#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/execution/thread-id.h"
#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/off-thread-factory.h"
+#include "src/heap/off-thread-heap.h"
namespace v8 {
namespace internal {
class Isolate;
class OffThreadLogger;
+class OffThreadTransferHandleStorage;
+
+class OffThreadTransferHandleBase {
+ protected:
+ explicit OffThreadTransferHandleBase(OffThreadTransferHandleStorage* storage)
+ : storage_(storage) {}
+
+ V8_EXPORT_PRIVATE Address* ToHandleLocation() const;
+
+ private:
+ OffThreadTransferHandleStorage* storage_;
+};
+
+// Helper class for transferring ownership of an off-thread allocated object's
+// handler to the main thread. OffThreadTransferHandles should be created before
+// the OffThreadIsolate is finished, and can be accessed as a Handle after the
+// OffThreadIsolate is published.
+template <typename T>
+class OffThreadTransferHandle : public OffThreadTransferHandleBase {
+ public:
+ OffThreadTransferHandle() : OffThreadTransferHandleBase(nullptr) {}
+ explicit OffThreadTransferHandle(OffThreadTransferHandleStorage* storage)
+ : OffThreadTransferHandleBase(storage) {}
+
+ Handle<T> ToHandle() const { return Handle<T>(ToHandleLocation()); }
+};
+
+template <typename T>
+class OffThreadTransferMaybeHandle : public OffThreadTransferHandleBase {
+ public:
+ OffThreadTransferMaybeHandle() : OffThreadTransferHandleBase(nullptr) {}
+ explicit OffThreadTransferMaybeHandle(OffThreadTransferHandleStorage* storage)
+ : OffThreadTransferHandleBase(storage) {}
+
+ MaybeHandle<T> ToHandle() const {
+ Address* location = ToHandleLocation();
+ return location ? Handle<T>(location) : MaybeHandle<T>();
+ }
+};
// HiddenOffThreadFactory parallels Isolate's HiddenFactory
class V8_EXPORT_PRIVATE HiddenOffThreadFactory : private OffThreadFactory {
@@ -37,6 +78,15 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
explicit OffThreadIsolate(Isolate* isolate, Zone* zone);
~OffThreadIsolate();
+ static OffThreadIsolate* FromHeap(OffThreadHeap* heap) {
+ return reinterpret_cast<OffThreadIsolate*>(
+ reinterpret_cast<Address>(heap) - OFFSET_OF(OffThreadIsolate, heap_));
+ }
+
+ OffThreadHeap* heap() { return &heap_; }
+
+ inline Address isolate_root() const;
+
v8::internal::OffThreadFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
// undefined behavior (as static_cast cannot cast across private bases).
@@ -47,10 +97,15 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
// This method finishes the use of the off-thread Isolate, and can be safely
// called off-thread.
- void FinishOffThread() {
- factory()->FinishOffThread();
- handle_zone_ = nullptr;
- }
+ void FinishOffThread();
+
+ // This method publishes the off-thread Isolate to the main-thread Isolate,
+ // moving all off-thread allocated objects to be visible to the GC, and fixing
+ // up any other state (e.g. internalized strings). This method must be called
+ // on the main thread.
+ void Publish(Isolate* isolate);
+
+ bool has_pending_exception() const { return false; }
template <typename T>
Handle<T> Throw(Handle<Object> exception) {
@@ -68,15 +123,33 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
return location;
}
+ template <typename T>
+ OffThreadTransferHandle<T> TransferHandle(Handle<T> handle) {
+ DCHECK_NOT_NULL(handle_zone_);
+ if (handle.is_null()) {
+ return OffThreadTransferHandle<T>();
+ }
+ return OffThreadTransferHandle<T>(AddTransferHandleStorage(handle));
+ }
+
+ template <typename T>
+ OffThreadTransferMaybeHandle<T> TransferHandle(MaybeHandle<T> maybe_handle) {
+ DCHECK_NOT_NULL(handle_zone_);
+ Handle<T> handle;
+ if (!maybe_handle.ToHandle(&handle)) {
+ return OffThreadTransferMaybeHandle<T>();
+ }
+ return OffThreadTransferMaybeHandle<T>(AddTransferHandleStorage(handle));
+ }
+
int GetNextScriptId();
#if V8_SFI_HAS_UNIQUE_ID
int GetNextUniqueSharedFunctionInfoId();
#endif // V8_SFI_HAS_UNIQUE_ID
- bool NeedsSourcePositionsForProfiling();
bool is_collecting_type_profile();
- OffThreadLogger* logger() { return logger_; }
+ OffThreadLogger* logger() { return logger_.get(); }
void PinToCurrentThread();
ThreadId thread_id() { return thread_id_; }
@@ -84,13 +157,19 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
private:
friend class v8::internal::OffThreadFactory;
+ OffThreadTransferHandleStorage* AddTransferHandleStorage(HandleBase handle);
+
+ OffThreadHeap heap_;
+
// TODO(leszeks): Extract out the fields of the Isolate we want and store
// those instead of the whole thing.
Isolate* isolate_;
- OffThreadLogger* logger_;
+ std::unique_ptr<OffThreadLogger> logger_;
ThreadId thread_id_;
Zone* handle_zone_;
+ std::unique_ptr<OffThreadTransferHandleStorage>
+ off_thread_transfer_handles_head_;
};
} // namespace internal
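The FromHeap() helper above recovers the owning OffThreadIsolate from a pointer to its embedded heap_ member by subtracting the member's offset, the usual container_of computation. A standalone model with stand-in types:

#include <cstddef>
#include <cstdint>

struct Heap {};

struct Owner {
  int some_other_field;
  Heap heap;  // embedded member, like OffThreadIsolate::heap_
};

Owner* OwnerFromHeap(Heap* heap) {
  return reinterpret_cast<Owner*>(reinterpret_cast<std::uintptr_t>(heap) -
                                  offsetof(Owner, heap));
}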
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 47f3b9e410..24ef585031 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -15,7 +15,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
static constexpr int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize);
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
@@ -27,7 +27,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 2a9aa6486b..a0da6dd634 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -481,7 +481,7 @@ void PPCDebugger::Debug() {
PrintF("FPSCR: %08x\n", sim_->fp_condition_reg_);
} else if (strcmp(cmd, "stop") == 0) {
intptr_t value;
- intptr_t stop_pc = sim_->get_pc() - (kInstrSize + kPointerSize);
+ intptr_t stop_pc = sim_->get_pc() - (kInstrSize + kSystemPointerSize);
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
@@ -1230,7 +1230,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
DebugAtNextPC();
} else {
- set_pc(get_pc() + kInstrSize + kPointerSize);
+ set_pc(get_pc() + kInstrSize + kSystemPointerSize);
}
} else {
// This is not a valid svc code.
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.h b/deps/v8/src/execution/ppc/simulator-ppc.h
index 76f836b196..acdf7c290b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.h
+++ b/deps/v8/src/execution/ppc/simulator-ppc.h
@@ -344,7 +344,7 @@ class Simulator : public SimulatorBase {
// Simulator support.
char* stack_;
- static const size_t stack_protection_size_ = 256 * kPointerSize;
+ static const size_t stack_protection_size_ = 256 * kSystemPointerSize;
bool pc_modified_;
int icount_;
diff --git a/deps/v8/src/execution/protectors-inl.h b/deps/v8/src/execution/protectors-inl.h
index b2428063e1..8fe8bed107 100644
--- a/deps/v8/src/execution/protectors-inl.h
+++ b/deps/v8/src/execution/protectors-inl.h
@@ -13,14 +13,6 @@
namespace v8 {
namespace internal {
-#define DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK(name, cell) \
- bool Protectors::Is##name##Intact(Handle<NativeContext> native_context) { \
- PropertyCell species_cell = native_context->cell(); \
- return species_cell.value().IsSmi() && \
- Smi::ToInt(species_cell.value()) == kProtectorValid; \
- }
-DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK)
-
#define DEFINE_PROTECTOR_ON_ISOLATE_CHECK(name, root_index, unused_cell) \
bool Protectors::Is##name##Intact(Isolate* isolate) { \
PropertyCell cell = \
@@ -29,6 +21,7 @@ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DEFINE_PROTECTOR_ON_NATIVE_CONTEXT_CHECK)
Smi::ToInt(cell.value()) == kProtectorValid; \
}
DECLARED_PROTECTORS_ON_ISOLATE(DEFINE_PROTECTOR_ON_ISOLATE_CHECK)
+#undef DEFINE_PROTECTOR_ON_ISOLATE_CHECK
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
index c8ba05a8f9..9d3afd1ded 100644
--- a/deps/v8/src/execution/protectors.cc
+++ b/deps/v8/src/execution/protectors.cc
@@ -17,6 +17,7 @@ namespace v8 {
namespace internal {
namespace {
+
void TraceProtectorInvalidation(const char* protector_name) {
DCHECK(FLAG_trace_protector_invalidation);
static constexpr char kInvalidateProtectorTracingCategory[] =
@@ -26,30 +27,23 @@ void TraceProtectorInvalidation(const char* protector_name) {
DCHECK(FLAG_trace_protector_invalidation);
// TODO(jgruber): Remove the PrintF once tracing can output to stdout.
- i::PrintF("Invalidating protector cell %s", protector_name);
+ i::PrintF("Invalidating protector cell %s\n", protector_name);
TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
protector_name);
}
-} // namespace
-#define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell) \
- void Protectors::Invalidate##name(Isolate* isolate, \
- Handle<NativeContext> native_context) { \
- DCHECK(native_context->cell().value().IsSmi()); \
- DCHECK(Is##name##Intact(native_context)); \
- if (FLAG_trace_protector_invalidation) { \
- TraceProtectorInvalidation(#name); \
- } \
- Handle<PropertyCell> species_cell(native_context->cell(), isolate); \
- PropertyCell::SetValueWithInvalidation( \
- isolate, #cell, species_cell, \
- handle(Smi::FromInt(kProtectorInvalid), isolate)); \
- DCHECK(!Is##name##Intact(native_context)); \
- }
-DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
- INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION)
-#undef INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION
+// Static asserts to ensure we have a use counter for every protector. If this
+// fails, add the use counter in V8 and chromium. Note: IsDefined is not
+// strictly needed but clarifies the intent of the static assert.
+constexpr bool IsDefined(v8::Isolate::UseCounterFeature) { return true; }
+#define V(Name, ...) \
+ STATIC_ASSERT(IsDefined(v8::Isolate::kInvalidated##Name##Protector));
+
+DECLARED_PROTECTORS_ON_ISOLATE(V)
+#undef V
+
+} // namespace
#define INVALIDATE_PROTECTOR_ON_ISOLATE_DEFINITION(name, unused_index, cell) \
void Protectors::Invalidate##name(Isolate* isolate) { \
@@ -58,6 +52,7 @@ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
if (FLAG_trace_protector_invalidation) { \
TraceProtectorInvalidation(#name); \
} \
+ isolate->CountUsage(v8::Isolate::kInvalidated##name##Protector); \
PropertyCell::SetValueWithInvalidation( \
isolate, #cell, isolate->factory()->cell(), \
handle(Smi::FromInt(kProtectorInvalid), isolate)); \
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
index 4601f16cf0..c4ca49d948 100644
--- a/deps/v8/src/execution/protectors.h
+++ b/deps/v8/src/execution/protectors.h
@@ -15,9 +15,6 @@ class Protectors : public AllStatic {
static const int kProtectorValid = 1;
static const int kProtectorInvalid = 0;
-#define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \
- V(RegExpSpeciesLookupChainProtector, regexp_species_protector)
-
#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
V(ArrayBufferDetaching, ArrayBufferDetachingProtector, \
array_buffer_detaching_protector) \
@@ -41,6 +38,8 @@ class Protectors : public AllStatic {
/* property holder is the %IteratorPrototype%. Note that this also */ \
/* invalidates the SetIterator protector (see below). */ \
V(MapIteratorLookupChain, MapIteratorProtector, map_iterator_protector) \
+ V(RegExpSpeciesLookupChain, RegExpSpeciesProtector, \
+ regexp_species_protector) \
V(PromiseHook, PromiseHookProtector, promise_hook_protector) \
V(PromiseThenLookupChain, PromiseThenProtector, promise_then_protector) \
V(PromiseResolveLookupChain, PromiseResolveProtector, \
@@ -82,19 +81,9 @@ class Protectors : public AllStatic {
V(TypedArraySpeciesLookupChain, TypedArraySpeciesProtector, \
typed_array_species_protector)
-#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
- V8_EXPORT_PRIVATE static inline bool Is##name##Intact( \
- Handle<NativeContext> native_context); \
- V8_EXPORT_PRIVATE static void Invalidate##name( \
- Isolate* isolate, Handle<NativeContext> native_context);
-
- DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT)
-#undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT
-
#define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \
V8_EXPORT_PRIVATE static inline bool Is##name##Intact(Isolate* isolate); \
V8_EXPORT_PRIVATE static void Invalidate##name(Isolate* isolate);
-
DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE)
#undef DECLARE_PROTECTOR_ON_ISOLATE
};
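With the native-context variants removed, every protector is now declared through the DECLARED_PROTECTORS_ON_ISOLATE X-macro above. As a rough illustration (not a literal excerpt from this patch), the new RegExpSpeciesLookupChain entry expands via DECLARE_PROTECTOR_ON_ISOLATE into an isolate-keyed pair, and call sites check it like any other protector:

  // Expansion sketch for V(RegExpSpeciesLookupChain, RegExpSpeciesProtector, ...):
  static inline bool IsRegExpSpeciesLookupChainIntact(Isolate* isolate);
  static void InvalidateRegExpSpeciesLookupChain(Isolate* isolate);

  // Typical use at a call site (surrounding code is hypothetical):
  if (Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) {
    // Fast path: RegExp @@species has not been modified.
  }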
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index fddd40b352..fd0cf91333 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -105,7 +105,7 @@ class AsyncGC final : public CancelableTask {
void RunInternal() final {
v8::HandleScope scope(isolate_);
InvokeGC(isolate_, type_,
- v8::EmbedderHeapTracer::EmbedderStackState::kEmpty);
+ v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
auto resolver = v8::Local<v8::Promise::Resolver>::New(isolate_, resolver_);
auto ctx = Local<v8::Context>::New(isolate_, ctx_);
resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked();
@@ -132,9 +132,9 @@ void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Immediate bailout if no arguments are provided.
if (args.Length() == 0) {
- InvokeGC(isolate,
- v8::Isolate::GarbageCollectionType::kFullGarbageCollection,
- v8::EmbedderHeapTracer::EmbedderStackState::kUnknown);
+ InvokeGC(
+ isolate, v8::Isolate::GarbageCollectionType::kFullGarbageCollection,
+ v8::EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
return;
}
@@ -143,8 +143,9 @@ void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
GCOptions options = maybe_options.ToChecked();
switch (options.execution) {
case ExecutionType::kSync:
- InvokeGC(isolate, options.type,
- v8::EmbedderHeapTracer::EmbedderStackState::kUnknown);
+ InvokeGC(
+ isolate, options.type,
+ v8::EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
break;
case ExecutionType::kAsync: {
v8::HandleScope scope(isolate);
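The stack-state constants used above were renamed to state what they mean for conservative stack scanning; the mapping and the reasoning behind the two call sites, as a short sketch:

  // EmbedderStackState::kEmpty   -> EmbedderStackState::kNoHeapPointers
  // EmbedderStackState::kUnknown -> EmbedderStackState::kMayContainHeapPointers
  //
  // AsyncGC runs from a fresh task, so the native stack cannot hold V8 heap
  // pointers (kNoHeapPointers); the synchronous paths run under active JS and
  // must assume the stack may reference the heap (kMayContainHeapPointers).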
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 2384cf4a28..30d5f09180 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -134,6 +134,12 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_BOOL false
#endif
+#ifdef V8_HEAP_SANDBOX
+#define V8_HEAP_SANDBOX_BOOL true
+#else
+#define V8_HEAP_SANDBOX_BOOL false
+#endif
+
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
#else
@@ -207,16 +213,17 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Enabling import.meta requires to also enable import()
DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
+// Enabling FinalizationRegistry#cleanupSome also enables weak refs
+DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Update bootstrapper.cc whenever adding a new feature flag.
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
- V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
- V(harmony_weak_refs, "harmony weak references") \
- V(harmony_regexp_match_indices, "harmony regexp match indices") \
- V(harmony_top_level_await, "harmony top level await")
+#define HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
+ V(harmony_weak_refs_with_cleanup_some, \
+ "harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
+ V(harmony_regexp_match_indices, "harmony regexp match indices")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) \
@@ -227,39 +234,38 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_private_methods, "harmony private methods in class literals")
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
+ V(harmony_logical_assignment, "harmony logical assignment") \
+ V(harmony_promise_any, "harmony Promise.any") \
+ V(harmony_top_level_await, "harmony top level await")
#ifdef V8_INTL_SUPPORT
#define HARMONY_STAGED(V) \
HARMONY_STAGED_BASE(V) \
V(harmony_intl_dateformat_day_period, \
"Add dayPeriod option to DateTimeFormat") \
- V(harmony_intl_dateformat_fractional_second_digits, \
- "Add fractionalSecondDigits option to DateTimeFormat") \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_namespace_exports, \
- "harmony namespace exports (export * as foo from 'bar')") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_promise_all_settled, "harmony Promise.allSettled") \
- V(harmony_nullish, "harmony nullish operator") \
- V(harmony_optional_chaining, "harmony optional chaining syntax")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_namespace_exports, \
+ "harmony namespace exports (export * as foo from 'bar')") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_promise_all_settled, "harmony Promise.allSettled") \
+ V(harmony_private_methods, "harmony private methods in class literals") \
+ V(harmony_weak_refs, "harmony weak references")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_add_calendar_numbering_system, \
- "Add calendar and numberingSystem to DateTimeFormat") \
- V(harmony_intl_displaynames, "Intl.DisplayNames") \
- V(harmony_intl_other_calendars, "DateTimeFormat other calendars")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_dateformat_fractional_second_digits, \
+ "Add fractionalSecondDigits option to DateTimeFormat")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -368,6 +374,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"not-too-far future")
DEFINE_IMPLICATION(future, write_protect_code_memory)
+DEFINE_IMPLICATION(future, finalize_streaming_on_background)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
@@ -433,11 +440,6 @@ DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// asm.js validation is disabled since it triggers wasm code generation.
DEFINE_NEG_IMPLICATION(jitless, validate_asm)
-// Wasm is put into interpreter-only mode. We repeat flag implications down
-// here to ensure they're applied correctly by setting the --jitless flag.
-DEFINE_IMPLICATION(jitless, wasm_interpret_all)
-DEFINE_NEG_IMPLICATION(jitless, asm_wasm_lazy_compilation)
-DEFINE_NEG_IMPLICATION(jitless, wasm_lazy_compilation)
// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
// Flags for inline caching and feedback vectors.
@@ -495,6 +497,7 @@ DEFINE_BOOL(turboprop, false,
DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
+DEFINE_VALUE_IMPLICATION(turboprop, reuse_opt_code_count, 2)
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
@@ -645,6 +648,17 @@ DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")
DEFINE_BOOL(turbo_fast_api_calls, false, "enable fast API calls from TurboFan")
+DEFINE_INT(reuse_opt_code_count, 0,
+ "don't discard optimized code for the specified number of deopts.")
+
+// Native context independent (NCI) code.
+DEFINE_BOOL(turbo_nci, false,
+ "enable experimental native context independent code.")
+DEFINE_BOOL(turbo_nci_as_highest_tier, false,
+ "replace default TF with NCI code as the highest tier for testing "
+ "purposes.")
+DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
+DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -691,6 +705,7 @@ DEFINE_BOOL(wasm_tier_up, true,
"enable tier up to the optimizing compiler (requires --liftoff to "
"have an effect)")
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_IMPLICATION(trace_wasm_decoder, single_threaded)
DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
"trace interpretation of wasm code")
@@ -718,10 +733,8 @@ DEFINE_BOOL(trace_wasm_memory, false,
DEFINE_INT(wasm_tier_mask_for_testing, 0,
"bitmask of functions to compile with TurboFan instead of Liftoff")
-DEFINE_BOOL(debug_in_liftoff, false,
- "use Liftoff instead of the C++ interpreter for debugging "
- "WebAssembly (experimental)")
-DEFINE_IMPLICATION(future, debug_in_liftoff)
+DEFINE_BOOL(wasm_expose_debug_eval, false,
+ "Expose wasm evaluator support on the CDP")
DEFINE_BOOL(validate_asm, true, "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
@@ -770,9 +783,8 @@ DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
+DEFINE_IMPLICATION(print_wasm_code, single_threaded)
DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
-DEFINE_BOOL(wasm_interpret_all, false,
- "execute all wasm code in the wasm interpreter")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
@@ -794,10 +806,6 @@ DEFINE_BOOL(wasm_simd_post_mvp, false,
"included in the current proposal")
DEFINE_IMPLICATION(wasm_simd_post_mvp, experimental_wasm_simd)
-// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
-DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
-DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
-DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_tier_up)
DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
DEFINE_BOOL(stress_wasm_code_gc, false,
@@ -889,8 +897,20 @@ DEFINE_BOOL(trace_mutator_utilization, false,
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
+DEFINE_BOOL(incremental_marking_task, true, "use tasks for incremental marking")
+DEFINE_INT(incremental_marking_soft_trigger, 0,
+ "threshold for starting incremental marking via a task in percent "
+ "of available space: limit - size")
+DEFINE_INT(incremental_marking_hard_trigger, 0,
+ "threshold for starting incremental marking immediately in percent "
+ "of available space: limit - size")
DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
+DEFINE_BOOL(scavenge_task, true, "schedule scavenge tasks")
+DEFINE_INT(scavenge_task_trigger, 80,
+ "scavenge task trigger in percent of the current heap limit")
+DEFINE_BOOL(scavenge_separate_stack_scanning, true,
+ "use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
#ifdef V8_CONCURRENT_MARKING
@@ -910,7 +930,9 @@ DEFINE_BOOL_READONLY(array_buffer_extension, V8_ARRAY_BUFFER_EXTENSION_BOOL,
DEFINE_IMPLICATION(array_buffer_extension, always_promote_young_mc)
DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
"concurrently sweep array buffers")
+DEFINE_BOOL(concurrent_allocation, false, "concurrently allocate in old space")
DEFINE_BOOL(local_heaps, false, "allow heap access from background tasks")
+DEFINE_NEG_NEG_IMPLICATION(array_buffer_extension, local_heaps)
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
@@ -1078,6 +1100,8 @@ DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
// api.cc
DEFINE_BOOL(script_streaming, true, "enable parsing on background")
+DEFINE_BOOL(stress_background_compile, false,
+ "stress test parsing on background")
DEFINE_BOOL(
finalize_streaming_on_background, false,
"perform the script streaming finalization on the background thread")
@@ -1358,9 +1382,14 @@ DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
// Test flag for a check in %OptimizeFunctionOnNextCall
DEFINE_BOOL(
testing_d8_test_runner, false,
- "test runner turns on this flag to enable a check that the funciton was "
+ "test runner turns on this flag to enable a check that the function was "
"prepared for optimization before marking it for optimization")
+DEFINE_BOOL(
+ fuzzing, false,
+ "Fuzzers use this flag to signal that they are ... fuzzing. This causes "
+ "intrinsics to fail silently (e.g. return undefined) on invalid usage.")
+
// mksnapshot.cc
DEFINE_STRING(embedded_src, nullptr,
"Path for the generated embedded data file. (mksnapshot only)")
@@ -1419,6 +1448,7 @@ DEFINE_BOOL(multi_mapped_mock_allocator, false,
#define DEFAULT_WASM_GDB_REMOTE_PORT 8765
DEFINE_BOOL(wasm_gdb_remote, false,
"enable GDB-remote for WebAssembly debugging")
+DEFINE_NEG_IMPLICATION(wasm_gdb_remote, wasm_tier_up)
DEFINE_INT(wasm_gdb_remote_port, DEFAULT_WASM_GDB_REMOTE_PORT,
"default port for WebAssembly debugging with LLDB.")
DEFINE_BOOL(wasm_pause_waiting_for_debugger, false,
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index df4042e8eb..e6dbd6ad45 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -381,10 +381,11 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
+ const Isolate* isolate = GetIsolateForPtrCompr(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
- if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(&pointer)) {
+ if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(isolate, &pointer)) {
embedder_fields[i] = pointer;
}
}
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index b90a942df1..aa9e522c0e 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -148,7 +148,7 @@ class Handle final : public HandleBase {
template <typename S>
inline static const Handle<T> cast(Handle<S> that);
- // TODO(yangguo): Values that contain empty handles should be declared as
+ // Consider declaring values that contain empty handles as
// MaybeHandle to force validation before being used as handles.
static const Handle<T> null() { return Handle<T>(); }
diff --git a/deps/v8/src/handles/persistent-handles.cc b/deps/v8/src/handles/persistent-handles.cc
new file mode 100644
index 0000000000..3ef2dee6f1
--- /dev/null
+++ b/deps/v8/src/handles/persistent-handles.cc
@@ -0,0 +1,122 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles/persistent-handles.h"
+
+#include "src/api/api.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/safepoint.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+PersistentHandles::PersistentHandles(Isolate* isolate, size_t block_size)
+ : isolate_(isolate),
+ block_size_(block_size),
+ block_next_(nullptr),
+ block_limit_(nullptr),
+ prev_(nullptr),
+ next_(nullptr) {
+ isolate->persistent_handles_list()->Add(this);
+}
+
+PersistentHandles::~PersistentHandles() {
+ isolate_->persistent_handles_list()->Remove(this);
+
+ for (Address* block_start : blocks_) {
+ DeleteArray(block_start);
+ }
+}
+
+#ifdef DEBUG
+void PersistentHandles::Attach(LocalHeap* local_heap) {
+ DCHECK_NULL(owner_);
+ owner_ = local_heap;
+}
+
+void PersistentHandles::Detach() {
+ DCHECK_NOT_NULL(owner_);
+ owner_ = nullptr;
+}
+#endif
+
+void PersistentHandles::AddBlock() {
+ DCHECK_EQ(block_next_, block_limit_);
+
+ Address* block_start = NewArray<Address>(block_size_);
+ blocks_.push_back(block_start);
+
+ block_next_ = block_start;
+ block_limit_ = block_start + block_size_;
+}
+
+Handle<Object> PersistentHandles::NewHandle(Address value) {
+#ifdef DEBUG
+ if (owner_) DCHECK(!owner_->IsParked());
+#endif
+ return Handle<Object>(GetHandle(value));
+}
+
+Address* PersistentHandles::GetHandle(Address value) {
+ if (block_next_ == block_limit_) {
+ AddBlock();
+ }
+
+ DCHECK_LT(block_next_, block_limit_);
+ *block_next_ = value;
+ return block_next_++;
+}
+
+void PersistentHandles::Iterate(RootVisitor* visitor) {
+ for (int i = 0; i < static_cast<int>(blocks_.size()) - 1; i++) {
+ Address* block_start = blocks_[i];
+ Address* block_end = block_start + block_size_;
+ visitor->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(block_start),
+ FullObjectSlot(block_end));
+ }
+
+ if (!blocks_.empty()) {
+ Address* block_start = blocks_.back();
+ visitor->VisitRootPointers(Root::kHandleScope, nullptr,
+ FullObjectSlot(block_start),
+ FullObjectSlot(block_next_));
+ }
+}
+
+void PersistentHandlesList::Add(PersistentHandles* persistent_handles) {
+ base::MutexGuard guard(&persistent_handles_mutex_);
+ if (persistent_handles_head_)
+ persistent_handles_head_->prev_ = persistent_handles;
+ persistent_handles->prev_ = nullptr;
+ persistent_handles->next_ = persistent_handles_head_;
+ persistent_handles_head_ = persistent_handles;
+}
+
+void PersistentHandlesList::Remove(PersistentHandles* persistent_handles) {
+ base::MutexGuard guard(&persistent_handles_mutex_);
+ if (persistent_handles->next_)
+ persistent_handles->next_->prev_ = persistent_handles->prev_;
+ if (persistent_handles->prev_)
+ persistent_handles->prev_->next_ = persistent_handles->next_;
+ else
+ persistent_handles_head_ = persistent_handles->next_;
+}
+
+void PersistentHandlesList::Iterate(RootVisitor* visitor) {
+#if DEBUG
+ DCHECK(isolate_->heap()->safepoint()->IsActive());
+#else
+ USE(isolate_);
+#endif
+ base::MutexGuard guard(&persistent_handles_mutex_);
+ for (PersistentHandles* current = persistent_handles_head_; current;
+ current = current->next_) {
+ current->Iterate(visitor);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/handles/persistent-handles.h b/deps/v8/src/handles/persistent-handles.h
new file mode 100644
index 0000000000..eb0e65fb85
--- /dev/null
+++ b/deps/v8/src/handles/persistent-handles.h
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_PERSISTENT_HANDLES_H_
+#define V8_HANDLES_PERSISTENT_HANDLES_H_
+
+#include <vector>
+
+#include "include/v8-internal.h"
+#include "src/api/api.h"
+#include "src/base/macros.h"
+#include "src/objects/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// PersistentHandles serves as a container for handles that can be passed back
+// and forth between threads. Allocation and deallocation of this class are
+// thread-safe and the isolate tracks all PersistentHandles containers.
+class PersistentHandles {
+ public:
+ V8_EXPORT_PRIVATE explicit PersistentHandles(
+ Isolate* isolate, size_t block_size = kHandleBlockSize);
+ V8_EXPORT_PRIVATE ~PersistentHandles();
+
+ PersistentHandles(const PersistentHandles&) = delete;
+ PersistentHandles& operator=(const PersistentHandles&) = delete;
+
+ void Iterate(RootVisitor* visitor);
+
+ V8_EXPORT_PRIVATE Handle<Object> NewHandle(Address value);
+
+ private:
+ void AddBlock();
+ Address* GetHandle(Address value);
+
+#ifdef DEBUG
+ void Attach(LocalHeap* local_heap);
+ void Detach();
+
+ LocalHeap* owner_ = nullptr;
+
+#else
+ void Attach(LocalHeap*) {}
+ void Detach() {}
+#endif
+
+ Isolate* isolate_;
+ std::vector<Address*> blocks_;
+ size_t block_size_;
+
+ Address* block_next_;
+ Address* block_limit_;
+
+ PersistentHandles* prev_;
+ PersistentHandles* next_;
+
+ friend class PersistentHandlesList;
+ friend class LocalHeap;
+};
+
+class PersistentHandlesList {
+ public:
+ explicit PersistentHandlesList(Isolate* isolate)
+ : isolate_(isolate), persistent_handles_head_(nullptr) {}
+
+ // Iteration is only safe during a safepoint
+ void Iterate(RootVisitor* visitor);
+
+ private:
+ void Add(PersistentHandles* persistent_handles);
+ void Remove(PersistentHandles* persistent_handles);
+
+ Isolate* isolate_;
+
+ base::Mutex persistent_handles_mutex_;
+ PersistentHandles* persistent_handles_head_ = nullptr;
+
+ friend class PersistentHandles;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_PERSISTENT_HANDLES_H_
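A minimal usage sketch for the new PersistentHandles container, assuming an Isolate* isolate and an on-heap Object obj from the surrounding code; everything else below follows the class as declared above:

  std::unique_ptr<PersistentHandles> persistent(new PersistentHandles(isolate));
  Handle<Object> handle = persistent->NewHandle(obj.ptr());
  // The container registers itself with the isolate's PersistentHandlesList,
  // so its blocks are visited as roots at safepoints; `handle` therefore stays
  // valid when the container is handed to a background thread.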
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 5a02732930..51a6b41416 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -6,5 +6,6 @@ omerkatz@chromium.org
ulan@chromium.org
per-file *factory*=leszeks@chromium.org
+per-file read-only-*=delphick@chromium.org
# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index e79f86942f..88d81ca9cf 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -9,6 +9,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#define TRACE_BS(...) \
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 058e48397e..27b1315c6b 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -6,6 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
+#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
new file mode 100644
index 0000000000..65f1be313f
--- /dev/null
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -0,0 +1,86 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
+#define V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
+
+#include "include/v8-internal.h"
+#include "src/common/globals.h"
+#include "src/heap/concurrent-allocator.h"
+
+#include "src/heap/heap.h"
+#include "src/heap/spaces-inl.h"
+#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationResult ConcurrentAllocator::Allocate(int object_size,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ // TODO(dinfuehr): Add support for allocation observers
+ CHECK(FLAG_concurrent_allocation);
+ if (object_size > kMaxLabObjectSize) {
+ auto result = space_->SlowGetLinearAllocationAreaBackground(
+ local_heap_, object_size, object_size, alignment, origin);
+
+ if (result) {
+ HeapObject object = HeapObject::FromAddress(result->first);
+ return AllocationResult(object);
+ } else {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ }
+
+ return AllocateInLab(object_size, alignment, origin);
+}
+
+Address ConcurrentAllocator::AllocateOrFail(int object_size,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ AllocationResult result = Allocate(object_size, alignment, origin);
+ if (!result.IsRetry()) return result.ToObjectChecked().address();
+ return PerformCollectionAndAllocateAgain(object_size, alignment, origin);
+}
+
+AllocationResult ConcurrentAllocator::AllocateInLab(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
+ AllocationResult allocation;
+ if (!lab_.IsValid() && !EnsureLab(origin)) {
+ return AllocationResult::Retry(space_->identity());
+ }
+ allocation = lab_.AllocateRawAligned(object_size, alignment);
+ if (allocation.IsRetry()) {
+ if (!EnsureLab(origin)) {
+ return AllocationResult::Retry(space_->identity());
+ } else {
+ allocation = lab_.AllocateRawAligned(object_size, alignment);
+ CHECK(!allocation.IsRetry());
+ }
+ }
+ return allocation;
+}
+
+bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
+ auto result = space_->SlowGetLinearAllocationAreaBackground(
+ local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
+
+ if (!result) return false;
+
+ HeapObject object = HeapObject::FromAddress(result->first);
+ LocalAllocationBuffer saved_lab = std::move(lab_);
+ lab_ = LocalAllocationBuffer::FromResult(
+ local_heap_->heap(), AllocationResult(object), result->second);
+ DCHECK(lab_.IsValid());
+ if (!lab_.TryMerge(&saved_lab)) {
+ saved_lab.CloseAndMakeIterable();
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
new file mode 100644
index 0000000000..7fd2911021
--- /dev/null
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -0,0 +1,43 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/concurrent-allocator.h"
+
+#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/local-heap.h"
+
+namespace v8 {
+namespace internal {
+
+Address ConcurrentAllocator::PerformCollectionAndAllocateAgain(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
+ Heap* heap = local_heap_->heap();
+ local_heap_->allocation_failed_ = true;
+
+ for (int i = 0; i < 3; i++) {
+ {
+ ParkedScope scope(local_heap_);
+ heap->RequestAndWaitForCollection();
+ }
+
+ AllocationResult result = Allocate(object_size, alignment, origin);
+ if (!result.IsRetry()) {
+ local_heap_->allocation_failed_ = false;
+ return result.ToObjectChecked().address();
+ }
+ }
+
+ heap->FatalProcessOutOfMemory("ConcurrentAllocator: allocation failed");
+}
+
+void ConcurrentAllocator::FreeLinearAllocationArea() {
+ lab_.CloseAndMakeIterable();
+}
+
+void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
+ lab_.MakeIterable();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/concurrent-allocator.h b/deps/v8/src/heap/concurrent-allocator.h
new file mode 100644
index 0000000000..f165d00962
--- /dev/null
+++ b/deps/v8/src/heap/concurrent-allocator.h
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_H_
+#define V8_HEAP_CONCURRENT_ALLOCATOR_H_
+
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+class LocalHeap;
+
+// Concurrent allocator for allocation from background threads/tasks.
+// Allocations are served from a TLAB if possible.
+class ConcurrentAllocator {
+ public:
+ static const int kLabSize = 4 * KB;
+ static const int kMaxLabSize = 32 * KB;
+ static const int kMaxLabObjectSize = 2 * KB;
+
+ explicit ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
+ : local_heap_(local_heap),
+ space_(space),
+ lab_(LocalAllocationBuffer::InvalidBuffer()) {}
+
+ inline AllocationResult Allocate(int object_size,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ inline Address AllocateOrFail(int object_size, AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ void FreeLinearAllocationArea();
+ void MakeLinearAllocationAreaIterable();
+
+ private:
+ inline bool EnsureLab(AllocationOrigin origin);
+ inline AllocationResult AllocateInLab(int object_size,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ V8_EXPORT_PRIVATE Address PerformCollectionAndAllocateAgain(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+
+ LocalHeap* const local_heap_;
+ PagedSpace* const space_;
+ LocalAllocationBuffer lab_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CONCURRENT_ALLOCATOR_H_
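Sketch of how a background task might drive the new ConcurrentAllocator; local_heap and old_space are assumed to come from the caller, the rest mirrors the interface above:

  ConcurrentAllocator allocator(local_heap, old_space);

  // Small objects (<= kMaxLabObjectSize) are bump-allocated from a 4-32 KB LAB;
  // larger ones go through SlowGetLinearAllocationAreaBackground directly.
  AllocationResult result =
      allocator.Allocate(64, kWordAligned, AllocationOrigin::kRuntime);
  if (!result.IsRetry()) {
    HeapObject object = result.ToObjectChecked();
    // ... initialize the new object ...
  }

  // AllocateOrFail parks the thread, requests a collection and retries up to
  // three times before reporting a fatal out-of-memory condition.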
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 562719c07f..7b9385b441 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -17,6 +17,7 @@
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 24bcae20a6..3ab07a6073 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -28,6 +28,7 @@ namespace internal {
class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
+class MemoryChunk;
struct WeakObjects;
struct MemoryChunkData {
diff --git a/deps/v8/src/heap/cppgc/allocation.cc b/deps/v8/src/heap/cppgc/allocation.cc
index 7e98d1eec9..32f917da5a 100644
--- a/deps/v8/src/heap/cppgc/allocation.cc
+++ b/deps/v8/src/heap/cppgc/allocation.cc
@@ -11,6 +11,9 @@
namespace cppgc {
namespace internal {
+STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
+ kLargeObjectSizeThreshold);
+
// static
void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
size_t size,
@@ -19,5 +22,13 @@ void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
return Heap::From(heap)->Allocate(size, index);
}
+// static
+void* MakeGarbageCollectedTraitInternal::Allocate(
+ cppgc::Heap* heap, size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index) {
+ DCHECK_NOT_NULL(heap);
+ return Heap::From(heap)->Allocate(size, index, space_index);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
new file mode 100644
index 0000000000..5246c3f6c3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 8-byte alignment at calls by pushing an additional
+// non-callee-saved register (r3).
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A32)
+// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html
+asm(".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ // Only {r4-r11} are callee-saved registers. Push r3 in addition to align
+ // the stack back to 8 bytes.
+ " push {r3-r11, lr} \n"
+ // Pass 1st parameter (r0) unchanged (Stack*).
+ // Pass 2nd parameter (r1) unchanged (StackVisitor*).
+ // Save 3rd parameter (r2; IterateStackCallback).
+ " mov r3, r2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov r2, sp \n"
+ // Call the callback.
+ " blx r3 \n"
+ // Discard all the registers.
+ " add sp, sp, #36 \n"
+ // Pop lr into pc which returns and switches mode if needed.
+ " pop {pc} \n");
diff --git a/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
new file mode 100644
index 0000000000..30d4de1f30
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
@@ -0,0 +1,52 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment.
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
+
+asm(
+#if defined(__APPLE__)
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !defined(__APPLE__)
+ ".globl PushAllRegistersAndIterateStack \n"
+#if !defined(_WIN64)
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+#endif // !defined(_WIN64)
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !defined(__APPLE__)
+ // x19-x29 are callee-saved.
+ " stp x19, x20, [sp, #-16]! \n"
+ " stp x21, x22, [sp, #-16]! \n"
+ " stp x23, x24, [sp, #-16]! \n"
+ " stp x25, x26, [sp, #-16]! \n"
+ " stp x27, x28, [sp, #-16]! \n"
+ " stp fp, lr, [sp, #-16]! \n"
+ // Maintain frame pointer.
+ " mov fp, sp \n"
+ // Pass 1st parameter (x0) unchanged (Stack*).
+ // Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ // Save 3rd parameter (x2; IterateStackCallback)
+ " mov x7, x2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov x2, sp \n"
+ " blr x7 \n"
+ // Load return address.
+ " ldr lr, [sp, #8] \n"
+ // Restore frame pointer and pop all callee-saved registers.
+ " ldr fp, [sp], #96 \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
new file mode 100644
index 0000000000..9773654ffc
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
@@ -0,0 +1,32 @@
+; Copyright 2020 the V8 project authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+; This file is exactly the same as push_registers_asm.cc, just formatted for
+; the Microsoft Arm Assembler.
+
+ AREA |.text|, CODE, ALIGN=4, READONLY
+ EXPORT PushAllRegistersAndIterateStack
+PushAllRegistersAndIterateStack
+ ; x19-x29 are callee-saved
+ STP x19, x20, [sp, #-16]!
+ STP x21, x22, [sp, #-16]!
+ STP x23, x24, [sp, #-16]!
+ STP x25, x26, [sp, #-16]!
+ STP x27, x28, [sp, #-16]!
+ STP fp, lr, [sp, #-16]!
+ ; Maintain frame pointer
+ MOV fp, sp
+ ; Pass 1st parameter (x0) unchanged (Stack*).
+ ; Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ ; Save 3rd parameter (x2; IterateStackCallback)
+ MOV x7, x2
+ ; Pass 3rd parameter as sp (stack pointer)
+ MOV x2, sp
+ BLR x7
+ ; Load return address
+ LDR lr, [sp, #8]
+ ; Restore frame pointer and pop all callee-saved registers.
+ LDR fp, [sp], #96
+ RET
+ END \ No newline at end of file
diff --git a/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
new file mode 100644
index 0000000000..ed9c14a50e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment at calls. There is a 4-byte return address
+// on the stack and we push 28 bytes which maintains 16-byte stack alignment
+// at the call.
+//
+// The following assumes cdecl calling convention.
+// Source: https://en.wikipedia.org/wiki/X86_calling_conventions#cdecl
+asm(
+#ifdef _WIN32
+ ".globl _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !_WIN32
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !_WIN32
+ // [ IterateStackCallback ]
+ // [ StackVisitor* ]
+ // [ Stack* ]
+ // [ ret ]
+ // ebp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %ebp \n"
+ " movl %esp, %ebp \n"
+ " push %ebx \n"
+ " push %esi \n"
+ " push %edi \n"
+ // Save 3rd parameter (IterateStackCallback).
+ " movl 28(%esp), %ecx \n"
+ // Pass 3rd parameter as esp (stack pointer).
+ " push %esp \n"
+ // Pass 2nd parameter (StackVisitor*).
+ " push 28(%esp) \n"
+ // Pass 1st parameter (Stack*).
+ " push 28(%esp) \n"
+ " call *%ecx \n"
+ // Pop the callee-saved registers.
+ " addl $24, %esp \n"
+ // Restore ebp as it was used as frame pointer.
+ " pop %ebp \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
new file mode 100644
index 0000000000..a35fd6e527
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
@@ -0,0 +1,48 @@
+;; Copyright 2020 the V8 project authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; MASM syntax
+;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
+
+.model flat, C
+
+public PushAllRegistersAndIterateStack
+
+.code
+PushAllRegistersAndIterateStack:
+ ;; Push all callee-saved registers to get them on the stack for conservative
+ ;; stack scanning.
+ ;;
+ ;; We maintain 16-byte alignment at calls. There is a 4-byte return address
+ ;; on the stack and we push 28 bytes which maintains 16-byte stack alignment
+ ;; at the call.
+ ;;
+ ;; The following assumes cdecl calling convention.
+ ;; Source: https://docs.microsoft.com/en-us/cpp/cpp/cdecl?view=vs-2019
+ ;;
+ ;; [ IterateStackCallback ]
+ ;; [ StackVisitor* ]
+ ;; [ Stack* ]
+ ;; [ ret ]
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+ ;; Save 3rd parameter (IterateStackCallback).
+ mov ecx, [ esp + 28 ]
+ ;; Pass 3rd parameter as esp (stack pointer).
+ push esp
+ ;; Pass 2nd parameter (StackVisitor*).
+ push [ esp + 28 ]
+ ;; Pass 1st parameter (Stack*).
+ push [ esp + 28 ]
+ call ecx
+ ;; Pop the callee-saved registers.
+ add esp, 24
+ ;; Restore ebp as it was used as frame pointer.
+ pop ebp
+ ret
+
+end
diff --git a/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
new file mode 100644
index 0000000000..4a46caa6c5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".set noreorder \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addiu $sp, $sp, -48 \n"
+ " sw $ra, 44($sp) \n"
+ " sw $s8, 40($sp) \n"
+ " sw $sp, 36($sp) \n"
+ " sw $gp, 32($sp) \n"
+ " sw $s7, 28($sp) \n"
+ " sw $s6, 24($sp) \n"
+ " sw $s5, 20($sp) \n"
+ " sw $s4, 16($sp) \n"
+ " sw $s3, 12($sp) \n"
+ " sw $s2, 8($sp) \n"
+ " sw $s1, 4($sp) \n"
+ " sw $s0, 0($sp) \n"
+ // Maintain frame pointer.
+ " move $s8, $sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " move $a3, $a2 \n"
+ // Call the callback.
+ " jalr $a3 \n"
+ // Delay slot: Pass 3rd parameter as sp (stack pointer).
+ " move $a2, $sp \n"
+ // Load return address.
+ " lw $ra, 44($sp) \n"
+ // Restore frame pointer.
+ " lw $s8, 40($sp) \n"
+ " jr $ra \n"
+ // Delay slot: Discard all callee-saved registers.
+ " addiu $sp, $sp, 48 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
new file mode 100644
index 0000000000..6befa3bcc0
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".set noreorder \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " daddiu $sp, $sp, -96 \n"
+ " sd $ra, 88($sp) \n"
+ " sd $s8, 80($sp) \n"
+ " sd $sp, 72($sp) \n"
+ " sd $gp, 64($sp) \n"
+ " sd $s7, 56($sp) \n"
+ " sd $s6, 48($sp) \n"
+ " sd $s5, 40($sp) \n"
+ " sd $s4, 32($sp) \n"
+ " sd $s3, 24($sp) \n"
+ " sd $s2, 16($sp) \n"
+ " sd $s1, 8($sp) \n"
+ " sd $s0, 0($sp) \n"
+ // Maintain frame pointer.
+ " move $s8, $sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " move $a3, $a2 \n"
+ // Call the callback.
+ " jalr $a3 \n"
+ // Delay slot: Pass 3rd parameter as sp (stack pointer).
+ " move $a2, $sp \n"
+ // Load return address.
+ " ld $ra, 88($sp) \n"
+ // Restore frame pointer.
+ " ld $s8, 80($sp) \n"
+ " jr $ra \n"
+ // Delay slot: Discard all callee-saved registers.
+ " daddiu $sp, $sp, 96 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
new file mode 100644
index 0000000000..6936819ba2
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// PPC ABI source:
+// http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
+
+// AIX Runtime process stack:
+// https://www.ibm.com/support/knowledgecenter/ssw_aix_71/assembler/idalangref_runtime_process.html
+asm(
+#if defined(_AIX)
+ ".globl .PushAllRegistersAndIterateStack, hidden \n"
+ ".csect .text[PR] \n"
+ ".PushAllRegistersAndIterateStack: \n"
+#else
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif
+ // Push all callee-saved registers.
+ // lr, TOC pointer, r16 to r31. 160 bytes.
+ // The parameter save area shall be allocated by the caller. 112 bytes.
+ // At any time, SP (r1) needs to be a multiple of 16 (i.e. 16-aligned).
+ " mflr 0 \n"
+ " std 0, 16(1) \n"
+#if defined(_AIX)
+ " std 2, 40(1) \n"
+#else
+ " std 2, 24(1) \n"
+#endif
+ " stdu 1, -256(1) \n"
+ " std 14, 112(1) \n"
+ " std 15, 120(1) \n"
+ " std 16, 128(1) \n"
+ " std 17, 136(1) \n"
+ " std 18, 144(1) \n"
+ " std 19, 152(1) \n"
+ " std 20, 160(1) \n"
+ " std 21, 168(1) \n"
+ " std 22, 176(1) \n"
+ " std 23, 184(1) \n"
+ " std 24, 192(1) \n"
+ " std 25, 200(1) \n"
+ " std 26, 208(1) \n"
+ " std 27, 216(1) \n"
+ " std 28, 224(1) \n"
+ " std 29, 232(1) \n"
+ " std 30, 240(1) \n"
+ " std 31, 248(1) \n"
+ // Pass 1st parameter (r3) unchanged (Stack*).
+ // Pass 2nd parameter (r4) unchanged (StackVisitor*).
+ // Save 3rd parameter (r5; IterateStackCallback).
+ " mr 6, 5 \n"
+#if defined(_AIX)
+ // Set up TOC for callee.
+ " ld 2,8(5) \n"
+ // AIX uses function descriptors, which means that
+ // pointers to functions do not point to code, but
+ // instead point to metadata about them, hence the
+ // need to dereference.
+ " ld 6,0(6) \n"
+#endif
+ // Pass 3rd parameter as sp (stack pointer).
+ " mr 5, 1 \n"
+#if !defined(_AIX)
+ // Set up r12 to be equal to the callee address (in order for TOC
+ // relocation). Only needed on LE Linux.
+ " mr 12, 6 \n"
+#endif
+ // Call the callback.
+ " mtctr 6 \n"
+ " bctrl \n"
+ // Discard all the registers.
+ " addi 1, 1, 256 \n"
+ // Restore lr.
+ " ld 0, 16(1) \n"
+ " mtlr 0 \n"
+#if defined(_AIX)
+ // Restore TOC pointer.
+ " ld 2, 40(1) \n"
+#else
+ " ld 2, 24(1) \n"
+#endif
+ " blr \n");
diff --git a/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
new file mode 100644
index 0000000000..6b9b2c0853
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// S390 ABI source:
+// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
+asm(".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers.
+ // r6-r13, r14 and sp(r15)
+ " stmg %r6, %sp, 48(%sp) \n"
+ // Allocate frame.
+ " lay %sp, -160(%sp) \n"
+ // Pass 1st parameter (r2) unchanged (Stack*).
+ // Pass 2nd parameter (r3) unchanged (StackVisitor*).
+ // Save 3rd parameter (r4; IterateStackCallback).
+ " lgr %r5, %r4 \n"
+ // Pass sp as 3rd parameter. 160+48 to point
+ // to callee saved region stored above.
+ " lay %r4, 208(%sp) \n"
+ // Call the callback.
+ " basr %r14, %r5 \n"
+ " lmg %r14,%sp, 272(%sp) \n"
+ " br %r14 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
deleted file mode 100644
index 018859d5c0..0000000000
--- a/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-.att_syntax
-
-.text
-
-#ifdef V8_TARGET_OS_MACOSX
-
-.globl _PushAllRegistersAndIterateStack
-_PushAllRegistersAndIterateStack:
-
-#else // !V8_TARGET_OS_MACOSX
-
-.type PushAllRegistersAndIterateStack, %function
-.global PushAllRegistersAndIterateStack
-.hidden PushAllRegistersAndIterateStack
-PushAllRegistersAndIterateStack:
-
-#endif // !V8_TARGET_OS_MACOSX
-
- // Push all callee-saved registers to get them on the stack for conservative
- // stack scanning.
- //
- // We maintain 16-byte alignment at calls. There is an 8-byte return address
- // on the stack and we push 56 bytes which maintains 16-byte stack alignment
- // at the call.
- // Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
- //
- // rbp is callee-saved. Maintain proper frame pointer for debugging.
- push %rbp
- mov %rsp, %rbp
- push $0xCDCDCD // Dummy for alignment.
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- // Pass 1st parameter (rdi) unchanged (Stack*).
- // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
- // Save 3rd parameter (rdx; IterateStackCallback)
- mov %rdx, %r8
- // Pass 3rd parameter as rsp (stack pointer).
- mov %rsp, %rdx
- // Call the callback.
- call *%r8
- // Pop the callee-saved registers.
- add $48, %rsp
- // Restore rbp as it was used as frame pointer.
- pop %rbp
- ret
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
new file mode 100644
index 0000000000..68f7918c93
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// We cannot rely on clang generating the function and the right symbol
+// mangling, as `__attribute__((naked))` does not prevent clang from
+// generating TSAN function entry stubs (`__tsan_func_entry`). Even with the
+// `__attribute__((no_sanitize_thread))` annotation, clang generates the
+// entry stub.
+// See https://bugs.llvm.org/show_bug.cgi?id=45400.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+// _WIN64 Defined as 1 when the compilation target is 64-bit ARM or x64.
+// Otherwise, undefined.
+#ifdef _WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 72 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+asm(".globl PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rsi \n"
+ " push %rdi \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ // Pass 1st parameter (rcx) unchanged (Stack*).
+ // Pass 2nd parameter (rdx) unchanged (StackVisitor*).
+ // Save 3rd parameter (r8; IterateStackCallback)
+ " mov %r8, %r9 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %r8 \n"
+ // Call the callback.
+ " call *%r9 \n"
+ // Pop the callee-saved registers.
+ " add $64, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#else // !_WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 56 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
+asm(
+#ifdef __APPLE__
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !__APPLE__
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !__APPLE__
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ // Pass 1st parameter (rdi) unchanged (Stack*).
+ // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
+ // Save 3rd parameter (rdx; IterateStackCallback)
+ " mov %rdx, %r8 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %rdx \n"
+ // Call the callback.
+ " call *%r8 \n"
+ // Pop the callee-saved registers.
+ " add $48, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#endif // !_WIN64
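
The trampolines above only spill the callee-saved registers and forward the stack pointer; the conservative scan itself happens in the IterateStackCallback they invoke. A minimal standalone sketch of what such a callback can do with the received stack pointer (hypothetical names, not part of this patch):

    #include <cstdint>

    // Visit every word between the current stack pointer and the stack start
    // as a potential pointer. On x64 the stack grows downwards, so the stack
    // pointer is the low end of the range and the stack start the high end.
    void ScanStackWords(const uintptr_t* stack_pointer,
                        const uintptr_t* stack_start,
                        void (*visit_possible_pointer)(uintptr_t)) {
      for (const uintptr_t* slot = stack_pointer; slot < stack_start; ++slot) {
        visit_possible_pointer(*slot);
      }
    }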
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
index 627843830f..627843830f 100644
--- a/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
diff --git a/deps/v8/src/heap/cppgc/free-list.cc b/deps/v8/src/heap/cppgc/free-list.cc
new file mode 100644
index 0000000000..e5e6b70793
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/free-list.cc
@@ -0,0 +1,190 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/free-list.h"
+
+#include <algorithm>
+
+#include "include/cppgc/internal/logging.h"
+#include "src/base/bits.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+uint32_t BucketIndexForSize(uint32_t size) {
+ return v8::base::bits::WhichPowerOfTwo(
+ v8::base::bits::RoundDownToPowerOfTwo32(size));
+}
+} // namespace
+
+class FreeList::Entry : public HeapObjectHeader {
+ public:
+ explicit Entry(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {
+ static_assert(sizeof(Entry) == kFreeListEntrySize, "Sizes must match");
+ }
+
+ Entry* Next() const { return next_; }
+ void SetNext(Entry* next) { next_ = next; }
+
+ void Link(Entry** previous_next) {
+ next_ = *previous_next;
+ *previous_next = this;
+ }
+ void Unlink(Entry** previous_next) {
+ *previous_next = next_;
+ next_ = nullptr;
+ }
+
+ private:
+ Entry* next_ = nullptr;
+};
+
+FreeList::FreeList() { Clear(); }
+
+FreeList::FreeList(FreeList&& other) V8_NOEXCEPT
+ : free_list_heads_(std::move(other.free_list_heads_)),
+ free_list_tails_(std::move(other.free_list_tails_)),
+ biggest_free_list_index_(std::move(other.biggest_free_list_index_)) {
+ other.Clear();
+}
+
+FreeList& FreeList::operator=(FreeList&& other) V8_NOEXCEPT {
+ Clear();
+ Append(std::move(other));
+ DCHECK(other.IsEmpty());
+ return *this;
+}
+
+void FreeList::Add(FreeList::Block block) {
+ const size_t size = block.size;
+ DCHECK_GT(kPageSize, size);
+ DCHECK_LE(sizeof(HeapObjectHeader), size);
+
+ if (block.size < sizeof(Entry)) {
+    // Create a wasted entry. This can happen when an almost-empty linear
+    // allocation buffer is returned to the freelist.
+ new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
+ return;
+ }
+
+ // Make sure the freelist header is writable.
+ SET_MEMORY_ACCESIBLE(block.address, sizeof(Entry));
+ Entry* entry = new (block.address) Entry(size);
+ const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
+ entry->Link(&free_list_heads_[index]);
+ biggest_free_list_index_ = std::max(biggest_free_list_index_, index);
+ if (!entry->Next()) {
+ free_list_tails_[index] = entry;
+ }
+}
+
+void FreeList::Append(FreeList&& other) {
+#if DEBUG
+ const size_t expected_size = Size() + other.Size();
+#endif
+ // Newly created entries get added to the head.
+ for (size_t index = 0; index < free_list_tails_.size(); ++index) {
+ Entry* other_tail = other.free_list_tails_[index];
+ Entry*& this_head = this->free_list_heads_[index];
+ if (other_tail) {
+ other_tail->SetNext(this_head);
+ if (!this_head) {
+ this->free_list_tails_[index] = other_tail;
+ }
+ this_head = other.free_list_heads_[index];
+ other.free_list_heads_[index] = nullptr;
+ other.free_list_tails_[index] = nullptr;
+ }
+ }
+
+ biggest_free_list_index_ =
+ std::max(biggest_free_list_index_, other.biggest_free_list_index_);
+ other.biggest_free_list_index_ = 0;
+#if DEBUG
+ DCHECK_EQ(expected_size, Size());
+#endif
+ DCHECK(other.IsEmpty());
+}
+
+FreeList::Block FreeList::Allocate(size_t allocation_size) {
+  // Try reusing a block from the largest bin. The underlying reasoning is
+  // that we want to amortize this slow allocation call by carving off as
+  // large a free block as possible in one go; a block that will service
+  // this allocation and let following allocations be serviced quickly by
+  // bump allocation.
+  // bucket_size represents the minimal size of entries in a bucket.
+ size_t bucket_size = static_cast<size_t>(1) << biggest_free_list_index_;
+ size_t index = biggest_free_list_index_;
+ for (; index > 0; --index, bucket_size >>= 1) {
+ DCHECK(IsConsistent(index));
+ Entry* entry = free_list_heads_[index];
+ if (allocation_size > bucket_size) {
+ // Final bucket candidate; check initial entry if it is able
+ // to service this allocation. Do not perform a linear scan,
+ // as it is considered too costly.
+ if (!entry || entry->GetSize() < allocation_size) break;
+ }
+ if (entry) {
+ if (!entry->Next()) {
+ DCHECK_EQ(entry, free_list_tails_[index]);
+ free_list_tails_[index] = nullptr;
+ }
+ entry->Unlink(&free_list_heads_[index]);
+ biggest_free_list_index_ = index;
+ return {entry, entry->GetSize()};
+ }
+ }
+ biggest_free_list_index_ = index;
+ return {nullptr, 0u};
+}
+
+void FreeList::Clear() {
+ std::fill(free_list_heads_.begin(), free_list_heads_.end(), nullptr);
+ std::fill(free_list_tails_.begin(), free_list_tails_.end(), nullptr);
+ biggest_free_list_index_ = 0;
+}
+
+size_t FreeList::Size() const {
+ size_t size = 0;
+ for (auto* entry : free_list_heads_) {
+ while (entry) {
+ size += entry->GetSize();
+ entry = entry->Next();
+ }
+ }
+ return size;
+}
+
+bool FreeList::IsEmpty() const {
+ return std::all_of(free_list_heads_.cbegin(), free_list_heads_.cend(),
+ [](const auto* entry) { return !entry; });
+}
+
+bool FreeList::Contains(Block block) const {
+ for (Entry* list : free_list_heads_) {
+ for (Entry* entry = list; entry; entry = entry->Next()) {
+ if (entry <= block.address &&
+ (reinterpret_cast<Address>(block.address) + block.size <=
+ reinterpret_cast<Address>(entry) + entry->GetSize()))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool FreeList::IsConsistent(size_t index) const {
+  // Check that the freelist head and tail pointers are consistent, i.e.
+  // - either both are null (no entries in the bucket);
+  // - or both are non-null and the tail points to the last entry.
+ return (!free_list_heads_[index] && !free_list_tails_[index]) ||
+ (free_list_heads_[index] && free_list_tails_[index] &&
+ !free_list_tails_[index]->Next());
+}
+
+} // namespace internal
+} // namespace cppgc
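
BucketIndexForSize() above maps a block size to the index of its highest set bit, so the nth bucket only ever holds entries of size >= 2^n (the invariant documented in free-list.h below). A standalone sketch of the same mapping, with hypothetical names:

    #include <cassert>
    #include <cstdint>

    // Index of the highest set bit of |size|; equivalent in effect to
    // WhichPowerOfTwo(RoundDownToPowerOfTwo32(size)).
    uint32_t BucketIndexForSizeSketch(uint32_t size) {
      uint32_t index = 0;
      while (size >>= 1) ++index;
      return index;
    }

    int main() {
      assert(BucketIndexForSizeSketch(16) == 4);    // exactly 2^4
      assert(BucketIndexForSizeSketch(48) == 5);    // rounds down to 2^5 = 32
      assert(BucketIndexForSizeSketch(4096) == 12);
      return 0;
    }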
diff --git a/deps/v8/src/heap/cppgc/free-list.h b/deps/v8/src/heap/cppgc/free-list.h
new file mode 100644
index 0000000000..ba578f3820
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/free-list.h
@@ -0,0 +1,62 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_FREE_LIST_H_
+#define V8_HEAP_CPPGC_FREE_LIST_H_
+
+#include <array>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE FreeList {
+ public:
+ struct Block {
+ void* address;
+ size_t size;
+ };
+
+ FreeList();
+
+ FreeList(const FreeList&) = delete;
+ FreeList& operator=(const FreeList&) = delete;
+
+ FreeList(FreeList&& freelist) V8_NOEXCEPT;
+ FreeList& operator=(FreeList&& freelist) V8_NOEXCEPT;
+
+ // Allocates entries which are at least of the provided size.
+ Block Allocate(size_t);
+
+ // Adds block to the freelist. The minimal block size is two words.
+ void Add(Block);
+
+  // Appends another freelist into this one.
+ void Append(FreeList&&);
+
+ void Clear();
+
+ size_t Size() const;
+ bool IsEmpty() const;
+
+ bool Contains(Block) const;
+
+ private:
+ class Entry;
+
+ bool IsConsistent(size_t) const;
+
+ // All |Entry|s in the nth list have size >= 2^n.
+ std::array<Entry*, kPageSizeLog2> free_list_heads_;
+ std::array<Entry*, kPageSizeLog2> free_list_tails_;
+ size_t biggest_free_list_index_ = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_FREE_LIST_H_
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.cc b/deps/v8/src/heap/cppgc/gc-info-table.cc
index 580ff4d069..dda5f0a7e8 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.cc
+++ b/deps/v8/src/heap/cppgc/gc-info-table.cc
@@ -8,7 +8,7 @@
#include <limits>
#include <memory>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index c5ccec2a38..25141f5d1c 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -7,7 +7,7 @@
#include <stdint.h>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "include/v8config.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index 21492825cc..007eab3a33 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/v8config.h"
#include "src/heap/cppgc/gc-info-table.h"
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 18a7e3189e..734abd508e 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -8,6 +8,8 @@
#include <stddef.h>
#include <stdint.h>
+#include "include/cppgc/internal/gc-info.h"
+
namespace cppgc {
namespace internal {
@@ -31,8 +33,15 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+// Guard pages are always put into memory. Whether they are actually protected
+// depends on the allocator provided to the garbage collector.
+constexpr size_t kGuardPageSize = 4096;
+
constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
+constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
+constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-inl.h b/deps/v8/src/heap/cppgc/heap-inl.h
index 28a4a14139..4fe3186230 100644
--- a/deps/v8/src/heap/cppgc/heap-inl.h
+++ b/deps/v8/src/heap/cppgc/heap-inl.h
@@ -2,32 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/cppgc/heap.h"
-
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap-object-header-inl.h"
-
#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
#define V8_HEAP_CPPGC_HEAP_INL_H_
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
+
namespace cppgc {
namespace internal {
void* Heap::Allocate(size_t size, GCInfoIndex index) {
- // TODO(chromium:1056170): This is merely a dummy implementation and will be
- // replaced with proper allocation code throughout the migration.
- size_t allocation_size = size + sizeof(HeapObjectHeader);
- // The allocation size calculation can overflow for large sizes.
- CHECK_GT(allocation_size, size);
- // calloc() provides stricter alignment guarantees than the GC. Allocate
- // a multiple of kAllocationGranularity to follow restrictions of
- // HeapObjectHeader.
- allocation_size = (allocation_size + kAllocationMask) & ~kAllocationMask;
- void* memory = calloc(1, allocation_size);
- HeapObjectHeader* header =
- new (memory) HeapObjectHeader(allocation_size, index);
- objects_.push_back(header);
- return header->Payload();
+ DCHECK(is_allocation_allowed());
+ void* result = object_allocator_.AllocateObject(size, index);
+ objects_.push_back(&HeapObjectHeader::FromPayload(result));
+ return result;
+}
+
+void* Heap::Allocate(size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index) {
+ DCHECK(is_allocation_allowed());
+ void* result = object_allocator_.AllocateObject(size, index, space_index);
+ objects_.push_back(&HeapObjectHeader::FromPayload(result));
+ return result;
}
} // namespace internal
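
With this change Heap::Allocate() routes allocations through the ObjectAllocator instead of calloc(). From the embedder's side they are expected to arrive via the public cppgc API; a hedged sketch (the exact MakeGarbageCollected and Trace signatures at this revision are assumptions):

    #include "include/cppgc/allocation.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/visitor.h"

    // Hypothetical garbage-collected type; Trace() reports outgoing references.
    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor*) const {}  // no traced members in this sketch
    };

    // Node* node = cppgc::MakeGarbageCollected<Node>(heap);
    // The call above is expected to bottom out in Heap::Allocate().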
diff --git a/deps/v8/src/heap/cppgc/heap-object-header-inl.h b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
index a0bcda464b..cba7b24a4c 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header-inl.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -6,11 +6,12 @@
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
#include "include/cppgc/allocation.h"
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
@@ -33,7 +34,7 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
USE(padding_);
#endif // defined(V8_TARGET_ARCH_64_BIT)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
- DCHECK_EQ(0u, size & kAllocationMask);
+ DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
@@ -111,6 +112,16 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
+template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsFree() const {
+ return GetGCInfoIndex() == kFreeListGCInfoIndex;
+}
+
+bool HeapObjectHeader::IsFinalizable() const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.finalize;
+}
+
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
index bd90d5930c..ccc660fcee 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.cc
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -4,7 +4,7 @@
#include "src/heap/cppgc/heap-object-header.h"
-#include "include/cppgc/internals.h"
+#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -12,6 +12,8 @@
namespace cppgc {
namespace internal {
+STATIC_ASSERT((kAllocationGranularity % sizeof(HeapObjectHeader)) == 0);
+
void HeapObjectHeader::CheckApiConstants() {
STATIC_ASSERT(api_constants::kFullyConstructedBitMask ==
FullyConstructedField::kMask);
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 738f9d9ab9..b517617dd1 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -6,9 +6,10 @@
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#include <stdint.h>
+
#include <atomic>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "src/base/bit-field.h"
#include "src/heap/cppgc/globals.h"
@@ -41,12 +42,13 @@ namespace internal {
// stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit halves
// to allow potentially accessing them non-atomically.
-class HeapObjectHeader final {
+class HeapObjectHeader {
public:
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
+ static constexpr uint16_t kLargeObjectSizeInHeader = 0;
inline static HeapObjectHeader& FromPayload(void* address);
inline static const HeapObjectHeader& FromPayload(const void* address);
@@ -77,13 +79,15 @@ class HeapObjectHeader final {
void Unmark();
inline bool TryMarkAtomic();
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsFree() const;
+
+ inline bool IsFinalizable() const;
void Finalize();
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
- static constexpr uint16_t kLargeObjectSizeInHeader = 0;
-
// Used in |encoded_high_|.
using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
using UnusedField1 = FullyConstructedField::Next<bool, 1>;
@@ -102,7 +106,7 @@ class HeapObjectHeader final {
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
using SizeField = UnusedField2::Next<size_t, 14>;
- return SizeField::encode(size) / kAllocationGranularity;
+ return SizeField::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
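
The EncodeSize() fix above divides by kAllocationGranularity before encoding, so the 14-bit size field stores the object size in allocation granules rather than bytes. A small round-trip sketch (the 8-byte granularity is an assumption for 64-bit builds):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAssumedGranularity = 8;  // assumption for illustration

    constexpr uint16_t EncodeSizeSketch(size_t size) {
      return static_cast<uint16_t>(size / kAssumedGranularity);
    }
    constexpr size_t DecodeSizeSketch(uint16_t encoded) {
      return static_cast<size_t>(encoded) * kAssumedGranularity;
    }

    // 131064 bytes encodes to 16383, the largest value that fits in 14 bits.
    static_assert(DecodeSizeSketch(EncodeSizeSketch(131064)) == 131064,
                  "granule-aligned sizes round-trip");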
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
new file mode 100644
index 0000000000..e8afbafbd2
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -0,0 +1,201 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-page.h"
+
+#include <algorithm>
+
+#include "include/cppgc/internal/api-constants.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+Address AlignAddress(Address address, size_t alignment) {
+ return reinterpret_cast<Address>(
+ RoundUp(reinterpret_cast<uintptr_t>(address), alignment));
+}
+
+} // namespace
+
+STATIC_ASSERT(kPageSize == api_constants::kPageAlignment);
+
+// static
+BasePage* BasePage::FromPayload(void* payload) {
+ return reinterpret_cast<BasePage*>(
+ (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+}
+
+// static
+const BasePage* BasePage::FromPayload(const void* payload) {
+ return reinterpret_cast<const BasePage*>(
+ (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
+ kPageBaseMask) +
+ kGuardPageSize);
+}
+
+HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(void* address) {
+ return const_cast<HeapObjectHeader*>(
+ ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
+}
+
+const HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(
+ const void* address) {
+ if (is_large()) {
+ return LargePage::From(this)->ObjectHeader();
+ }
+ ObjectStartBitmap& bitmap = NormalPage::From(this)->object_start_bitmap();
+ HeapObjectHeader* header =
+ bitmap.FindHeader(static_cast<ConstAddress>(address));
+ DCHECK_LT(address,
+ reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ DCHECK_NE(kFreeListGCInfoIndex,
+ header->GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+ return header;
+}
+
+BasePage::BasePage(Heap* heap, BaseSpace* space, PageType type)
+ : heap_(heap), space_(space), type_(type) {
+ DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
+ kPageOffsetMask);
+ DCHECK_EQ(reinterpret_cast<void*>(&heap_),
+ FromPayload(this) + api_constants::kHeapOffset);
+ DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
+}
+
+// static
+NormalPage* NormalPage::Create(NormalPageSpace* space) {
+ DCHECK(space);
+ Heap* heap = space->raw_heap()->heap();
+ DCHECK(heap);
+ void* memory = heap->page_backend()->AllocateNormalPageMemory(space->index());
+ auto* normal_page = new (memory) NormalPage(heap, space);
+ space->AddPage(normal_page);
+ space->AddToFreeList(normal_page->PayloadStart(), normal_page->PayloadSize());
+ return normal_page;
+}
+
+// static
+void NormalPage::Destroy(NormalPage* page) {
+ DCHECK(page);
+ BaseSpace* space = page->space();
+ DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ page->~NormalPage();
+ PageBackend* backend = page->heap()->page_backend();
+ backend->FreeNormalPageMemory(space->index(),
+ reinterpret_cast<Address>(page));
+}
+
+NormalPage::NormalPage(Heap* heap, BaseSpace* space)
+ : BasePage(heap, space, PageType::kNormal),
+ object_start_bitmap_(PayloadStart()) {
+ DCHECK_LT(kLargeObjectSizeThreshold,
+ static_cast<size_t>(PayloadEnd() - PayloadStart()));
+}
+
+NormalPage::~NormalPage() = default;
+
+NormalPage::iterator NormalPage::begin() {
+ const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadStart()),
+ lab.start(), lab.size());
+}
+
+NormalPage::const_iterator NormalPage::begin() const {
+ const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ return const_iterator(
+ reinterpret_cast<const HeapObjectHeader*>(PayloadStart()), lab.start(),
+ lab.size());
+}
+
+Address NormalPage::PayloadStart() {
+ return AlignAddress((reinterpret_cast<Address>(this + 1)),
+ kAllocationGranularity);
+}
+
+ConstAddress NormalPage::PayloadStart() const {
+ return const_cast<NormalPage*>(this)->PayloadStart();
+}
+
+Address NormalPage::PayloadEnd() { return PayloadStart() + PayloadSize(); }
+
+ConstAddress NormalPage::PayloadEnd() const {
+ return const_cast<NormalPage*>(this)->PayloadEnd();
+}
+
+// static
+size_t NormalPage::PayloadSize() {
+ const size_t header_size =
+ RoundUp(sizeof(NormalPage), kAllocationGranularity);
+ return kPageSize - 2 * kGuardPageSize - header_size;
+}
+
+LargePage::LargePage(Heap* heap, BaseSpace* space, size_t size)
+ : BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
+
+LargePage::~LargePage() = default;
+
+// static
+LargePage* LargePage::Create(LargePageSpace* space, size_t size) {
+ DCHECK(space);
+ DCHECK_LE(kLargeObjectSizeThreshold, size);
+ const size_t page_header_size =
+ RoundUp(sizeof(LargePage), kAllocationGranularity);
+ const size_t allocation_size = page_header_size + size;
+
+ Heap* heap = space->raw_heap()->heap();
+ void* memory = heap->page_backend()->AllocateLargePageMemory(allocation_size);
+ LargePage* page = new (memory) LargePage(heap, space, size);
+ space->AddPage(page);
+ return page;
+}
+
+// static
+void LargePage::Destroy(LargePage* page) {
+ DCHECK(page);
+#if DEBUG
+ BaseSpace* space = page->space();
+ DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+#endif
+ page->~LargePage();
+ PageBackend* backend = page->heap()->page_backend();
+ backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
+}
+
+HeapObjectHeader* LargePage::ObjectHeader() {
+ return reinterpret_cast<HeapObjectHeader*>(PayloadStart());
+}
+
+const HeapObjectHeader* LargePage::ObjectHeader() const {
+ return reinterpret_cast<const HeapObjectHeader*>(PayloadStart());
+}
+
+Address LargePage::PayloadStart() {
+ return AlignAddress((reinterpret_cast<Address>(this + 1)),
+ kAllocationGranularity);
+}
+
+ConstAddress LargePage::PayloadStart() const {
+ return const_cast<LargePage*>(this)->PayloadStart();
+}
+
+Address LargePage::PayloadEnd() { return PayloadStart() + PayloadSize(); }
+
+ConstAddress LargePage::PayloadEnd() const {
+ return const_cast<LargePage*>(this)->PayloadEnd();
+}
+
+} // namespace internal
+} // namespace cppgc
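
BasePage::FromPayload() above recovers the page header from any interior payload address by masking off the page-offset bits and then skipping the leading guard page. A standalone sketch of that address arithmetic (the 128 KiB page size is an assumption; kGuardPageSize mirrors the constant added to globals.h):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kAssumedPageSize = uintptr_t{1} << 17;  // assumption
    constexpr uintptr_t kGuardPageSize = 4096;
    constexpr uintptr_t kPageBaseMask = ~(kAssumedPageSize - 1);

    uintptr_t PageHeaderFromPayload(uintptr_t payload) {
      return (payload & kPageBaseMask) + kGuardPageSize;
    }

    int main() {
      const uintptr_t page_base = 16 * kAssumedPageSize;  // aligned reservation
      const uintptr_t payload = page_base + kGuardPageSize + 0x200;
      assert(PageHeaderFromPayload(payload) == page_base + kGuardPageSize);
      return 0;
    }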
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
new file mode 100644
index 0000000000..c676bc4bde
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -0,0 +1,181 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_PAGE_H_
+#define V8_HEAP_CPPGC_HEAP_PAGE_H_
+
+#include "src/base/iterator.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+
+namespace cppgc {
+namespace internal {
+
+class BaseSpace;
+class NormalPageSpace;
+class LargePageSpace;
+class Heap;
+class PageBackend;
+
+class V8_EXPORT_PRIVATE BasePage {
+ public:
+ static BasePage* FromPayload(void*);
+ static const BasePage* FromPayload(const void*);
+
+ BasePage(const BasePage&) = delete;
+ BasePage& operator=(const BasePage&) = delete;
+
+ Heap* heap() { return heap_; }
+ const Heap* heap() const { return heap_; }
+
+ BaseSpace* space() { return space_; }
+ const BaseSpace* space() const { return space_; }
+ void set_space(BaseSpace* space) { space_ = space; }
+
+ bool is_large() const { return type_ == PageType::kLarge; }
+
+  // |address| must refer to a real object.
+ HeapObjectHeader* ObjectHeaderFromInnerAddress(void* address);
+ const HeapObjectHeader* ObjectHeaderFromInnerAddress(const void* address);
+
+ protected:
+ enum class PageType { kNormal, kLarge };
+ BasePage(Heap*, BaseSpace*, PageType);
+
+ private:
+ Heap* heap_;
+ BaseSpace* space_;
+ PageType type_;
+};
+
+class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
+ template <typename T>
+ class IteratorImpl : v8::base::iterator<std::forward_iterator_tag, T> {
+ public:
+ explicit IteratorImpl(T* p, ConstAddress lab_start = nullptr,
+ size_t lab_size = 0)
+ : p_(p), lab_start_(lab_start), lab_size_(lab_size) {
+ DCHECK(p);
+ DCHECK_EQ(0, (lab_size & (sizeof(T) - 1)));
+ if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
+ p_ += (lab_size_ / sizeof(T));
+ }
+ }
+
+ T& operator*() { return *p_; }
+ const T& operator*() const { return *p_; }
+
+ bool operator==(IteratorImpl other) const { return p_ == other.p_; }
+ bool operator!=(IteratorImpl other) const { return !(*this == other); }
+
+ IteratorImpl& operator++() {
+ const size_t size = p_->GetSize();
+ DCHECK_EQ(0, (size & (sizeof(T) - 1)));
+ p_ += (size / sizeof(T));
+ if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
+ p_ += (lab_size_ / sizeof(T));
+ }
+ return *this;
+ }
+ IteratorImpl operator++(int) {
+ IteratorImpl temp(*this);
+ ++(*this);
+ return temp;
+ }
+
+ T* base() const { return p_; }
+
+ private:
+ T* p_;
+ ConstAddress lab_start_;
+ size_t lab_size_;
+ };
+
+ public:
+ using iterator = IteratorImpl<HeapObjectHeader>;
+ using const_iterator = IteratorImpl<const HeapObjectHeader>;
+
+ // Allocates a new page.
+ static NormalPage* Create(NormalPageSpace*);
+ // Destroys and frees the page. The page must be detached from the
+ // corresponding space (i.e. be swept when called).
+ static void Destroy(NormalPage*);
+
+ static NormalPage* From(BasePage* page) {
+ DCHECK(!page->is_large());
+ return static_cast<NormalPage*>(page);
+ }
+ static const NormalPage* From(const BasePage* page) {
+ return From(const_cast<BasePage*>(page));
+ }
+
+ iterator begin();
+ const_iterator begin() const;
+
+ iterator end() {
+ return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadEnd()));
+ }
+ const_iterator end() const {
+ return const_iterator(
+ reinterpret_cast<const HeapObjectHeader*>(PayloadEnd()));
+ }
+
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
+ static size_t PayloadSize();
+
+ ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
+ const ObjectStartBitmap& object_start_bitmap() const {
+ return object_start_bitmap_;
+ }
+
+ private:
+ NormalPage(Heap* heap, BaseSpace* space);
+ ~NormalPage();
+
+ ObjectStartBitmap object_start_bitmap_;
+};
+
+class V8_EXPORT_PRIVATE LargePage final : public BasePage {
+ public:
+ // Allocates a new page.
+ static LargePage* Create(LargePageSpace*, size_t);
+ // Destroys and frees the page. The page must be detached from the
+ // corresponding space (i.e. be swept when called).
+ static void Destroy(LargePage*);
+
+ static LargePage* From(BasePage* page) {
+ DCHECK(page->is_large());
+ return static_cast<LargePage*>(page);
+ }
+ static const LargePage* From(const BasePage* page) {
+ return From(const_cast<BasePage*>(page));
+ }
+
+ HeapObjectHeader* ObjectHeader();
+ const HeapObjectHeader* ObjectHeader() const;
+
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
+ size_t PayloadSize() const { return payload_size_; }
+
+ private:
+ LargePage(Heap* heap, BaseSpace* space, size_t);
+ ~LargePage();
+
+ size_t payload_size_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_PAGE_H_
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
new file mode 100644
index 0000000000..70ddb93531
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-space.h"
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
+ : heap_(heap), index_(index), type_(type) {}
+
+void BaseSpace::AddPage(BasePage* page) {
+ DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
+ pages_.push_back(page);
+}
+
+void BaseSpace::RemovePage(BasePage* page) {
+ auto it = std::find(pages_.cbegin(), pages_.cend(), page);
+ DCHECK_NE(pages_.cend(), it);
+ pages_.erase(it);
+}
+
+BaseSpace::Pages BaseSpace::RemoveAllPages() {
+ Pages pages = std::move(pages_);
+ pages_.clear();
+ return pages;
+}
+
+NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
+ : BaseSpace(heap, index, PageType::kNormal) {}
+
+void NormalPageSpace::AddToFreeList(void* address, size_t size) {
+ free_list_.Add({address, size});
+ NormalPage::From(BasePage::FromPayload(address))
+ ->object_start_bitmap()
+ .SetBit(static_cast<Address>(address));
+}
+
+void NormalPageSpace::ResetLinearAllocationBuffer() {
+ if (current_lab_.size()) {
+ DCHECK_NOT_NULL(current_lab_.start());
+ AddToFreeList(current_lab_.start(), current_lab_.size());
+ current_lab_.Set(nullptr, 0);
+ }
+}
+
+LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
+ : BaseSpace(heap, index, PageType::kLarge) {}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
new file mode 100644
index 0000000000..d84207c2cd
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -0,0 +1,127 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_SPACE_H_
+#define V8_HEAP_CPPGC_HEAP_SPACE_H_
+
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/free-list.h"
+
+namespace cppgc {
+namespace internal {
+
+class RawHeap;
+class BasePage;
+
+// BaseSpace is responsible for page management.
+class V8_EXPORT_PRIVATE BaseSpace {
+ public:
+ using Pages = std::vector<BasePage*>;
+
+ using iterator = Pages::iterator;
+ using const_iterator = Pages::const_iterator;
+
+ BaseSpace(const BaseSpace&) = delete;
+ BaseSpace& operator=(const BaseSpace&) = delete;
+
+ iterator begin() { return pages_.begin(); }
+ const_iterator begin() const { return pages_.begin(); }
+ iterator end() { return pages_.end(); }
+ const_iterator end() const { return pages_.end(); }
+
+ size_t size() const { return pages_.size(); }
+
+ bool is_large() const { return type_ == PageType::kLarge; }
+ size_t index() const { return index_; }
+
+ RawHeap* raw_heap() { return heap_; }
+ const RawHeap* raw_heap() const { return heap_; }
+
+ // Page manipulation functions.
+ void AddPage(BasePage*);
+ void RemovePage(BasePage*);
+ Pages RemoveAllPages();
+
+ protected:
+ enum class PageType { kNormal, kLarge };
+ explicit BaseSpace(RawHeap* heap, size_t index, PageType type);
+
+ private:
+ RawHeap* heap_;
+ Pages pages_;
+ const size_t index_;
+ const PageType type_;
+};
+
+class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
+ public:
+ class LinearAllocationBuffer {
+ public:
+ Address Allocate(size_t alloc_size) {
+ DCHECK_GE(size_, alloc_size);
+ Address result = start_;
+ start_ += alloc_size;
+ size_ -= alloc_size;
+ return result;
+ }
+
+ void Set(Address ptr, size_t size) {
+ start_ = ptr;
+ size_ = size;
+ }
+
+ Address start() const { return start_; }
+ size_t size() const { return size_; }
+
+ private:
+ Address start_ = nullptr;
+ size_t size_ = 0;
+ };
+
+ static NormalPageSpace* From(BaseSpace* space) {
+ DCHECK(!space->is_large());
+ return static_cast<NormalPageSpace*>(space);
+ }
+ static const NormalPageSpace* From(const BaseSpace* space) {
+ return From(const_cast<BaseSpace*>(space));
+ }
+
+ NormalPageSpace(RawHeap* heap, size_t index);
+
+ void AddToFreeList(void*, size_t);
+ void ResetLinearAllocationBuffer();
+
+ LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
+ const LinearAllocationBuffer& linear_allocation_buffer() const {
+ return current_lab_;
+ }
+
+ FreeList& free_list() { return free_list_; }
+ const FreeList& free_list() const { return free_list_; }
+
+ private:
+ LinearAllocationBuffer current_lab_;
+ FreeList free_list_;
+};
+
+class V8_EXPORT_PRIVATE LargePageSpace final : public BaseSpace {
+ public:
+ static LargePageSpace* From(BaseSpace* space) {
+ DCHECK(space->is_large());
+ return static_cast<LargePageSpace*>(space);
+ }
+ static const LargePageSpace* From(const BaseSpace* space) {
+ return From(const_cast<BaseSpace*>(space));
+ }
+
+ LargePageSpace(RawHeap* heap, size_t index);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_SPACE_H_
diff --git a/deps/v8/src/heap/cppgc/heap-visitor.h b/deps/v8/src/heap/cppgc/heap-visitor.h
new file mode 100644
index 0000000000..7fcbc1b980
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-visitor.h
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_VISITOR_H_
+#define V8_HEAP_CPPGC_HEAP_VISITOR_H_
+
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// Visitor for heap, which also implements the accept (traverse) interface.
+// Implements preorder traversal of the heap. The order of traversal is defined.
+// Implemented as a CRTP visitor to avoid virtual calls and support better
+// inlining.
+template <typename Derived>
+class HeapVisitor {
+ public:
+ void Traverse(RawHeap* heap) {
+ if (VisitHeapImpl(heap)) return;
+ for (auto& space : *heap) {
+ Traverse(space.get());
+ }
+ }
+
+ void Traverse(BaseSpace* space) {
+ const bool is_stopped =
+ space->is_large()
+ ? VisitLargePageSpaceImpl(LargePageSpace::From(space))
+ : VisitNormalPageSpaceImpl(NormalPageSpace::From(space));
+ if (is_stopped) return;
+ for (auto* page : *space) {
+ Traverse(page);
+ }
+ }
+
+ void Traverse(BasePage* page) {
+ if (page->is_large()) {
+ auto* large_page = LargePage::From(page);
+ if (VisitLargePageImpl(large_page)) return;
+ VisitHeapObjectHeaderImpl(large_page->ObjectHeader());
+ } else {
+ auto* normal_page = NormalPage::From(page);
+ if (VisitNormalPageImpl(normal_page)) return;
+ for (auto& header : *normal_page) {
+ VisitHeapObjectHeaderImpl(&header);
+ }
+ }
+ }
+
+ protected:
+ // Visitor functions return true if no deeper processing is required.
+ // Users are supposed to override functions that need special treatment.
+ bool VisitHeap(RawHeap*) { return false; }
+ bool VisitNormalPageSpace(NormalPageSpace*) { return false; }
+ bool VisitLargePageSpace(LargePageSpace*) { return false; }
+ bool VisitNormalPage(NormalPage*) { return false; }
+ bool VisitLargePage(LargePage*) { return false; }
+ bool VisitHeapObjectHeader(HeapObjectHeader*) { return false; }
+
+ private:
+ Derived& ToDerived() { return static_cast<Derived&>(*this); }
+
+ bool VisitHeapImpl(RawHeap* heap) { return ToDerived().VisitHeap(heap); }
+ bool VisitNormalPageSpaceImpl(NormalPageSpace* space) {
+ return ToDerived().VisitNormalPageSpace(space);
+ }
+ bool VisitLargePageSpaceImpl(LargePageSpace* space) {
+ return ToDerived().VisitLargePageSpace(space);
+ }
+ bool VisitNormalPageImpl(NormalPage* page) {
+ return ToDerived().VisitNormalPage(page);
+ }
+ bool VisitLargePageImpl(LargePage* page) {
+ return ToDerived().VisitLargePage(page);
+ }
+ bool VisitHeapObjectHeaderImpl(HeapObjectHeader* hoh) {
+ return ToDerived().VisitHeapObjectHeader(hoh);
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_VISITOR_H_
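
ObjectSizeCounter in heap.cc below follows the intended usage pattern for this CRTP visitor: derive privately, befriend HeapVisitor, and override only the hooks of interest. A hypothetical sketch (PageCounter is not part of this patch):

    class PageCounter : private HeapVisitor<PageCounter> {
      friend class HeapVisitor<PageCounter>;

     public:
      size_t CountPages(RawHeap* heap) {
        count_ = 0;
        Traverse(heap);
        return count_;
      }

     private:
      // Returning true stops traversal into the page's object headers.
      bool VisitNormalPage(NormalPage*) {
        ++count_;
        return true;
      }
      bool VisitLargePage(LargePage*) {
        ++count_;
        return true;
      }

      size_t count_ = 0;
    };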
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index e60cb15573..ee400cee28 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -6,23 +6,131 @@
#include <memory>
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/stack.h"
+#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
-std::unique_ptr<Heap> Heap::Create() {
- return std::make_unique<internal::Heap>();
+namespace {
+
+void VerifyCustomSpaces(
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces) {
+ // Ensures that user-provided custom spaces have indices that form a sequence
+ // starting at 0.
+#ifdef DEBUG
+ for (size_t i = 0; i < custom_spaces.size(); ++i) {
+ DCHECK_EQ(i, custom_spaces[i]->GetCustomSpaceIndex().value);
+ }
+#endif // DEBUG
+}
+
+} // namespace
+
+std::unique_ptr<Heap> Heap::Create(cppgc::Heap::HeapOptions options) {
+ VerifyCustomSpaces(options.custom_spaces);
+ return std::make_unique<internal::Heap>(options.custom_spaces.size());
+}
+
+void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
+ Heap::StackState stack_state) {
+ internal::Heap::From(this)->CollectGarbage({stack_state});
}
namespace internal {
-void Heap::CollectGarbage() {
- for (HeapObjectHeader* header : objects_) {
- header->Finalize();
- free(header);
+namespace {
+
+class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
+ friend class HeapVisitor<ObjectSizeCounter>;
+
+ public:
+ size_t GetSize(RawHeap* heap) {
+ Traverse(heap);
+ return accumulated_size_;
+ }
+
+ private:
+ static size_t ObjectSize(const HeapObjectHeader* header) {
+ const size_t size =
+ header->IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(header))
+ ->PayloadSize()
+ : header->GetSize();
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+ return size - sizeof(HeapObjectHeader);
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ accumulated_size_ += ObjectSize(header);
+ return true;
}
- objects_.clear();
+
+ size_t accumulated_size_ = 0;
+};
+
+} // namespace
+
+// static
+cppgc::LivenessBroker LivenessBrokerFactory::Create() {
+ return cppgc::LivenessBroker();
+}
+
+Heap::Heap(size_t custom_spaces)
+ : raw_heap_(this, custom_spaces),
+ page_backend_(std::make_unique<PageBackend>(&system_allocator_)),
+ object_allocator_(&raw_heap_),
+ sweeper_(&raw_heap_),
+ stack_(std::make_unique<Stack>(v8::base::Stack::GetStackStart())),
+ prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()) {}
+
+Heap::~Heap() {
+ NoGCScope no_gc(this);
+ // Finish already running GC if any, but don't finalize live objects.
+ sweeper_.Finish();
+}
+
+void Heap::CollectGarbage(GCConfig config) {
+ if (in_no_gc_scope()) return;
+
+ epoch_++;
+
+ // TODO(chromium:1056170): Replace with proper mark-sweep algorithm.
+ // "Marking".
+ marker_ = std::make_unique<Marker>(this);
+ marker_->StartMarking(Marker::MarkingConfig(config.stack_state));
+ marker_->FinishMarking();
+ // "Sweeping and finalization".
+ {
+    // Prefinalizers are forbidden from allocating objects.
+ NoAllocationScope no_allocation_scope_(this);
+ marker_->ProcessWeakness();
+ prefinalizer_handler_->InvokePreFinalizers();
+ }
+ marker_.reset();
+ {
+ NoGCScope no_gc(this);
+ sweeper_.Start(Sweeper::Config::kAtomic);
+ }
+}
+
+size_t Heap::ObjectPayloadSize() const {
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+}
+
+Heap::NoGCScope::NoGCScope(Heap* heap) : heap_(heap) { heap_->no_gc_scope_++; }
+
+Heap::NoGCScope::~NoGCScope() { heap_->no_gc_scope_--; }
+
+Heap::NoAllocationScope::NoAllocationScope(Heap* heap) : heap_(heap) {
+ heap_->no_allocation_scope_++;
}
+Heap::NoAllocationScope::~NoAllocationScope() { heap_->no_allocation_scope_--; }
} // namespace internal
} // namespace cppgc
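
ForceGarbageCollectionSlow() above is the embedder-facing entry point into the CollectGarbage() cycle implemented here. A hedged usage sketch (the source and reason strings are arbitrary):

    // cppgc::Heap* heap = ...;
    // heap->ForceGarbageCollectionSlow(
    //     "example-component", "testing",
    //     cppgc::Heap::StackState::kNoHeapPointers);  // stack holds no refs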
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index baf70d8f4e..fa19b74be5 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -5,28 +5,143 @@
#ifndef V8_HEAP_CPPGC_HEAP_H_
#define V8_HEAP_CPPGC_HEAP_H_
+#include <memory>
#include <vector>
-#include "include/cppgc/gc-info.h"
#include "include/cppgc/heap.h"
+#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/liveness-broker.h"
+#include "src/base/page-allocator.h"
#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
namespace internal {
+class Stack;
+
+class V8_EXPORT_PRIVATE LivenessBrokerFactory {
+ public:
+ static LivenessBroker Create();
+};
+
class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
public:
+  // NoGCScope allows going over limits and avoids triggering garbage
+  // collection, whether triggered through allocations or even explicitly.
+ class V8_EXPORT_PRIVATE NoGCScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoGCScope(Heap* heap);
+ ~NoGCScope();
+
+ NoGCScope(const NoGCScope&) = delete;
+ NoGCScope& operator=(const NoGCScope&) = delete;
+
+ private:
+ Heap* const heap_;
+ };
+
+  // NoAllocationScope is used in debug mode to catch unwanted allocations,
+  // e.g. allocations during GC.
+ class V8_EXPORT_PRIVATE NoAllocationScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoAllocationScope(Heap* heap);
+ ~NoAllocationScope();
+
+ NoAllocationScope(const NoAllocationScope&) = delete;
+ NoAllocationScope& operator=(const NoAllocationScope&) = delete;
+
+ private:
+ Heap* const heap_;
+ };
+
+ struct GCConfig {
+ using StackState = Heap::StackState;
+
+ static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
+
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ };
+
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
- Heap() = default;
- ~Heap() final = default;
+ explicit Heap(size_t custom_spaces);
+ ~Heap() final;
inline void* Allocate(size_t size, GCInfoIndex index);
+ inline void* Allocate(size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index);
- void CollectGarbage();
+ void CollectGarbage(GCConfig config = GCConfig::Default());
+
+ PreFinalizerHandler* prefinalizer_handler() {
+ return prefinalizer_handler_.get();
+ }
+
+ PersistentRegion& GetStrongPersistentRegion() {
+ return strong_persistent_region_;
+ }
+ const PersistentRegion& GetStrongPersistentRegion() const {
+ return strong_persistent_region_;
+ }
+ PersistentRegion& GetWeakPersistentRegion() {
+ return weak_persistent_region_;
+ }
+ const PersistentRegion& GetWeakPersistentRegion() const {
+ return weak_persistent_region_;
+ }
+
+ RawHeap& raw_heap() { return raw_heap_; }
+ const RawHeap& raw_heap() const { return raw_heap_; }
+
+ Stack* stack() { return stack_.get(); }
+
+ PageBackend* page_backend() { return page_backend_.get(); }
+ const PageBackend* page_backend() const { return page_backend_.get(); }
+
+ Sweeper& sweeper() { return sweeper_; }
+
+ size_t epoch() const { return epoch_; }
+
+ size_t ObjectPayloadSize() const;
+
+ // Temporary getter until proper visitation of on-stack objects is
+ // implemented.
+ std::vector<HeapObjectHeader*>& objects() { return objects_; }
private:
+ bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+ bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
+
+ RawHeap raw_heap_;
+
+ v8::base::PageAllocator system_allocator_;
+ std::unique_ptr<PageBackend> page_backend_;
+ ObjectAllocator object_allocator_;
+ Sweeper sweeper_;
+
+ std::unique_ptr<Stack> stack_;
+ std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
+ std::unique_ptr<Marker> marker_;
std::vector<HeapObjectHeader*> objects_;
+
+ PersistentRegion strong_persistent_region_;
+ PersistentRegion weak_persistent_region_;
+
+ size_t epoch_ = 0;
+
+ size_t no_gc_scope_ = 0;
+ size_t no_allocation_scope_ = 0;
};
} // namespace internal
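
The two scopes above are plain RAII counters on the Heap: NoGCScope makes CollectGarbage() return early and NoAllocationScope trips the is_allocation_allowed() DCHECK in Heap::Allocate(). A hedged sketch of their intended use inside the collector:

    // internal::Heap* heap = ...;
    // {
    //   internal::Heap::NoAllocationScope no_allocation(heap);
    //   // e.g. while running prefinalizers: allocating here hits the DCHECK.
    // }
    // {
    //   internal::Heap::NoGCScope no_gc(heap);
    //   heap->CollectGarbage();  // bails out while the scope is alive
    // }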
diff --git a/deps/v8/src/heap/cppgc/liveness-broker.cc b/deps/v8/src/heap/cppgc/liveness-broker.cc
new file mode 100644
index 0000000000..bb912eb329
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/liveness-broker.cc
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/liveness-broker.h"
+
+#include "src/heap/cppgc/heap-object-header-inl.h"
+
+namespace cppgc {
+
+bool LivenessBroker::IsHeapObjectAliveImpl(const void* payload) const {
+ return internal::HeapObjectHeader::FromPayload(payload).IsMarked();
+}
+
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/logging.cc b/deps/v8/src/heap/cppgc/logging.cc
new file mode 100644
index 0000000000..e98ca28dfb
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/logging.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/logging.h"
+#include "include/cppgc/source-location.h"
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+void DCheckImpl(const char* message, const SourceLocation& loc) {
+ V8_Dcheck(loc.FileName(), static_cast<int>(loc.Line()), message);
+}
+
+void FatalImpl(const char* message, const SourceLocation& loc) {
+#if DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()), "Check failed: %s.",
+ message);
+#elif !defined(OFFICIAL_BUILD)
+ V8_Fatal("Check failed: %s.", message);
+#else
+ V8_Fatal("ignored");
+#endif
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
new file mode 100644
index 0000000000..5a30c89f0d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -0,0 +1,152 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marker.h"
+
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+template <typename Worklist, typename Callback>
+bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
+ Callback callback, int task_id) {
+ const size_t kDeadlineCheckInterval = 1250;
+
+ size_t processed_callback_count = 0;
+ typename Worklist::View view(worklist, task_id);
+ typename Worklist::EntryType item;
+ while (view.Pop(&item)) {
+ callback(item);
+ if (++processed_callback_count == kDeadlineCheckInterval) {
+ if (deadline <= v8::base::TimeTicks::Now()) {
+ return false;
+ }
+ processed_callback_count = 0;
+ }
+ }
+ return true;
+}
+} // namespace
+
+constexpr int Marker::kMutatorThreadId;
+
+Marker::Marker(Heap* heap)
+ : heap_(heap), marking_visitor_(CreateMutatorThreadMarkingVisitor()) {}
+
+Marker::~Marker() {
+ // The fixed point iteration may have found not-fully-constructed objects.
+ // Such objects should have already been found through the stack scan though
+ // and should thus already be marked.
+ if (!not_fully_constructed_worklist_.IsEmpty()) {
+#if DEBUG
+ DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state_);
+ NotFullyConstructedItem item;
+ NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
+ kMutatorThreadId);
+ while (view.Pop(&item)) {
+ // TODO(chromium:1056170): uncomment following check after implementing
+ // FromInnerAddress.
+ //
+ // HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
+ // reinterpret_cast<Address>(const_cast<void*>(item)));
+ // DCHECK(header->IsMarked())
+ }
+#else
+ not_fully_constructed_worklist_.Clear();
+#endif
+ }
+}
+
+void Marker::StartMarking(MarkingConfig config) {
+ config_ = config;
+ VisitRoots();
+}
+
+void Marker::FinishMarking() {
+ if (config_.stack_state_ == MarkingConfig::StackState::kNoHeapPointers) {
+ FlushNotFullyConstructedObjects();
+ }
+ AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+}
+
+void Marker::ProcessWeakness() {
+ heap_->GetWeakPersistentRegion().Trace(marking_visitor_.get());
+
+ // Call weak callbacks on objects that may now be pointing to dead objects.
+ WeakCallbackItem item;
+ LivenessBroker broker = LivenessBrokerFactory::Create();
+ WeakCallbackWorklist::View view(&weak_callback_worklist_, kMutatorThreadId);
+ while (view.Pop(&item)) {
+ item.callback(broker, item.parameter);
+ }
+ // Weak callbacks should not add any new objects for marking.
+ DCHECK(marking_worklist_.IsEmpty());
+}
+
+void Marker::VisitRoots() {
+ heap_->GetStrongPersistentRegion().Trace(marking_visitor_.get());
+ if (config_.stack_state_ != MarkingConfig::StackState::kNoHeapPointers)
+ heap_->stack()->IteratePointers(marking_visitor_.get());
+}
+
+std::unique_ptr<MutatorThreadMarkingVisitor>
+Marker::CreateMutatorThreadMarkingVisitor() {
+ return std::make_unique<MutatorThreadMarkingVisitor>(this);
+}
+
+bool Marker::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
+ MutatorThreadMarkingVisitor* visitor = marking_visitor_.get();
+ v8::base::TimeTicks deadline = v8::base::TimeTicks::Now() + duration;
+
+ do {
+ // Convert |previously_not_fully_constructed_worklist_| to
+ // |marking_worklist_|. This merely re-adds items with the proper
+ // callbacks.
+ if (!DrainWorklistWithDeadline(
+ deadline, &previously_not_fully_constructed_worklist_,
+ [visitor](NotFullyConstructedItem& item) {
+ visitor->DynamicallyMarkAddress(
+ reinterpret_cast<ConstAddress>(item));
+ },
+ kMutatorThreadId))
+ return false;
+
+ if (!DrainWorklistWithDeadline(
+ deadline, &marking_worklist_,
+ [visitor](const MarkingItem& item) {
+ const HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ DCHECK(!MutatorThreadMarkingVisitor::IsInConstruction(header));
+ item.callback(visitor, item.base_object_payload);
+ visitor->AccountMarkedBytes(header);
+ },
+ kMutatorThreadId))
+ return false;
+ } while (!marking_worklist_.IsLocalViewEmpty(kMutatorThreadId));
+
+ return true;
+}
+
+void Marker::FlushNotFullyConstructedObjects() {
+ if (!not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId)) {
+ not_fully_constructed_worklist_.FlushToGlobal(kMutatorThreadId);
+ previously_not_fully_constructed_worklist_.MergeGlobalPool(
+ &not_fully_constructed_worklist_);
+ }
+ DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId));
+}
+
+void Marker::ClearAllWorklistsForTesting() {
+ marking_worklist_.Clear();
+ not_fully_constructed_worklist_.Clear();
+ previously_not_fully_constructed_worklist_.Clear();
+ weak_callback_worklist_.Clear();
+}
+
+} // namespace internal
+} // namespace cppgc
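
DrainWorklistWithDeadline() above amortizes the clock read by only checking the deadline after every kDeadlineCheckInterval processed items. A generic standalone sketch of that pattern using the standard library (hypothetical names):

    #include <chrono>
    #include <cstddef>
    #include <deque>

    template <typename Item, typename Callback>
    bool DrainWithDeadlineSketch(std::deque<Item>* items, Callback callback,
                                 std::chrono::steady_clock::time_point deadline) {
      constexpr size_t kCheckInterval = 1250;  // same interval as above
      size_t processed = 0;
      while (!items->empty()) {
        callback(items->front());
        items->pop_front();
        if (++processed == kCheckInterval) {
          if (std::chrono::steady_clock::now() >= deadline) return false;
          processed = 0;
        }
      }
      return true;  // drained everything within the deadline
    }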
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
new file mode 100644
index 0000000000..c18c23df2c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -0,0 +1,121 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MARKER_H_
+#define V8_HEAP_CPPGC_MARKER_H_
+
+#include <memory>
+
+#include "include/cppgc/heap.h"
+#include "include/cppgc/trace-trait.h"
+#include "include/cppgc/visitor.h"
+#include "src/base/platform/time.h"
+#include "src/heap/cppgc/worklist.h"
+
+namespace cppgc {
+namespace internal {
+
+class Heap;
+class MutatorThreadMarkingVisitor;
+
+class V8_EXPORT_PRIVATE Marker {
+ static constexpr int kNumConcurrentMarkers = 0;
+ static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
+
+ public:
+ static constexpr int kMutatorThreadId = 0;
+
+ using MarkingItem = cppgc::TraceDescriptor;
+ using NotFullyConstructedItem = const void*;
+ struct WeakCallbackItem {
+ cppgc::WeakCallback callback;
+ const void* parameter;
+ };
+
+  // A segment size of 512 entries is necessary to avoid throughput regressions.
+  // Since the work list is currently a temporary object, this is not a problem.
+ using MarkingWorklist =
+ Worklist<MarkingItem, 512 /* local entries */, kNumMarkers>;
+ using NotFullyConstructedWorklist =
+ Worklist<NotFullyConstructedItem, 16 /* local entries */, kNumMarkers>;
+ using WeakCallbackWorklist =
+ Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+
+ struct MarkingConfig {
+ using StackState = cppgc::Heap::StackState;
+ enum class IncrementalMarking : uint8_t { kDisabled };
+ enum class ConcurrentMarking : uint8_t { kDisabled };
+
+ static MarkingConfig Default() {
+ return {StackState::kMayContainHeapPointers,
+ IncrementalMarking::kDisabled, ConcurrentMarking::kDisabled};
+ }
+
+ explicit MarkingConfig(StackState stack_state)
+ : MarkingConfig(stack_state, IncrementalMarking::kDisabled,
+ ConcurrentMarking::kDisabled) {}
+
+ MarkingConfig(StackState stack_state,
+ IncrementalMarking incremental_marking_state,
+ ConcurrentMarking concurrent_marking_state)
+ : stack_state_(stack_state),
+ incremental_marking_state_(incremental_marking_state),
+ concurrent_marking_state_(concurrent_marking_state) {}
+
+ StackState stack_state_;
+ IncrementalMarking incremental_marking_state_;
+ ConcurrentMarking concurrent_marking_state_;
+ };
+
+ explicit Marker(Heap* heap);
+ virtual ~Marker();
+
+ Marker(const Marker&) = delete;
+ Marker& operator=(const Marker&) = delete;
+
+ // Initialize marking according to the given config. This method will
+ // trigger incremental/concurrent marking if needed.
+ void StartMarking(MarkingConfig config);
+  // Finalize marking. This method stops incremental/concurrent marking,
+  // if it exists, and performs atomic pause marking.
+ void FinishMarking();
+
+ void ProcessWeakness();
+
+ Heap* heap() { return heap_; }
+ MarkingWorklist* marking_worklist() { return &marking_worklist_; }
+ NotFullyConstructedWorklist* not_fully_constructed_worklist() {
+ return &not_fully_constructed_worklist_;
+ }
+ WeakCallbackWorklist* weak_callback_worklist() {
+ return &weak_callback_worklist_;
+ }
+
+ void ClearAllWorklistsForTesting();
+
+ protected:
+ virtual std::unique_ptr<MutatorThreadMarkingVisitor>
+ CreateMutatorThreadMarkingVisitor();
+
+ private:
+ void VisitRoots();
+
+ bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+ void FlushNotFullyConstructedObjects();
+
+ Heap* const heap_;
+ MarkingConfig config_ = MarkingConfig::Default();
+
+ std::unique_ptr<MutatorThreadMarkingVisitor> marking_visitor_;
+
+ MarkingWorklist marking_worklist_;
+ NotFullyConstructedWorklist not_fully_constructed_worklist_;
+ NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ WeakCallbackWorklist weak_callback_worklist_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MARKER_H_
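The Marker declared above is driven in three steps: StartMarking sets up the visitor and worklists, FinishMarking performs the atomic-pause marking, and ProcessWeakness runs the collected weak callbacks. A minimal driver sketch, assuming the caller already owns a set-up cppgc::internal::Heap (how heap.cc actually wires this up is not shown in this hunk):

  void RunMarkingCycle(cppgc::internal::Heap* heap) {
    cppgc::internal::Marker marker(heap);
    // Conservative configuration: the stack may still contain heap pointers.
    marker.StartMarking(cppgc::internal::Marker::MarkingConfig(
        cppgc::Heap::StackState::kMayContainHeapPointers));
    marker.FinishMarking();    // Atomic pause; drains the marking worklist.
    marker.ProcessWeakness();  // Invokes the registered weak callbacks.
  }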
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
new file mode 100644
index 0000000000..9647f9b3ca
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -0,0 +1,143 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marking-visitor.h"
+
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/internal/accessors.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+bool MarkingVisitor::IsInConstruction(const HeapObjectHeader& header) {
+ return header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>();
+}
+
+MarkingVisitor::MarkingVisitor(Marker* marking_handler, int task_id)
+ : marker_(marking_handler),
+ marking_worklist_(marking_handler->marking_worklist(), task_id),
+ not_fully_constructed_worklist_(
+ marking_handler->not_fully_constructed_worklist(), task_id),
+ weak_callback_worklist_(marking_handler->weak_callback_worklist(),
+ task_id) {}
+
+void MarkingVisitor::AccountMarkedBytes(const HeapObjectHeader& header) {
+ marked_bytes_ +=
+ header.IsLargeObject()
+ ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
+ ->PayloadSize()
+ : header.GetSize();
+}
+
+void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
+ DCHECK_NOT_NULL(object);
+ if (desc.base_object_payload ==
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject) {
+ // This means that the object is not yet fully constructed. See comments
+ // on GarbageCollectedMixin for how those objects are handled.
+ not_fully_constructed_worklist_.Push(object);
+ return;
+ }
+ MarkHeader(&HeapObjectHeader::FromPayload(
+ const_cast<void*>(desc.base_object_payload)),
+ desc);
+}
+
+void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
+ // Filter out already marked values. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (desc.base_object_payload !=
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject &&
+ HeapObjectHeader::FromPayload(desc.base_object_payload)
+ .IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+ return;
+ RegisterWeakCallback(weak_callback, weak_member);
+}
+
+void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
+ Visit(object, desc);
+}
+
+void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root) {
+ if (desc.base_object_payload ==
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject) {
+ // This method is only called at the end of marking. If the object is in
+ // construction, then it should be reachable from the stack.
+ return;
+ }
+ // Since weak roots are only traced at the end of marking, we can execute
+ // the callback instead of registering it.
+ weak_callback(LivenessBrokerFactory::Create(), weak_root);
+}
+
+void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
+ TraceDescriptor desc) {
+ DCHECK(header);
+ DCHECK_NOT_NULL(desc.callback);
+
+ if (IsInConstruction(*header)) {
+ not_fully_constructed_worklist_.Push(header->Payload());
+ } else if (MarkHeaderNoTracing(header)) {
+ marking_worklist_.Push(desc);
+ }
+}
+
+bool MarkingVisitor::MarkHeaderNoTracing(HeapObjectHeader* header) {
+ DCHECK(header);
+ // A GC should only mark the objects that belong in its heap.
+ DCHECK_EQ(marker_->heap(), BasePage::FromPayload(header)->heap());
+ // Never mark free space objects. Doing so would, e.g., amount to marking a
+ // promptly freed backing store.
+ DCHECK(!header->IsFree());
+
+ return header->TryMarkAtomic();
+}
+
+void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
+ weak_callback_worklist_.Push({callback, object});
+}
+
+void MarkingVisitor::FlushWorklists() {
+ marking_worklist_.FlushToGlobal();
+ not_fully_constructed_worklist_.FlushToGlobal();
+ weak_callback_worklist_.FlushToGlobal();
+}
+
+void MarkingVisitor::DynamicallyMarkAddress(ConstAddress address) {
+ for (auto* header : marker_->heap()->objects()) {
+ if (address >= header->Payload() &&
+ address < (header->Payload() + header->GetSize())) {
+ header->TryMarkAtomic();
+ }
+ }
+ // TODO(chromium:1056170): Implement dynamically getting HeapObjectHeader
+ // for handling previously_not_fully_constructed objects. Requires object
+ // start bitmap.
+}
+
+void MarkingVisitor::VisitPointer(const void* address) {
+ for (auto* header : marker_->heap()->objects()) {
+ if (address >= header->Payload() &&
+ address < (header->Payload() + header->GetSize())) {
+ header->TryMarkAtomic();
+ }
+ }
+ // TODO(chromium:1056170): Implement proper conservative scanning for
+ // on-stack objects. Requires page bloom filter.
+}
+
+MutatorThreadMarkingVisitor::MutatorThreadMarkingVisitor(Marker* marker)
+ : MarkingVisitor(marker, Marker::kMutatorThreadId) {}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
new file mode 100644
index 0000000000..33616b3784
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -0,0 +1,70 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MARKING_VISITOR_H_
+#define V8_HEAP_CPPGC_MARKING_VISITOR_H_
+
+#include "include/cppgc/source-location.h"
+#include "include/cppgc/trace-trait.h"
+#include "include/v8config.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/stack.h"
+#include "src/heap/cppgc/visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+class MarkingVisitor : public VisitorBase, public StackVisitor {
+ public:
+ MarkingVisitor(Marker*, int);
+ virtual ~MarkingVisitor() = default;
+
+ MarkingVisitor(const MarkingVisitor&) = delete;
+ MarkingVisitor& operator=(const MarkingVisitor&) = delete;
+
+ void FlushWorklists();
+
+ void DynamicallyMarkAddress(ConstAddress);
+
+ void AccountMarkedBytes(const HeapObjectHeader&);
+ size_t marked_bytes() const { return marked_bytes_; }
+
+ static bool IsInConstruction(const HeapObjectHeader&);
+
+ protected:
+ void Visit(const void*, TraceDescriptor) override;
+ void VisitWeak(const void*, TraceDescriptor, WeakCallback,
+ const void*) override;
+ void VisitRoot(const void*, TraceDescriptor) override;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
+ const void*) override;
+
+ void VisitPointer(const void*) override;
+
+ private:
+ void MarkHeader(HeapObjectHeader*, TraceDescriptor);
+ bool MarkHeaderNoTracing(HeapObjectHeader*);
+ void RegisterWeakCallback(WeakCallback, const void*) override;
+
+ Marker* const marker_;
+ Marker::MarkingWorklist::View marking_worklist_;
+ Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
+ Marker::WeakCallbackWorklist::View weak_callback_worklist_;
+
+ size_t marked_bytes_;
+};
+
+class V8_EXPORT_PRIVATE MutatorThreadMarkingVisitor : public MarkingVisitor {
+ public:
+ explicit MutatorThreadMarkingVisitor(Marker*);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MARKING_VISITOR_H_
diff --git a/deps/v8/src/heap/cppgc/object-allocator-inl.h b/deps/v8/src/heap/cppgc/object-allocator-inl.h
new file mode 100644
index 0000000000..7d8d126d63
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator-inl.h
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
+#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
+
+#include <new>
+
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
+ const size_t allocation_size =
+ RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
+ const RawHeap::RegularSpaceType type =
+ GetInitialSpaceIndexForSize(allocation_size);
+ return AllocateObjectOnSpace(NormalPageSpace::From(raw_heap_->Space(type)),
+ allocation_size, gcinfo);
+}
+
+void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
+ CustomSpaceIndex space_index) {
+ const size_t allocation_size =
+ RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
+ return AllocateObjectOnSpace(
+ NormalPageSpace::From(raw_heap_->CustomSpace(space_index)),
+ allocation_size, gcinfo);
+}
+
+// static
+RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
+ size_t size) {
+ if (size < 64) {
+ if (size < 32) return RawHeap::RegularSpaceType::kNormal1;
+ return RawHeap::RegularSpaceType::kNormal2;
+ }
+ if (size < 128) return RawHeap::RegularSpaceType::kNormal3;
+ return RawHeap::RegularSpaceType::kNormal4;
+}
+
+void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
+ size_t size, GCInfoIndex gcinfo) {
+ DCHECK_LT(0u, gcinfo);
+
+ NormalPageSpace::LinearAllocationBuffer& current_lab =
+ space->linear_allocation_buffer();
+ if (current_lab.size() < size) {
+ return OutOfLineAllocate(space, size, gcinfo);
+ }
+
+ void* raw = current_lab.Allocate(size);
+ SET_MEMORY_ACCESIBLE(raw, size);
+ auto* header = new (raw) HeapObjectHeader(size, gcinfo);
+
+ NormalPage::From(BasePage::FromPayload(header))
+ ->object_start_bitmap()
+ .SetBit(reinterpret_cast<ConstAddress>(header));
+
+ return header->Payload();
+}
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
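For reference, the bucketing performed by GetInitialSpaceIndexForSize above maps the rounded-up allocation size (payload plus HeapObjectHeader, rounded to kAllocationGranularity) to a normal space; the thresholds come straight from the code, while the concrete byte values in the example are assumptions:

  //   allocation_size <  32  -> RawHeap::RegularSpaceType::kNormal1
  //   allocation_size <  64  -> RawHeap::RegularSpaceType::kNormal2
  //   allocation_size < 128  -> RawHeap::RegularSpaceType::kNormal3
  //   allocation_size >= 128 -> RawHeap::RegularSpaceType::kNormal4
  // Example: a 40-byte payload with an (assumed) 8-byte header rounds up to
  // 48 bytes and is therefore first tried on the kNormal2 space.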
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
new file mode 100644
index 0000000000..df83d8ee9d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -0,0 +1,87 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/object-allocator.h"
+
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/sweeper.h"
+
+namespace cppgc {
+namespace internal {
+namespace {
+
+void* AllocateLargeObject(RawHeap* raw_heap, LargePageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ LargePage* page = LargePage::Create(space, size);
+ auto* header = new (page->ObjectHeader())
+ HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
+
+ return header->Payload();
+}
+
+} // namespace
+
+ObjectAllocator::ObjectAllocator(RawHeap* heap) : raw_heap_(heap) {}
+
+void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ DCHECK_EQ(0, size & kAllocationMask);
+ DCHECK_LE(kFreeListEntrySize, size);
+
+ // 1. If this allocation is big enough, allocate a large object.
+ if (size >= kLargeObjectSizeThreshold) {
+ auto* large_space = LargePageSpace::From(
+ raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ return AllocateLargeObject(raw_heap_, large_space, size, gcinfo);
+ }
+
+ // 2. Try to allocate from the freelist.
+ if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
+ return result;
+ }
+
+ // 3. Lazily sweep pages of this heap until we find a freed area for
+ // this allocation or we finish sweeping all pages of this heap.
+ // TODO(chromium:1056170): Add lazy sweep.
+
+ // 4. Complete sweeping.
+ raw_heap_->heap()->sweeper().Finish();
+
+ // 5. Add a new page to this heap.
+ NormalPage::Create(space);
+
+ // 6. Try to allocate from the freelist. This allocation must succeed.
+ void* result = AllocateFromFreeList(space, size, gcinfo);
+ CPPGC_CHECK(result);
+
+ return result;
+}
+
+void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ const FreeList::Block entry = space->free_list().Allocate(size);
+ if (!entry.address) return nullptr;
+
+ auto& current_lab = space->linear_allocation_buffer();
+ if (current_lab.size()) {
+ space->AddToFreeList(current_lab.start(), current_lab.size());
+ }
+
+ current_lab.Set(static_cast<Address>(entry.address), entry.size);
+ NormalPage::From(BasePage::FromPayload(current_lab.start()))
+ ->object_start_bitmap()
+ .ClearBit(current_lab.start());
+ return AllocateObjectOnSpace(space, size, gcinfo);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
new file mode 100644
index 0000000000..510a935f56
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -0,0 +1,40 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+
+#include "include/cppgc/internal/gc-info.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ObjectAllocator final {
+ public:
+ explicit ObjectAllocator(RawHeap* heap);
+
+ inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
+ inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
+ CustomSpaceIndex space_index);
+
+ private:
+ // Returns the space type tried first for an object of |size| bytes. Returns
+ // the largest regular-object size bucket for large objects.
+ inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
+ size_t size);
+
+ inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo);
+ void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
+ void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
+
+ RawHeap* raw_heap_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
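A sketch of how the allocator interface above is used; the embedder-facing entry point is cppgc::MakeGarbageCollected, so the direct calls and the MyType/gc_info_index names here are purely illustrative assumptions:

  cppgc::internal::ObjectAllocator allocator(&raw_heap);
  // AllocateObject() returns the payload address; the HeapObjectHeader sits
  // immediately in front of it.
  void* payload = allocator.AllocateObject(sizeof(MyType), gc_info_index);
  MyType* object = new (payload) MyType();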
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h b/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h
new file mode 100644
index 0000000000..93243979aa
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h
@@ -0,0 +1,95 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
+#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
+
+#include <algorithm>
+
+#include "src/base/bits.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+
+namespace cppgc {
+namespace internal {
+
+ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
+ Clear();
+}
+
+HeapObjectHeader* ObjectStartBitmap::FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ size_t object_offset =
+ address_maybe_pointing_to_the_middle_of_object - offset_;
+ size_t object_start_number = object_offset / kAllocationGranularity;
+ size_t cell_index = object_start_number / kBitsPerCell;
+ DCHECK_GT(object_start_bit_map_.size(), cell_index);
+ const size_t bit = object_start_number & kCellMask;
+ uint8_t byte = object_start_bit_map_[cell_index] & ((1 << (bit + 1)) - 1);
+ while (!byte && cell_index) {
+ DCHECK_LT(0u, cell_index);
+ byte = object_start_bit_map_[--cell_index];
+ }
+ const int leading_zeroes = v8::base::bits::CountLeadingZeros(byte);
+ object_start_number =
+ (cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
+ object_offset = object_start_number * kAllocationGranularity;
+ return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
+}
+
+void ObjectStartBitmap::SetBit(ConstAddress header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ object_start_bit_map_[cell_index] |= (1 << object_bit);
+}
+
+void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ object_start_bit_map_[cell_index] &= ~(1 << object_bit);
+}
+
+bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ return object_start_bit_map_[cell_index] & (1 << object_bit);
+}
+
+void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
+ size_t* cell_index,
+ size_t* bit) const {
+ const size_t object_offset = header_address - offset_;
+ DCHECK(!(object_offset & kAllocationMask));
+ const size_t object_start_number = object_offset / kAllocationGranularity;
+ *cell_index = object_start_number / kBitsPerCell;
+ DCHECK_GT(kBitmapSize, *cell_index);
+ *bit = object_start_number & kCellMask;
+}
+
+template <typename Callback>
+inline void ObjectStartBitmap::Iterate(Callback callback) const {
+ for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
+ if (!object_start_bit_map_[cell_index]) continue;
+
+ uint8_t value = object_start_bit_map_[cell_index];
+ while (value) {
+ const int trailing_zeroes = v8::base::bits::CountTrailingZeros(value);
+ const size_t object_start_number =
+ (cell_index * kBitsPerCell) + trailing_zeroes;
+ const Address object_address =
+ offset_ + (kAllocationGranularity * object_start_number);
+ callback(object_address);
+ // Clear current object bit in temporary value to advance iteration.
+ value &= ~(1 << (object_start_number & kCellMask));
+ }
+ }
+}
+
+void ObjectStartBitmap::Clear() {
+ std::fill(object_start_bit_map_.begin(), object_start_bit_map_.end(), 0);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
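A worked example of the index arithmetic in ObjectStartIndexAndBit above (kBitsPerCell is 8 per the header; the 16-byte allocation granularity is an assumption used only to make the numbers concrete):

  // Header placed 400 bytes past |offset_|, kAllocationGranularity == 16:
  //   object_offset       = 400
  //   object_start_number = 400 / 16 = 25
  //   cell_index          = 25 / 8   = 3
  //   bit                 = 25 & 7   = 1
  // SetBit() ORs (1 << 1) into object_start_bit_map_[3]; FindHeader() scans
  // these cells backwards to find the closest preceding object start.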
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
new file mode 100644
index 0000000000..1a180a552e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -0,0 +1,80 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
+#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <array>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapObjectHeader;
+
+// A bitmap for recording object starts. Objects have to be allocated at
+// minimum granularity of kGranularity.
+//
+// Depends on internals such as:
+// - kBlinkPageSize
+// - kAllocationGranularity
+class V8_EXPORT_PRIVATE ObjectStartBitmap {
+ public:
+ // Granularity of addresses added to the bitmap.
+ static constexpr size_t Granularity() { return kAllocationGranularity; }
+
+ // Maximum number of entries in the bitmap.
+ static constexpr size_t MaxEntries() {
+ return kReservedForBitmap * kBitsPerCell;
+ }
+
+ explicit inline ObjectStartBitmap(Address offset);
+
+ // Finds an object header based on an
+ // address_maybe_pointing_to_the_middle_of_object. Will search for an object
+ // start in decreasing address order.
+ inline HeapObjectHeader* FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
+
+ inline void SetBit(ConstAddress);
+ inline void ClearBit(ConstAddress);
+ inline bool CheckBit(ConstAddress) const;
+
+ // Iterates all object starts recorded in the bitmap.
+ //
+ // The callback is of type
+ // void(Address)
+ // and is passed the object start address as parameter.
+ template <typename Callback>
+ inline void Iterate(Callback) const;
+
+ // Clear the object start bitmap.
+ inline void Clear();
+
+ private:
+ static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
+ static constexpr size_t kCellMask = kBitsPerCell - 1;
+ static constexpr size_t kBitmapSize =
+ (kPageSize + ((kBitsPerCell * kAllocationGranularity) - 1)) /
+ (kBitsPerCell * kAllocationGranularity);
+ static constexpr size_t kReservedForBitmap =
+ ((kBitmapSize + kAllocationMask) & ~kAllocationMask);
+
+ inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
+
+ Address offset_;
+ // The bitmap contains a bit for every kGranularity-aligned address on
+ // a NormalPage, i.e., for a page of size kBlinkPageSize.
+ std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
diff --git a/deps/v8/src/heap/cppgc/page-memory-inl.h b/deps/v8/src/heap/cppgc/page-memory-inl.h
new file mode 100644
index 0000000000..23ce061b43
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory-inl.h
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
+#define V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
+
+#include "src/heap/cppgc/page-memory.h"
+
+namespace cppgc {
+namespace internal {
+
+// Returns true if the provided allocator supports committing at the required
+// granularity.
+inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
+ return kGuardPageSize % allocator->CommitPageSize() == 0;
+}
+
+Address NormalPageMemoryRegion::Lookup(Address address) const {
+ size_t index = GetIndex(address);
+ if (!page_memories_in_use_[index]) return nullptr;
+ const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
+ return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
+}
+
+Address LargePageMemoryRegion::Lookup(Address address) const {
+ const MemoryRegion writeable_region = GetPageMemory().writeable_region();
+ return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
+}
+
+Address PageMemoryRegion::Lookup(Address address) const {
+ DCHECK(reserved_region().Contains(address));
+ return is_large()
+ ? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
+ : static_cast<const NormalPageMemoryRegion*>(this)->Lookup(
+ address);
+}
+
+PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
+ auto it = set_.upper_bound(address);
+ // This check also covers set_.size() > 0, since for an empty container it
+ // is guaranteed that begin() == end().
+ if (it == set_.begin()) return nullptr;
+ auto* result = std::next(it, -1)->second;
+ if (address < result->reserved_region().end()) return result;
+ return nullptr;
+}
+
+Address PageBackend::Lookup(Address address) const {
+ PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
+ return pmr ? pmr->Lookup(address) : nullptr;
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
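PageMemoryRegionTree::Lookup above finds the region whose reserved range contains an address by taking the predecessor of upper_bound in the map keyed by reserved base. An illustration with made-up addresses:

  //   set_ = { 0x10000 -> A (reserved 0x10000..0x30000),
  //            0x80000 -> B (reserved 0x80000..0x90000) }
  //   Lookup(0x25000): upper_bound -> entry for B, predecessor -> A,
  //                    0x25000 < A.reserved_region().end()  => returns A
  //   Lookup(0x40000): predecessor -> A, 0x40000 >= end of A => returns nullptr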
diff --git a/deps/v8/src/heap/cppgc/page-memory.cc b/deps/v8/src/heap/cppgc/page-memory.cc
new file mode 100644
index 0000000000..66e2812f5c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory.cc
@@ -0,0 +1,211 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/page-memory.h"
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+ if (SupportsCommittingGuardPages(allocator)) {
+ CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kReadWrite));
+ } else {
+ // No guard-page protection in case the allocator cannot commit at the
+ // required granularity; protection is only applied if the allocator supports
+ // committing at that granularity.
+ //
+ // The allocator needs to support committing the overall range.
+ CHECK_EQ(0u,
+ page_memory.overall_region().size() % allocator->CommitPageSize());
+ CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kReadWrite));
+ }
+}
+
+void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+ if (SupportsCommittingGuardPages(allocator)) {
+ // Swap the same region, providing the OS with a chance for fast lookup and
+ // change.
+ CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kNoAccess));
+ } else {
+ // See Unprotect().
+ CHECK_EQ(0u,
+ page_memory.overall_region().size() % allocator->CommitPageSize());
+ CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kNoAccess));
+ }
+}
+
+MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+ size_t allocation_size) {
+ void* region_memory =
+ allocator->AllocatePages(nullptr, allocation_size, kPageSize,
+ PageAllocator::Permission::kNoAccess);
+ const MemoryRegion reserved_region(static_cast<Address>(region_memory),
+ allocation_size);
+ DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
+ return reserved_region;
+}
+
+void FreeMemoryRegion(PageAllocator* allocator,
+ const MemoryRegion& reserved_region) {
+ allocator->FreePages(reserved_region.base(), reserved_region.size());
+}
+
+} // namespace
+
+PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+ MemoryRegion reserved_region, bool is_large)
+ : allocator_(allocator),
+ reserved_region_(reserved_region),
+ is_large_(is_large) {}
+
+PageMemoryRegion::~PageMemoryRegion() {
+ FreeMemoryRegion(allocator_, reserved_region());
+}
+
+// static
+constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
+
+NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
+ : PageMemoryRegion(allocator,
+ ReserveMemoryRegion(
+ allocator, RoundUp(kPageSize * kNumPageRegions,
+ allocator->AllocatePageSize())),
+ false) {
+#ifdef DEBUG
+ for (size_t i = 0; i < kNumPageRegions; ++i) {
+ DCHECK_EQ(false, page_memories_in_use_[i]);
+ }
+#endif // DEBUG
+}
+
+NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
+
+void NormalPageMemoryRegion::Allocate(Address writeable_base) {
+ const size_t index = GetIndex(writeable_base);
+ ChangeUsed(index, true);
+ Unprotect(allocator_, GetPageMemory(index));
+}
+
+void NormalPageMemoryRegion::Free(Address writeable_base) {
+ const size_t index = GetIndex(writeable_base);
+ ChangeUsed(index, false);
+ Protect(allocator_, GetPageMemory(index));
+}
+
+void NormalPageMemoryRegion::UnprotectForTesting() {
+ for (size_t i = 0; i < kNumPageRegions; ++i) {
+ Unprotect(allocator_, GetPageMemory(i));
+ }
+}
+
+LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
+ size_t length)
+ : PageMemoryRegion(allocator,
+ ReserveMemoryRegion(
+ allocator, RoundUp(length + 2 * kGuardPageSize,
+ allocator->AllocatePageSize())),
+ true) {}
+
+LargePageMemoryRegion::~LargePageMemoryRegion() = default;
+
+void LargePageMemoryRegion::UnprotectForTesting() {
+ Unprotect(allocator_, GetPageMemory());
+}
+
+PageMemoryRegionTree::PageMemoryRegionTree() = default;
+
+PageMemoryRegionTree::~PageMemoryRegionTree() = default;
+
+void PageMemoryRegionTree::Add(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto result = set_.emplace(region->reserved_region().base(), region);
+ USE(result);
+ DCHECK(result.second);
+}
+
+void PageMemoryRegionTree::Remove(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto size = set_.erase(region->reserved_region().base());
+ USE(size);
+ DCHECK_EQ(1u, size);
+}
+
+NormalPageMemoryPool::NormalPageMemoryPool() = default;
+
+NormalPageMemoryPool::~NormalPageMemoryPool() = default;
+
+void NormalPageMemoryPool::Add(size_t bucket, NormalPageMemoryRegion* pmr,
+ Address writeable_base) {
+ DCHECK_LT(bucket, kNumPoolBuckets);
+ pool_[bucket].push_back(std::make_pair(pmr, writeable_base));
+}
+
+std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
+ size_t bucket) {
+ DCHECK_LT(bucket, kNumPoolBuckets);
+ if (pool_[bucket].empty()) return {nullptr, nullptr};
+ std::pair<NormalPageMemoryRegion*, Address> pair = pool_[bucket].back();
+ pool_[bucket].pop_back();
+ return pair;
+}
+
+PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+
+PageBackend::~PageBackend() = default;
+
+Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
+ std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
+ if (!result.first) {
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
+ for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
+ page_pool_.Add(bucket, pmr.get(),
+ pmr->GetPageMemory(i).writeable_region().base());
+ }
+ page_memory_region_tree_.Add(pmr.get());
+ normal_page_memory_regions_.push_back(std::move(pmr));
+ return AllocateNormalPageMemory(bucket);
+ }
+ result.first->Allocate(result.second);
+ return result.second;
+}
+
+void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
+ auto* pmr = static_cast<NormalPageMemoryRegion*>(
+ page_memory_region_tree_.Lookup(writeable_base));
+ pmr->Free(writeable_base);
+ page_pool_.Add(bucket, pmr, writeable_base);
+}
+
+Address PageBackend::AllocateLargePageMemory(size_t size) {
+ auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
+ const PageMemory pm = pmr->GetPageMemory();
+ Unprotect(allocator_, pm);
+ page_memory_region_tree_.Add(pmr.get());
+ large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
+ return pm.writeable_region().base();
+}
+
+void PageBackend::FreeLargePageMemory(Address writeable_base) {
+ PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
+ page_memory_region_tree_.Remove(pmr);
+ auto size = large_page_memory_regions_.erase(pmr);
+ USE(size);
+ DCHECK_EQ(1u, size);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
new file mode 100644
index 0000000000..f3bc685fa3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -0,0 +1,237 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_H_
+#define V8_HEAP_CPPGC_PAGE_MEMORY_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE MemoryRegion final {
+ public:
+ MemoryRegion() = default;
+ MemoryRegion(Address base, size_t size) : base_(base), size_(size) {
+ DCHECK(base);
+ DCHECK_LT(0u, size);
+ }
+
+ Address base() const { return base_; }
+ size_t size() const { return size_; }
+ Address end() const { return base_ + size_; }
+
+ bool Contains(Address addr) const {
+ return (reinterpret_cast<uintptr_t>(addr) -
+ reinterpret_cast<uintptr_t>(base_)) < size_;
+ }
+
+ bool Contains(const MemoryRegion& other) const {
+ return base_ <= other.base() && other.end() <= end();
+ }
+
+ private:
+ Address base_ = nullptr;
+ size_t size_ = 0;
+};
+
+// PageMemory provides the backing of a single normal or large page.
+class V8_EXPORT_PRIVATE PageMemory final {
+ public:
+ PageMemory(MemoryRegion overall, MemoryRegion writeable)
+ : overall_(overall), writable_(writeable) {
+ DCHECK(overall.Contains(writeable));
+ }
+
+ const MemoryRegion writeable_region() const { return writable_; }
+ const MemoryRegion overall_region() const { return overall_; }
+
+ private:
+ MemoryRegion overall_;
+ MemoryRegion writable_;
+};
+
+class V8_EXPORT_PRIVATE PageMemoryRegion {
+ public:
+ virtual ~PageMemoryRegion();
+
+ const MemoryRegion reserved_region() const { return reserved_region_; }
+ bool is_large() const { return is_large_; }
+
+ // Looks up the writeable base for an |address| that is contained in this
+ // PageMemoryRegion. Filters out addresses that fall into non-writeable
+ // regions (e.g. guard pages).
+ inline Address Lookup(Address address) const;
+
+ // Disallow copy/move.
+ PageMemoryRegion(const PageMemoryRegion&) = delete;
+ PageMemoryRegion& operator=(const PageMemoryRegion&) = delete;
+
+ virtual void UnprotectForTesting() = 0;
+
+ protected:
+ PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+
+ PageAllocator* const allocator_;
+ const MemoryRegion reserved_region_;
+ const bool is_large_;
+};
+
+// NormalPageMemoryRegion serves kNumPageRegions normal-sized PageMemory objects.
+class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
+ public:
+ static constexpr size_t kNumPageRegions = 10;
+
+ explicit NormalPageMemoryRegion(PageAllocator*);
+ ~NormalPageMemoryRegion() override;
+
+ const PageMemory GetPageMemory(size_t index) const {
+ DCHECK_LT(index, kNumPageRegions);
+ return PageMemory(
+ MemoryRegion(reserved_region().base() + kPageSize * index, kPageSize),
+ MemoryRegion(
+ reserved_region().base() + kPageSize * index + kGuardPageSize,
+ kPageSize - 2 * kGuardPageSize));
+ }
+
+ // Allocates a normal page at |writeable_base| address. Changes page
+ // protection.
+ void Allocate(Address writeable_base);
+
+ // Frees a normal page at the |writeable_base| address. Changes page
+ // protection.
+ void Free(Address);
+
+ inline Address Lookup(Address) const;
+
+ void UnprotectForTesting() final;
+
+ private:
+ void ChangeUsed(size_t index, bool value) {
+ DCHECK_LT(index, kNumPageRegions);
+ DCHECK_EQ(value, !page_memories_in_use_[index]);
+ page_memories_in_use_[index] = value;
+ }
+
+ size_t GetIndex(Address address) const {
+ return static_cast<size_t>(address - reserved_region().base()) >>
+ kPageSizeLog2;
+ }
+
+ std::array<bool, kNumPageRegions> page_memories_in_use_ = {};
+};
+
+// LargePageMemoryRegion serves a single large PageMemory object.
+class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
+ public:
+ LargePageMemoryRegion(PageAllocator*, size_t);
+ ~LargePageMemoryRegion() override;
+
+ const PageMemory GetPageMemory() const {
+ return PageMemory(
+ MemoryRegion(reserved_region().base(), reserved_region().size()),
+ MemoryRegion(reserved_region().base() + kGuardPageSize,
+ reserved_region().size() - 2 * kGuardPageSize));
+ }
+
+ inline Address Lookup(Address) const;
+
+ void UnprotectForTesting() final;
+};
+
+// A PageMemoryRegionTree is a binary search tree of PageMemoryRegions sorted
+// by reserved base addresses.
+//
+// The tree does not keep its elements alive but merely provides indexing
+// capabilities.
+class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
+ public:
+ PageMemoryRegionTree();
+ ~PageMemoryRegionTree();
+
+ void Add(PageMemoryRegion*);
+ void Remove(PageMemoryRegion*);
+
+ inline PageMemoryRegion* Lookup(Address) const;
+
+ private:
+ std::map<Address, PageMemoryRegion*> set_;
+};
+
+// A pool of PageMemory objects represented by the writeable base addresses.
+//
+// The pool does not keep its elements alive but merely provides pooling
+// capabilities.
+class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
+ public:
+ static constexpr size_t kNumPoolBuckets = 16;
+
+ using Result = std::pair<NormalPageMemoryRegion*, Address>;
+
+ NormalPageMemoryPool();
+ ~NormalPageMemoryPool();
+
+ void Add(size_t, NormalPageMemoryRegion*, Address);
+ Result Take(size_t);
+
+ private:
+ std::vector<Result> pool_[kNumPoolBuckets];
+};
+
+// A backend that is used for allocating and freeing normal and large pages.
+//
+// Internally maintains a set of PageMemoryRegions. The backend keeps its used
+// regions alive.
+class V8_EXPORT_PRIVATE PageBackend final {
+ public:
+ explicit PageBackend(PageAllocator*);
+ ~PageBackend();
+
+ // Allocates a normal page from the backend.
+ //
+ // Returns the writeable base of the region.
+ Address AllocateNormalPageMemory(size_t);
+
+ // Returns normal page memory back to the backend. Expects the
+ // |writeable_base| returned by |AllocateNormalPageMemory()|.
+ void FreeNormalPageMemory(size_t, Address writeable_base);
+
+ // Allocates a large page from the backend.
+ //
+ // Returns the writeable base of the region.
+ Address AllocateLargePageMemory(size_t size);
+
+ // Returns large page memory back to the backend. Expects the |writeable_base|
+ // returned by |AllocateLargePageMemory()|.
+ void FreeLargePageMemory(Address writeable_base);
+
+ // Returns the writeable base if |address| is contained in a valid page
+ // memory.
+ inline Address Lookup(Address) const;
+
+ // Disallow copy/move.
+ PageBackend(const PageBackend&) = delete;
+ PageBackend& operator=(const PageBackend&) = delete;
+
+ private:
+ PageAllocator* allocator_;
+ NormalPageMemoryPool page_pool_;
+ PageMemoryRegionTree page_memory_region_tree_;
+ std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
+ std::unordered_map<PageMemoryRegion*, std::unique_ptr<PageMemoryRegion>>
+ large_page_memory_regions_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PAGE_MEMORY_H_
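A minimal usage sketch for the PageBackend above; the PageAllocator is assumed to come from the embedder's platform, and the bucket value is an arbitrary assumption:

  cppgc::internal::PageBackend backend(page_allocator);
  cppgc::internal::Address base = backend.AllocateNormalPageMemory(/*bucket=*/0);
  // |base| is the writeable base of a pooled, unprotected normal page region.
  DCHECK_EQ(base, backend.Lookup(base));
  backend.FreeNormalPageMemory(/*bucket=*/0, base);  // Re-protects and pools it.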
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
new file mode 100644
index 0000000000..299cefc521
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/persistent-node.h"
+
+#include <algorithm>
+#include <numeric>
+
+namespace cppgc {
+namespace internal {
+
+size_t PersistentRegion::NodesInUse() const {
+ return std::accumulate(
+ nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
+ return acc + std::count_if(slots->cbegin(), slots->cend(),
+ [](const PersistentNode& node) {
+ return node.IsUsed();
+ });
+ });
+}
+
+void PersistentRegion::EnsureNodeSlots() {
+ nodes_.push_back(std::make_unique<PersistentNodeSlots>());
+ for (auto& node : *nodes_.back()) {
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ }
+}
+
+void PersistentRegion::Trace(Visitor* visitor) {
+ free_list_head_ = nullptr;
+ for (auto& slots : nodes_) {
+ bool is_empty = true;
+ for (auto& node : *slots) {
+ if (node.IsUsed()) {
+ node.Trace(visitor);
+ is_empty = false;
+ } else {
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ }
+ }
+ if (is_empty) {
+ PersistentNode* first_next = (*slots)[0].FreeListNext();
+ // First next was processed first in the loop above, guaranteeing that it
+ // either points to null or into a different node block.
+ CPPGC_DCHECK(!first_next || first_next < &slots->front() ||
+ first_next > &slots->back());
+ free_list_head_ = first_next;
+ slots.reset();
+ }
+ }
+ nodes_.erase(std::remove_if(nodes_.begin(), nodes_.end(),
+ [](const auto& ptr) { return !ptr; }),
+ nodes_.end());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
new file mode 100644
index 0000000000..e9dfcecdf3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/pointer-policies.h"
+#include "include/cppgc/internal/persistent-node.h"
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+EnabledCheckingPolicy::EnabledCheckingPolicy() {
+ USE(impl_);
+ // TODO(chromium:1056170): Save creating heap state.
+}
+
+void EnabledCheckingPolicy::CheckPointer(const void* ptr) {
+ // TODO(chromium:1056170): Provide implementation.
+}
+
+PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetStrongPersistentRegion();
+}
+
+PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetWeakPersistentRegion();
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
new file mode 100644
index 0000000000..40107c1526
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -0,0 +1,66 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/prefinalizer-handler.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
+ cppgc::Heap* heap, PreFinalizer prefinalizer) {
+ internal::Heap::From(heap)->prefinalizer_handler()->RegisterPrefinalizer(
+ prefinalizer);
+}
+
+bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
+ const PreFinalizer& other) {
+ return (object_ == other.object_) && (callback_ == other.callback_);
+}
+
+PreFinalizerHandler::PreFinalizerHandler()
+#ifdef DEBUG
+ : creation_thread_id_(v8::base::OS::GetCurrentThreadId())
+#endif
+{
+}
+
+void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer prefinalizer) {
+ DCHECK(CurrentThreadIsCreationThread());
+ DCHECK_EQ(ordered_pre_finalizers_.end(),
+ std::find(ordered_pre_finalizers_.begin(),
+ ordered_pre_finalizers_.end(), prefinalizer));
+ ordered_pre_finalizers_.push_back(prefinalizer);
+}
+
+void PreFinalizerHandler::InvokePreFinalizers() {
+ DCHECK(CurrentThreadIsCreationThread());
+ LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
+ ordered_pre_finalizers_.erase(
+ ordered_pre_finalizers_.begin(),
+ std::remove_if(ordered_pre_finalizers_.rbegin(),
+ ordered_pre_finalizers_.rend(),
+ [liveness_broker](const PreFinalizer& pf) {
+ return (pf.callback_)(liveness_broker, pf.object_);
+ })
+ .base());
+ ordered_pre_finalizers_.shrink_to_fit();
+}
+
+bool PreFinalizerHandler::CurrentThreadIsCreationThread() {
+#ifdef DEBUG
+ return creation_thread_id_ == v8::base::OS::GetCurrentThreadId();
+#else
+ return true;
+#endif
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
new file mode 100644
index 0000000000..a625553471
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+#define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+
+#include <vector>
+
+#include "include/cppgc/prefinalizer.h"
+
+namespace cppgc {
+namespace internal {
+
+class PreFinalizerHandler final {
+ public:
+ using PreFinalizer =
+ cppgc::internal::PreFinalizerRegistrationDispatcher::PreFinalizer;
+
+ PreFinalizerHandler();
+
+ void RegisterPrefinalizer(PreFinalizer prefinalizer);
+
+ void InvokePreFinalizers();
+
+ private:
+ // Checks that the current thread is the thread that created the heap.
+ bool CurrentThreadIsCreationThread();
+
+ // Pre-finalizers are called in the reverse order of their registration by
+ // an object's constructors (including constructors of Mixin objects). This
+ // is achieved by processing ordered_pre_finalizers_ back-to-front.
+ std::vector<PreFinalizer> ordered_pre_finalizers_;
+
+#ifdef DEBUG
+ int creation_thread_id_;
+#endif
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
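To make the ordering comment above concrete: if an object's constructors register pre-finalizers A, then B, then C, InvokePreFinalizers() walks ordered_pre_finalizers_ back-to-front, so the callbacks run as C, B, A, and every entry whose callback returns true is erased from the list (the names A, B, C are illustrative only):

  // Registration order:  A, B, C   (constructor execution order)
  // Invocation order:    C, B, A   (ordered_pre_finalizers_ back-to-front)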
diff --git a/deps/v8/src/heap/cppgc/raw-heap.cc b/deps/v8/src/heap/cppgc/raw-heap.cc
new file mode 100644
index 0000000000..cf7311b46f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/raw-heap.cc
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/raw-heap.h"
+
+#include "src/heap/cppgc/heap-space.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+constexpr size_t RawHeap::kNumberOfRegularSpaces;
+
+RawHeap::RawHeap(Heap* heap, size_t custom_spaces) : main_heap_(heap) {
+ size_t i = 0;
+ for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
+ spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
+ }
+ spaces_.push_back(std::make_unique<LargePageSpace>(
+ this, static_cast<size_t>(RegularSpaceType::kLarge)));
+ DCHECK_EQ(kNumberOfRegularSpaces, spaces_.size());
+ for (size_t j = 0; j < custom_spaces; j++) {
+ spaces_.push_back(
+ std::make_unique<NormalPageSpace>(this, kNumberOfRegularSpaces + j));
+ }
+}
+
+RawHeap::~RawHeap() = default;
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/raw-heap.h b/deps/v8/src/heap/cppgc/raw-heap.h
new file mode 100644
index 0000000000..0591fa87ab
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/raw-heap.h
@@ -0,0 +1,106 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_RAW_HEAP_H_
+#define V8_HEAP_CPPGC_RAW_HEAP_H_
+
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include "include/cppgc/heap.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class Heap;
+class BaseSpace;
+
+// RawHeap is responsible for space management.
+class V8_EXPORT_PRIVATE RawHeap final {
+ public:
+ // Normal spaces are used to store objects of different size classes:
+ // - kNormal1: < 32 bytes
+ // - kNormal2: < 64 bytes
+ // - kNormal3: < 128 bytes
+ // - kNormal4: >= 128 bytes
+ //
+ // Objects of size greater than 2^16 get stored in the large space.
+ //
+ // Users can override where objects are allocated via cppgc::CustomSpace to
+ // force allocation in a custom space.
+ enum class RegularSpaceType : uint8_t {
+ kNormal1,
+ kNormal2,
+ kNormal3,
+ kNormal4,
+ kLarge,
+ };
+
+ static constexpr size_t kNumberOfRegularSpaces =
+ static_cast<size_t>(RegularSpaceType::kLarge) + 1;
+
+ using Spaces = std::vector<std::unique_ptr<BaseSpace>>;
+ using iterator = Spaces::iterator;
+ using const_iterator = Spaces::const_iterator;
+
+ explicit RawHeap(Heap* heap, size_t custom_spaces);
+ ~RawHeap();
+
+ // Space iteration support.
+ iterator begin() { return spaces_.begin(); }
+ const_iterator begin() const { return spaces_.begin(); }
+ iterator end() { return spaces_.end(); }
+ const_iterator end() const { return spaces_.end(); }
+
+ iterator custom_begin() { return std::next(begin(), kNumberOfRegularSpaces); }
+ iterator custom_end() { return end(); }
+
+ size_t size() const { return spaces_.size(); }
+
+ BaseSpace* Space(RegularSpaceType type) {
+ const size_t index = static_cast<size_t>(type);
+ DCHECK_GT(kNumberOfRegularSpaces, index);
+ return Space(index);
+ }
+ const BaseSpace* Space(RegularSpaceType space) const {
+ return const_cast<RawHeap&>(*this).Space(space);
+ }
+
+ BaseSpace* CustomSpace(CustomSpaceIndex space_index) {
+ return Space(SpaceIndexForCustomSpace(space_index));
+ }
+ const BaseSpace* CustomSpace(CustomSpaceIndex space_index) const {
+ return const_cast<RawHeap&>(*this).CustomSpace(space_index);
+ }
+
+ Heap* heap() { return main_heap_; }
+ const Heap* heap() const { return main_heap_; }
+
+ private:
+ size_t SpaceIndexForCustomSpace(CustomSpaceIndex space_index) const {
+ DCHECK_LT(space_index.value, spaces_.size() - kNumberOfRegularSpaces);
+ return kNumberOfRegularSpaces + space_index.value;
+ }
+
+ BaseSpace* Space(size_t space_index) {
+ DCHECK_GT(spaces_.size(), space_index);
+ BaseSpace* space = spaces_[space_index].get();
+ DCHECK(space);
+ return space;
+ }
+ const BaseSpace* Space(size_t space_index) const {
+ return const_cast<RawHeap&>(*this).Space(space_index);
+ }
+
+ Heap* main_heap_;
+ Spaces spaces_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_RAW_HEAP_H_
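The resulting spaces_ layout follows directly from the RawHeap constructor in raw-heap.cc; for example, constructing RawHeap(heap, /*custom_spaces=*/2) yields:

  //   spaces_[0..3]  kNormal1, kNormal2, kNormal3, kNormal4
  //   spaces_[4]     kLarge
  //   spaces_[5]     custom space 0
  //   spaces_[6]     custom space 1
  // CustomSpace(CustomSpaceIndex(1)) therefore resolves to spaces_[6].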
diff --git a/deps/v8/src/heap/cppgc/sanitizers.h b/deps/v8/src/heap/cppgc/sanitizers.h
index e3102b01ed..17f6cd7306 100644
--- a/deps/v8/src/heap/cppgc/sanitizers.h
+++ b/deps/v8/src/heap/cppgc/sanitizers.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_CPPGC_SANITIZERS_H_
#define V8_HEAP_CPPGC_SANITIZERS_H_
+#include <stdint.h>
+#include <string.h>
+
#include "src/base/macros.h"
//
@@ -16,10 +19,15 @@
#include <sanitizer/asan_interface.h>
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
+#error "ASAN_POISON_MEMORY_REGION must be defined"
+#endif
#else // !V8_USE_ADDRESS_SANITIZER
#define NO_SANITIZE_ADDRESS
+#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif // V8_USE_ADDRESS_SANITIZER
@@ -27,12 +35,43 @@
#include <sanitizer/msan_interface.h>
+#define MSAN_POISON(addr, size) __msan_allocated_memory(addr, size)
#define MSAN_UNPOISON(addr, size) __msan_unpoison(addr, size)
#else // !V8_USE_MEMORY_SANITIZER
+#define MSAN_POISON(addr, size) ((void)(addr), (void)(size))
#define MSAN_UNPOISON(addr, size) ((void)(addr), (void)(size))
#endif // V8_USE_MEMORY_SANITIZER
+// API for newly allocated or reclaimed memory.
+#if defined(V8_USE_MEMORY_SANITIZER)
+#define SET_MEMORY_ACCESIBLE(address, size) \
+ MSAN_UNPOISON(address, size); \
+ memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) MSAN_POISON((address), (size))
+#elif DEBUG || defined(V8_USE_ADDRESS_SANITIZER)
+#define SET_MEMORY_ACCESIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size); \
+ memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) \
+ ::cppgc::internal::ZapMemory((address), (size)); \
+ ASAN_POISON_MEMORY_REGION(address, size)
+#else
+#define SET_MEMORY_ACCESIBLE(address, size) memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) ((void)(address), (void)(size))
+#endif
+
+namespace cppgc {
+namespace internal {
+
+inline void ZapMemory(void* address, size_t size) {
+ static constexpr uint8_t kZappedValue = 0xcd;
+ memset(address, kZappedValue, size);
+}
+
+} // namespace internal
+} // namespace cppgc
+
#endif // V8_HEAP_CPPGC_SANITIZERS_H_
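Summarising the cascade above, the memory-accessibility macros behave as follows per build configuration (a restatement of the #if branches, not additional behaviour):

  //   MSAN build:       SET_MEMORY_ACCESIBLE   -> MSAN unpoison + zero
  //                     SET_MEMORY_INACCESIBLE -> MSAN poison
  //   ASAN or DEBUG:    SET_MEMORY_ACCESIBLE   -> ASAN unpoison + zero
  //                     SET_MEMORY_INACCESIBLE -> zap with 0xcd + ASAN poison
  //   Release:          SET_MEMORY_ACCESIBLE   -> zero
  //                     SET_MEMORY_INACCESIBLE -> no-op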
diff --git a/deps/v8/src/heap/cppgc/source-location.cc b/deps/v8/src/heap/cppgc/source-location.cc
new file mode 100644
index 0000000000..95154cf6d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/source-location.cc
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/source-location.h"
+
+namespace cppgc {
+
+std::string SourceLocation::ToString() const {
+ if (!file_) {
+ return {};
+ }
+ return std::string(function_) + "@" + file_ + ":" + std::to_string(line_);
+}
+
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/stack.cc b/deps/v8/src/heap/cppgc/stack.cc
index a821768917..b99693708c 100644
--- a/deps/v8/src/heap/cppgc/stack.cc
+++ b/deps/v8/src/heap/cppgc/stack.cc
@@ -13,7 +13,7 @@
namespace cppgc {
namespace internal {
-using IterateStackCallback = void (Stack::*)(StackVisitor*, intptr_t*) const;
+using IterateStackCallback = void (*)(const Stack*, StackVisitor*, intptr_t*);
extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
IterateStackCallback);
@@ -63,8 +63,6 @@ void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
#endif // V8_USE_ADDRESS_SANITIZER
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
@@ -88,49 +86,44 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#endif // defined(__has_feature)
}
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
-} // namespace
-
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-void Stack::IteratePointers(StackVisitor* visitor) const {
- PushAllRegistersAndIterateStack(this, visitor, &Stack::IteratePointersImpl);
- // No need to deal with callee-saved registers as they will be kept alive by
- // the regular conservative stack iteration.
- IterateSafeStackIfNecessary(visitor);
-}
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
+// Called by the trampoline that pushes registers on the stack. This method
+// should never be inlined to ensure that a possible redzone cannot contain
+// any data that needs to be scanned.
+V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
NO_SANITIZE_ADDRESS
-void Stack::IteratePointersImpl(StackVisitor* visitor,
- intptr_t* stack_end) const {
+void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
+ intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
void* asan_fake_stack = __asan_get_current_fake_stack();
#endif // V8_USE_ADDRESS_SANITIZER
// All supported platforms should have their stack aligned to at least
// sizeof(void*).
constexpr size_t kMinStackAlignment = sizeof(void*);
- // Redzone should not contain any pointers as the iteration is always called
- // from the assembly trampoline. If inline assembly is ever inlined through
- // LTO this may become necessary.
- constexpr size_t kRedZoneBytes = 128;
- void** current = reinterpret_cast<void**>(
- reinterpret_cast<uintptr_t>(stack_end - kRedZoneBytes));
+ void** current = reinterpret_cast<void**>(stack_end);
CHECK_EQ(0u, reinterpret_cast<uintptr_t>(current) & (kMinStackAlignment - 1));
- for (; current < stack_start_; ++current) {
+ for (; current < stack->stack_start(); ++current) {
// MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
// into a local which is unpoisoned.
void* address = *current;
- MSAN_UNPOISON(address, sizeof(address));
+ MSAN_UNPOISON(&address, sizeof(address));
if (address == nullptr) continue;
visitor->VisitPointer(address);
#ifdef V8_USE_ADDRESS_SANITIZER
- IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, stack_start_,
- stack_end, address);
+ IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack,
+ stack->stack_start(), stack_end, address);
#endif // V8_USE_ADDRESS_SANITIZER
}
}
+} // namespace
+
+void Stack::IteratePointers(StackVisitor* visitor) const {
+ PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
+ // No need to deal with callee-saved registers as they will be kept alive by
+ // the regular conservative stack iteration.
+ IterateSafeStackIfNecessary(visitor);
+}
+
} // namespace internal
} // namespace cppgc
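The hunk above turns IterateStackCallback from a pointer-to-member into a plain function pointer so that the assembly trampoline PushAllRegistersAndIterateStack can invoke it with an ordinary C calling convention. The following is a minimal, self-contained sketch of that trampoline/callback shape only; the names ToyStack, ToyVisitor, Trampoline and IterateToyPointers are illustrative and not part of the patch.

#include <cstdint>
#include <iostream>

struct ToyVisitor {
  void VisitPointer(void* address) { std::cout << address << "\n"; }
};

struct ToyStack {
  const void* stack_start;
};

// A plain function pointer, as in the hunk above, instead of a
// pointer-to-member: a trampoline can only call something with an ordinary
// C calling convention.
using ToyCallback = void (*)(const ToyStack*, ToyVisitor*, std::intptr_t*);

// Stand-in for PushAllRegistersAndIterateStack. A real trampoline would spill
// the callee-saved registers onto the stack first and pass the resulting
// stack pointer as |stack_end|.
void Trampoline(const ToyStack* stack, ToyVisitor* visitor, ToyCallback cb) {
  std::intptr_t marker = 0;  // approximates the stack pointer after spilling
  cb(stack, visitor, &marker);
}

void IterateToyPointers(const ToyStack* stack, ToyVisitor* visitor,
                        std::intptr_t* stack_end) {
  // The real implementation walks [stack_end, stack->stack_start) word by
  // word; the sketch just reports both boundaries.
  visitor->VisitPointer(stack_end);
  visitor->VisitPointer(const_cast<void*>(stack->stack_start));
}

int main() {
  ToyStack stack{&stack};
  ToyVisitor visitor;
  Trampoline(&stack, &visitor, &IterateToyPointers);
}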
diff --git a/deps/v8/src/heap/cppgc/stack.h b/deps/v8/src/heap/cppgc/stack.h
index 599bf3a54a..3f561aed08 100644
--- a/deps/v8/src/heap/cppgc/stack.h
+++ b/deps/v8/src/heap/cppgc/stack.h
@@ -7,11 +7,6 @@
#include "src/base/macros.h"
-// TODO(chromium:1056170): Implement all platforms.
-#if defined(V8_TARGET_ARCH_X64)
-#define CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN 1
-#endif
-
namespace cppgc {
namespace internal {
@@ -33,13 +28,12 @@ class V8_EXPORT_PRIVATE Stack final {
// Word-aligned iteration of the stack. Slot values are passed on to
// |visitor|.
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
void IteratePointers(StackVisitor* visitor) const;
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
- private:
- void IteratePointersImpl(StackVisitor* visitor, intptr_t* stack_end) const;
+ // Returns the start of the stack.
+ const void* stack_start() const { return stack_start_; }
+ private:
const void* stack_start_;
};
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
new file mode 100644
index 0000000000..77d2d3c33e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -0,0 +1,213 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/sweeper.h"
+
+#include <vector>
+
+#include "src/heap/cppgc/free-list.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectStartBitmapVerifier
+ : private HeapVisitor<ObjectStartBitmapVerifier> {
+ friend class HeapVisitor<ObjectStartBitmapVerifier>;
+
+ public:
+ void Verify(RawHeap* heap) { Traverse(heap); }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ // Remember bitmap and reset previous pointer.
+ bitmap_ = &page->object_start_bitmap();
+ prev_ = nullptr;
+ return false;
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsLargeObject()) return true;
+
+ auto* raw_header = reinterpret_cast<ConstAddress>(header);
+ CHECK(bitmap_->CheckBit(raw_header));
+ if (prev_) {
+ CHECK_EQ(prev_, bitmap_->FindHeader(raw_header - 1));
+ }
+ prev_ = header;
+ return true;
+ }
+
+ ObjectStartBitmap* bitmap_ = nullptr;
+ HeapObjectHeader* prev_ = nullptr;
+};
+
+struct SpaceState {
+ BaseSpace::Pages unswept_pages;
+};
+using SpaceStates = std::vector<SpaceState>;
+
+bool SweepNormalPage(NormalPage* page) {
+ constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+
+ auto* space = NormalPageSpace::From(page->space());
+ ObjectStartBitmap& bitmap = page->object_start_bitmap();
+ bitmap.Clear();
+
+ Address start_of_gap = page->PayloadStart();
+ for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
+ begin != end;) {
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
+ const size_t size = header->GetSize();
+ // Check if this is a free list entry.
+ if (header->IsFree<kAtomicAccess>()) {
+ SET_MEMORY_INACCESIBLE(header, std::min(kFreeListEntrySize, size));
+ begin += size;
+ continue;
+ }
+ // Check if object is not marked (not reachable).
+ if (!header->IsMarked<kAtomicAccess>()) {
+ header->Finalize();
+ SET_MEMORY_INACCESIBLE(header, size);
+ begin += size;
+ continue;
+ }
+ // The object is alive.
+ const Address header_address = reinterpret_cast<Address>(header);
+ if (start_of_gap != header_address) {
+ space->AddToFreeList(start_of_gap,
+ static_cast<size_t>(header_address - start_of_gap));
+ }
+ header->Unmark<kAtomicAccess>();
+ bitmap.SetBit(begin);
+ begin += size;
+ start_of_gap = begin;
+ }
+
+ if (start_of_gap != page->PayloadStart() &&
+ start_of_gap != page->PayloadEnd()) {
+ space->AddToFreeList(
+ start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ }
+
+ const bool is_empty = (start_of_gap == page->PayloadStart());
+ return is_empty;
+}
+
+// This visitor:
+// - resets linear allocation buffers and clears free lists for all spaces;
+// - moves all Heap pages to local Sweeper's state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ space->ResetLinearAllocationBuffer();
+ space->free_list().Clear();
+ (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+
+ return true;
+ }
+
+ private:
+ SpaceStates* states_;
+};
+
+class MutatorThreadSweepVisitor final
+ : private HeapVisitor<MutatorThreadSweepVisitor> {
+ friend class HeapVisitor<MutatorThreadSweepVisitor>;
+
+ public:
+ explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
+ for (SpaceState& state : *space_states) {
+ for (BasePage* page : state.unswept_pages) {
+ Traverse(page);
+ }
+ state.unswept_pages.clear();
+ }
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ const bool is_empty = SweepNormalPage(page);
+ if (is_empty) {
+ NormalPage::Destroy(page);
+ } else {
+ page->space()->AddPage(page);
+ }
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ if (page->ObjectHeader()->IsMarked()) {
+ page->space()->AddPage(page);
+ } else {
+ page->ObjectHeader()->Finalize();
+ LargePage::Destroy(page);
+ }
+ return true;
+ }
+};
+
+} // namespace
+
+class Sweeper::SweeperImpl final {
+ public:
+ explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
+ space_states_.resize(heap_->size());
+ }
+
+ void Start(Config config) {
+ is_in_progress_ = true;
+#if DEBUG
+ ObjectStartBitmapVerifier().Verify(heap_);
+#endif
+ PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+ if (config == Config::kAtomic) {
+ Finish();
+ } else {
+ DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
+ // TODO(chromium:1056170): Schedule concurrent sweeping.
+ }
+ }
+
+ void Finish() {
+ if (!is_in_progress_) return;
+
+ MutatorThreadSweepVisitor s(&space_states_);
+
+ is_in_progress_ = false;
+ }
+
+ private:
+ SpaceStates space_states_;
+ RawHeap* heap_;
+ bool is_in_progress_ = false;
+};
+
+Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::~Sweeper() = default;
+
+void Sweeper::Start(Config config) { impl_->Start(config); }
+void Sweeper::Finish() { impl_->Finish(); }
+
+} // namespace internal
+} // namespace cppgc
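SweepNormalPage above walks a page payload, coalesces free entries and unmarked objects into gaps that are handed to the free list, unmarks the survivors, and reports whether the page ended up empty. Below is a hedged, standalone approximation of that control flow, not a faithful reimplementation; ToyHeader and SweepToyPage are invented names and the byte sizes are arbitrary.

#include <cstddef>
#include <iostream>
#include <vector>

struct ToyHeader {
  std::size_t size;   // object size in bytes, header included
  bool is_free;       // already a free-list entry
  bool is_marked;     // reachable in the last marking phase
};

// Free entries and unmarked objects are coalesced into gaps that become
// single free-list blocks; marked objects are unmarked and kept. The return
// value says whether the page contained no live objects at all.
bool SweepToyPage(std::vector<ToyHeader>& objects) {
  std::size_t gap_bytes = 0;
  bool any_live = false;
  for (ToyHeader& header : objects) {
    if (header.is_free || !header.is_marked) {
      gap_bytes += header.size;  // extend the current gap
      continue;
    }
    if (gap_bytes > 0) {
      std::cout << "free-list entry of " << gap_bytes << " bytes\n";
      gap_bytes = 0;
    }
    header.is_marked = false;    // alive: reset the mark for the next cycle
    any_live = true;
  }
  if (any_live && gap_bytes > 0) {
    std::cout << "trailing free-list entry of " << gap_bytes << " bytes\n";
  }
  return !any_live;
}

int main() {
  std::vector<ToyHeader> page = {
      {32, false, true}, {64, false, false}, {16, true, false}, {48, false, true}};
  std::cout << "page empty: " << std::boolalpha << SweepToyPage(page) << "\n";
}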
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
new file mode 100644
index 0000000000..3e38773168
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_SWEEPER_H_
+#define V8_HEAP_CPPGC_SWEEPER_H_
+
+#include <memory>
+
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class RawHeap;
+
+class V8_EXPORT_PRIVATE Sweeper final {
+ public:
+ enum class Config { kAtomic, kIncrementalAndConcurrent };
+
+ explicit Sweeper(RawHeap*);
+ ~Sweeper();
+
+ Sweeper(const Sweeper&) = delete;
+ Sweeper& operator=(const Sweeper&) = delete;
+
+ void Start(Config);
+ void Finish();
+
+ private:
+ class SweeperImpl;
+ std::unique_ptr<SweeperImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_SWEEPER_H_
diff --git a/deps/v8/src/heap/cppgc/visitor.h b/deps/v8/src/heap/cppgc/visitor.h
new file mode 100644
index 0000000000..caa840b4dc
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/visitor.h
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_VISITOR_H_
+#define V8_HEAP_CPPGC_VISITOR_H_
+
+#include "include/cppgc/visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+// Base visitor that is allowed to create a public cppgc::Visitor object and
+// use its internals.
+class VisitorBase : public cppgc::Visitor {
+ public:
+ VisitorBase() = default;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_VISITOR_H_
diff --git a/deps/v8/src/heap/cppgc/worklist.h b/deps/v8/src/heap/cppgc/worklist.h
new file mode 100644
index 0000000000..5993d6a04e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/worklist.h
@@ -0,0 +1,473 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_WORKLIST_H_
+#define V8_HEAP_CPPGC_WORKLIST_H_
+
+#include <cstddef>
+#include <utility>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace cppgc {
+namespace internal {
+
+// A concurrent worklist based on segments. Each task gets private
+// push and pop segments. Empty pop segments are swapped with their
+// corresponding push segments. Full push segments are published to a global
+// pool of segments and replaced with empty segments.
+//
+// Work stealing is best effort, i.e., there is no way to inform other tasks
+// that items are needed.
+template <typename EntryType_, int SEGMENT_SIZE, int max_num_tasks = 8>
+class Worklist {
+ using WorklistType = Worklist<EntryType_, SEGMENT_SIZE, max_num_tasks>;
+
+ public:
+ using EntryType = EntryType_;
+ static constexpr int kMaxNumTasks = max_num_tasks;
+ static constexpr size_t kSegmentCapacity = SEGMENT_SIZE;
+
+ class View {
+ public:
+ View(WorklistType* worklist, int task_id)
+ : worklist_(worklist), task_id_(task_id) {}
+
+ // Pushes an entry onto the worklist.
+ bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
+
+ // Pops an entry from the worklist.
+ bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
+
+ // Returns true if the local portion of the worklist is empty.
+ bool IsLocalEmpty() const { return worklist_->IsLocalEmpty(task_id_); }
+
+ // Returns true if the worklist is empty. Can only be used from the main
+ // thread without concurrent access.
+ bool IsEmpty() const { return worklist_->IsEmpty(); }
+
+ bool IsGlobalPoolEmpty() const { return worklist_->IsGlobalPoolEmpty(); }
+
+ // Returns true if the local portion and the global pool are empty (i.e.
+ // whether the current view cannot pop anymore).
+ bool IsLocalViewEmpty() const {
+ return worklist_->IsLocalViewEmpty(task_id_);
+ }
+
+ void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
+
+ void* operator new(size_t, void* location) = delete;
+ void* operator new(size_t) = delete;
+
+ private:
+ WorklistType* const worklist_;
+ const int task_id_;
+ };
+
+ Worklist() : Worklist(kMaxNumTasks) {}
+
+ explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ DCHECK_LE(num_tasks_, kMaxNumTasks);
+ for (int i = 0; i < num_tasks_; i++) {
+ private_push_segment(i) = NewSegment();
+ private_pop_segment(i) = NewSegment();
+ }
+ }
+
+ ~Worklist() {
+ CHECK(IsEmpty());
+ for (int i = 0; i < num_tasks_; i++) {
+ DCHECK_NOT_NULL(private_push_segment(i));
+ DCHECK_NOT_NULL(private_pop_segment(i));
+ delete private_push_segment(i);
+ delete private_pop_segment(i);
+ }
+ }
+
+ // Swaps content with the given worklist. Local buffers need to
+ // be empty, not thread safe.
+ void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
+ CHECK(AreLocalsEmpty());
+ CHECK(other.AreLocalsEmpty());
+
+ global_pool_.Swap(other.global_pool_);
+ }
+
+ bool Push(int task_id, EntryType entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_push_segment(task_id));
+ if (!private_push_segment(task_id)->Push(entry)) {
+ PublishPushSegmentToGlobal(task_id);
+ bool success = private_push_segment(task_id)->Push(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(int task_id, EntryType* entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_pop_segment(task_id));
+ if (!private_pop_segment(task_id)->Pop(entry)) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ Segment* tmp = private_pop_segment(task_id);
+ private_pop_segment(task_id) = private_push_segment(task_id);
+ private_push_segment(task_id) = tmp;
+ } else if (!StealPopSegmentFromGlobal(task_id)) {
+ return false;
+ }
+ bool success = private_pop_segment(task_id)->Pop(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ size_t LocalPushSegmentSize(int task_id) const {
+ return private_push_segment(task_id)->Size();
+ }
+
+ bool IsLocalEmpty(int task_id) const {
+ return private_pop_segment(task_id)->IsEmpty() &&
+ private_push_segment(task_id)->IsEmpty();
+ }
+
+ bool IsGlobalPoolEmpty() const { return global_pool_.IsEmpty(); }
+
+ bool IsEmpty() const {
+ if (!AreLocalsEmpty()) return false;
+ return IsGlobalPoolEmpty();
+ }
+
+ bool AreLocalsEmpty() const {
+ for (int i = 0; i < num_tasks_; i++) {
+ if (!IsLocalEmpty(i)) return false;
+ }
+ return true;
+ }
+
+ bool IsLocalViewEmpty(int task_id) const {
+ return IsLocalEmpty(task_id) && IsGlobalPoolEmpty();
+ }
+
+ size_t LocalSize(int task_id) const {
+ return private_pop_segment(task_id)->Size() +
+ private_push_segment(task_id)->Size();
+ }
+
+ // Thread-safe but may return an outdated result.
+ size_t GlobalPoolSize() const { return global_pool_.Size(); }
+
+ // Clears all segments. Frees the global segment pool.
+ //
+ // Assumes that no other tasks are running.
+ void Clear() {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Clear();
+ private_push_segment(i)->Clear();
+ }
+ global_pool_.Clear();
+ }
+
+ // Calls the specified callback on each element of the deques and replaces
+ // the element with the result of the callback.
+ // The signature of the callback is
+ // bool Callback(EntryType old, EntryType* new).
+ // If the callback returns |false| then the element is removed from the
+ // worklist. Otherwise the |new| entry is updated.
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Update(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Update(callback);
+ private_push_segment(i)->Update(callback);
+ }
+ global_pool_.Update(callback);
+ }
+
+ // Calls the specified callback on each element of the deques.
+ // The signature of the callback is:
+ // void Callback(EntryType entry).
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Iterate(callback);
+ private_push_segment(i)->Iterate(callback);
+ }
+ global_pool_.Iterate(callback);
+ }
+
+ template <typename Callback>
+ void IterateGlobalPool(Callback callback) {
+ global_pool_.Iterate(callback);
+ }
+
+ void FlushToGlobal(int task_id) {
+ PublishPushSegmentToGlobal(task_id);
+ PublishPopSegmentToGlobal(task_id);
+ }
+
+ void MergeGlobalPool(Worklist* other) {
+ global_pool_.Merge(&other->global_pool_);
+ }
+
+ private:
+ FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
+ FRIEND_TEST(CppgcWorkListTest, SegmentPush);
+ FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
+ FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
+ FRIEND_TEST(CppgcWorkListTest, SegmentClear);
+ FRIEND_TEST(CppgcWorkListTest, SegmentFullPushFails);
+ FRIEND_TEST(CppgcWorkListTest, SegmentEmptyPopFails);
+ FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
+
+ class Segment {
+ public:
+ static const size_t kCapacity = kSegmentCapacity;
+
+ Segment() : index_(0) {}
+
+ bool Push(EntryType entry) {
+ if (IsFull()) return false;
+ entries_[index_++] = entry;
+ return true;
+ }
+
+ bool Pop(EntryType* entry) {
+ if (IsEmpty()) return false;
+ *entry = entries_[--index_];
+ return true;
+ }
+
+ size_t Size() const { return index_; }
+ bool IsEmpty() const { return index_ == 0; }
+ bool IsFull() const { return index_ == kCapacity; }
+ void Clear() { index_ = 0; }
+
+ template <typename Callback>
+ void Update(Callback callback) {
+ size_t new_index = 0;
+ for (size_t i = 0; i < index_; i++) {
+ if (callback(entries_[i], &entries_[new_index])) {
+ new_index++;
+ }
+ }
+ index_ = new_index;
+ }
+
+ template <typename Callback>
+ void Iterate(Callback callback) const {
+ for (size_t i = 0; i < index_; i++) {
+ callback(entries_[i]);
+ }
+ }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* segment) { next_ = segment; }
+
+ private:
+ Segment* next_;
+ size_t index_;
+ EntryType entries_[kCapacity];
+ };
+
+ struct PrivateSegmentHolder {
+ Segment* private_push_segment;
+ Segment* private_pop_segment;
+ char cache_line_padding[64];
+ };
+
+ class GlobalPool {
+ public:
+ GlobalPool() : top_(nullptr) {}
+
+ // Swaps contents, not thread safe.
+ void Swap(GlobalPool& other) {
+ Segment* temp = top_;
+ set_top(other.top_);
+ other.set_top(temp);
+ size_t other_size = other.size_.exchange(
+ size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ size_.store(other_size, std::memory_order_relaxed);
+ }
+
+ V8_INLINE void Push(Segment* segment) {
+ v8::base::MutexGuard guard(&lock_);
+ segment->set_next(top_);
+ set_top(segment);
+ size_.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ V8_INLINE bool Pop(Segment** segment) {
+ v8::base::MutexGuard guard(&lock_);
+ if (top_) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ *segment = top_;
+ set_top(top_->next());
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE bool IsEmpty() const {
+ return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
+ nullptr;
+ }
+
+ V8_INLINE size_t Size() const {
+ // It is safe to read |size_| without a lock since this variable is
+ // atomic, keeping in mind that threads may not immediately see the new
+ // value when it is updated.
+ return size_.load(std::memory_order_relaxed);
+ }
+
+ void Clear() {
+ v8::base::MutexGuard guard(&lock_);
+ size_.store(0, std::memory_order_relaxed);
+ Segment* current = top_;
+ while (current) {
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ }
+ set_top(nullptr);
+ }
+
+ // See Worklist::Update.
+ template <typename Callback>
+ void Update(Callback callback) {
+ v8::base::MutexGuard guard(&lock_);
+ Segment* prev = nullptr;
+ Segment* current = top_;
+ while (current) {
+ current->Update(callback);
+ if (current->IsEmpty()) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ if (!prev) {
+ top_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ } else {
+ prev = current;
+ current = current->next();
+ }
+ }
+ }
+
+ // See Worklist::Iterate.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ v8::base::MutexGuard guard(&lock_);
+ for (Segment* current = top_; current; current = current->next()) {
+ current->Iterate(callback);
+ }
+ }
+
+ void Merge(GlobalPool* other) {
+ Segment* top = nullptr;
+ size_t other_size = 0;
+ {
+ v8::base::MutexGuard guard(&other->lock_);
+ if (!other->top_) return;
+ top = other->top_;
+ other_size = other->size_.load(std::memory_order_relaxed);
+ other->size_.store(0, std::memory_order_relaxed);
+ other->set_top(nullptr);
+ }
+
+ // It's safe to iterate through these segments because the top was
+ // extracted from |other|.
+ Segment* end = top;
+ while (end->next()) end = end->next();
+
+ {
+ v8::base::MutexGuard guard(&lock_);
+ size_.fetch_add(other_size, std::memory_order_relaxed);
+ end->set_next(top_);
+ set_top(top);
+ }
+ }
+
+ void* operator new(size_t, void* location) = delete;
+ void* operator new(size_t) = delete;
+
+ private:
+ void set_top(Segment* segment) {
+ v8::base::AsAtomicPtr(&top_)->store(segment, std::memory_order_relaxed);
+ }
+
+ v8::base::Mutex lock_;
+ Segment* top_;
+ std::atomic<size_t> size_{0};
+ };
+
+ V8_INLINE Segment*& private_push_segment(int task_id) {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment* const& private_push_segment(int task_id) const {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment*& private_pop_segment(int task_id) {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE Segment* const& private_pop_segment(int task_id) const {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_push_segment(task_id));
+ private_push_segment(task_id) = NewSegment();
+ }
+ }
+
+ V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
+ if (!private_pop_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_pop_segment(task_id));
+ private_pop_segment(task_id) = NewSegment();
+ }
+ }
+
+ V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
+ if (global_pool_.IsEmpty()) return false;
+ Segment* new_segment = nullptr;
+ if (global_pool_.Pop(&new_segment)) {
+ delete private_pop_segment(task_id);
+ private_pop_segment(task_id) = new_segment;
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE Segment* NewSegment() {
+ // Bottleneck for filtering in crash dumps.
+ return new Segment();
+ }
+
+ PrivateSegmentHolder private_segments_[kMaxNumTasks];
+ GlobalPool global_pool_;
+ int num_tasks_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_WORKLIST_H_
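The worklist above gives each task a private push and pop segment and publishes full segments to a mutex-guarded global pool from which other tasks can steal. The sketch below shows only that usage pattern in miniature, assuming per-task deques instead of fixed-size segments and omitting padding and the atomic size counter; MiniWorklist is a hypothetical name, not the class in the patch.

#include <deque>
#include <iostream>
#include <mutex>
#include <vector>

// Each task pushes and pops on a private deque, spills to a shared pool, and
// steals from that pool when its local work runs out.
template <typename T>
class MiniWorklist {
 public:
  explicit MiniWorklist(int num_tasks) : locals_(num_tasks) {}

  void Push(int task_id, T value) { locals_[task_id].push_back(value); }

  bool Pop(int task_id, T* value) {
    auto& local = locals_[task_id];
    if (local.empty() && !Steal(&local)) return false;
    *value = local.back();
    local.pop_back();
    return true;
  }

  void FlushToGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto& local = locals_[task_id];
    global_.insert(global_.end(), local.begin(), local.end());
    local.clear();
  }

 private:
  bool Steal(std::deque<T>* local) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (global_.empty()) return false;
    local->push_back(global_.back());
    global_.pop_back();
    return true;
  }

  std::vector<std::deque<T>> locals_;
  std::deque<T> global_;
  std::mutex mutex_;
};

int main() {
  MiniWorklist<int> worklist(2);
  worklist.Push(0, 1);
  worklist.FlushToGlobal(0);
  int value;
  if (worklist.Pop(1, &value)) std::cout << "task 1 stole " << value << "\n";
}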
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 2b46da1feb..9c926bed69 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -50,7 +50,8 @@ void LocalEmbedderHeapTracer::EnterFinalPause() {
remote_tracer_->EnterFinalPause(embedder_stack_state_);
// Resetting to state unknown as there may be follow up garbage collections
// triggered from callbacks that have a different stack state.
- embedder_stack_state_ = EmbedderHeapTracer::kUnknown;
+ embedder_stack_state_ =
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
}
bool LocalEmbedderHeapTracer::Trace(double deadline) {
@@ -68,7 +69,7 @@ void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
if (!InUse()) return;
embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kEmpty == stack_state) {
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state) {
remote_tracer()->NotifyEmptyEmbedderStack();
}
}
@@ -92,8 +93,11 @@ void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
void* pointer0;
void* pointer1;
- if (EmbedderDataSlot(js_object, 0).ToAlignedPointer(&pointer0) && pointer0 &&
- EmbedderDataSlot(js_object, 1).ToAlignedPointer(&pointer1)) {
+ if (EmbedderDataSlot(js_object, 0)
+ .ToAlignedPointer(tracer_->isolate_, &pointer0) &&
+ pointer0 &&
+ EmbedderDataSlot(js_object, 1)
+ .ToAlignedPointer(tracer_->isolate_, &pointer1)) {
wrapper_cache_.push_back({pointer0, pointer1});
}
FlushWrapperCacheIfFull();
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index cc3801e479..728ede4452 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -109,7 +109,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
EmbedderHeapTracer* remote_tracer_ = nullptr;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
- EmbedderHeapTracer::kUnknown;
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
// Indicates whether the embedder worklist was observed empty on the main
// thread. This is opportunistic as concurrent marking tasks may hold local
// segments of potential embedder fields to move to the main thread.
@@ -138,7 +138,8 @@ class V8_EXPORT_PRIVATE EmbedderStackStateScope final {
: local_tracer_(local_tracer),
old_stack_state_(local_tracer_->embedder_stack_state_) {
local_tracer_->embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kEmpty == stack_state) {
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers ==
+ stack_state) {
if (local_tracer->remote_tracer())
local_tracer->remote_tracer()->NotifyEmptyEmbedderStack();
}
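The hunks above rename the embedder stack states so the conservative default is explicit: kUnknown becomes kMayContainHeapPointers and kEmpty becomes kNoHeapPointers, and the tracer falls back to the conservative state after every final pause. A tiny illustrative sketch of that reset-to-safe-default pattern, with placeholder names rather than the V8 API:

#include <iostream>

enum class StackState { kMayContainHeapPointers, kNoHeapPointers };

struct ToyTracer {
  StackState state = StackState::kMayContainHeapPointers;

  void EnterFinalPause() {
    std::cout << (state == StackState::kNoHeapPointers ? "precise\n"
                                                       : "conservative\n");
    // Reset to the safe default: follow-up collections triggered from
    // callbacks may run with a different stack.
    state = StackState::kMayContainHeapPointers;
  }
};

int main() {
  ToyTracer tracer;
  tracer.state = StackState::kNoHeapPointers;
  tracer.EnterFinalPause();  // precise
  tracer.EnterFinalPause();  // conservative again
}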
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index e2ef3318ce..028949e861 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -10,6 +10,7 @@
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/off-thread-factory-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/logging/log.h"
@@ -228,7 +229,9 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(Handle<String> source,
script->set_flags(0);
script->set_host_defined_options(roots.empty_fixed_array());
- impl()->AddToScriptList(script);
+ if (script_id != Script::kTemporaryScriptId) {
+ impl()->AddToScriptList(script);
+ }
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return script;
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 85760a31c9..25825f35f7 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -20,6 +20,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/init/bootstrapper.h"
@@ -165,6 +166,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
code->initialize_flags(kind_, has_unwinding_info, is_turbofanned_,
stack_slots_, kIsNotOffHeapTrampoline);
code->set_builtin_index(builtin_index_);
+ code->set_inlined_bytecode_size(inlined_bytecode_size_);
code->set_code_data_container(*data_container);
code->set_deoptimization_data(*deoptimization_data_);
code->set_source_position_table(*source_position_table_);
@@ -530,19 +532,19 @@ Handle<String> Factory::InternalizeUtf8String(
Vector<const uc16>(buffer.get(), decoder.utf16_length()));
}
-template <typename Char>
-Handle<String> Factory::InternalizeString(const Vector<const Char>& string,
+Handle<String> Factory::InternalizeString(Vector<const uint8_t> string,
bool convert_encoding) {
- SequentialStringKey<Char> key(string, HashSeed(isolate()), convert_encoding);
+ SequentialStringKey<uint8_t> key(string, HashSeed(isolate()),
+ convert_encoding);
return InternalizeStringWithKey(&key);
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<String> Factory::InternalizeString(
- const Vector<const uint8_t>& string, bool convert_encoding);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<String> Factory::InternalizeString(
- const Vector<const uint16_t>& string, bool convert_encoding);
+Handle<String> Factory::InternalizeString(Vector<const uint16_t> string,
+ bool convert_encoding) {
+ SequentialStringKey<uint16_t> key(string, HashSeed(isolate()),
+ convert_encoding);
+ return InternalizeStringWithKey(&key);
+}
template <typename SeqString>
Handle<String> Factory::InternalizeString(Handle<SeqString> string, int from,
@@ -1049,8 +1051,9 @@ Handle<NativeContext> Factory::NewNativeContext() {
context->set_errors_thrown(Smi::zero());
context->set_math_random_index(Smi::zero());
context->set_serialized_objects(*empty_fixed_array());
- context->set_microtask_queue(nullptr);
+ context->set_microtask_queue(isolate(), nullptr);
context->set_osr_code_cache(*empty_weak_fixed_array());
+ context->set_retained_maps(*empty_weak_array_list());
return context;
}
@@ -1272,15 +1275,15 @@ Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
}
Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
- Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
- Handle<JSReceiver> thenable, Handle<Context> context) {
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> thenable,
+ Handle<JSReceiver> then, Handle<Context> context) {
DCHECK(then->IsCallable());
Handle<PromiseResolveThenableJobTask> microtask =
Handle<PromiseResolveThenableJobTask>::cast(
NewStruct(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE));
microtask->set_promise_to_resolve(*promise_to_resolve);
- microtask->set_then(*then);
microtask->set_thenable(*thenable);
+ microtask->set_then(*then);
microtask->set_context(*context);
return microtask;
}
@@ -1292,7 +1295,7 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
AllocationType::kYoung, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
- foreign->set_foreign_address(addr);
+ foreign->set_foreign_address(isolate(), addr);
return foreign;
}
@@ -1434,7 +1437,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- map.set_bit_field(0);
+ map.set_relaxed_bit_field(0);
map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
int bit_field3 =
Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2437,6 +2440,13 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
}
+Handle<WasmStruct> Factory::NewWasmStruct(Handle<Map> map) {
+ int size = map->instance_size();
+ HeapObject result = AllocateRaw(size, AllocationType::kYoung);
+ result.set_map_after_allocation(*map);
+ return handle(WasmStruct::cast(result), isolate());
+}
+
Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<SharedFunctionInfo> code) {
Handle<SourceTextModuleInfo> module_info(
@@ -2666,7 +2676,8 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray>::cast(NewJSArrayBufferView(
map, empty_byte_array(), buffer, byte_offset, byte_length));
typed_array->set_length(length);
- typed_array->SetOffHeapDataPtr(buffer->backing_store(), byte_offset);
+ typed_array->SetOffHeapDataPtr(isolate(), buffer->backing_store(),
+ byte_offset);
return typed_array;
}
@@ -2677,8 +2688,8 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
- obj->set_data_pointer(static_cast<uint8_t*>(buffer->backing_store()) +
- byte_offset);
+ obj->set_data_pointer(
+ isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
}
@@ -2761,12 +2772,8 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
LOG(isolate(), MapDetails(*map));
- Handle<JSGlobalProxy> proxy = Handle<JSGlobalProxy>::cast(
+ return Handle<JSGlobalProxy>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
- // Create identity hash early in case there is any JS collection containing
- // a global proxy key and needs to be rehashed after deserialization.
- proxy->GetOrCreateIdentityHash(isolate());
- return proxy;
}
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
@@ -2862,38 +2869,42 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
}
namespace {
-inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
+V8_INLINE int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
int mask = (cache->length() >> 1) - 1;
return number.value() & mask;
}
-inline int NumberToStringCacheHash(Handle<FixedArray> cache, double number) {
+
+V8_INLINE int NumberToStringCacheHash(Handle<FixedArray> cache, double number) {
int mask = (cache->length() >> 1) - 1;
int64_t bits = bit_cast<int64_t>(number);
return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
}
-} // namespace
-Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
- const char* string,
- bool check_cache) {
+V8_INLINE Handle<String> CharToString(Factory* factory, const char* string,
+ NumberCacheMode mode) {
// We tenure the allocated string since it is referenced from the
// number-string cache which lives in the old space.
- Handle<String> js_string = NewStringFromAsciiChecked(
- string, check_cache ? AllocationType::kOld : AllocationType::kYoung);
- if (!check_cache) return js_string;
+ AllocationType type = mode == NumberCacheMode::kIgnore
+ ? AllocationType::kYoung
+ : AllocationType::kOld;
+ return factory->NewStringFromAsciiChecked(string, type);
+}
+
+} // namespace
+void Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
+ Handle<String> js_string) {
if (!number_string_cache()->get(hash * 2).IsUndefined(isolate())) {
int full_size = isolate()->heap()->MaxNumberToStringCacheSize();
if (number_string_cache()->length() != full_size) {
Handle<FixedArray> new_cache =
NewFixedArray(full_size, AllocationType::kOld);
isolate()->heap()->set_number_string_cache(*new_cache);
- return js_string;
+ return;
}
}
number_string_cache()->set(hash * 2, *number);
number_string_cache()->set(hash * 2 + 1, *js_string);
- return js_string;
}
Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
@@ -2908,27 +2919,29 @@ Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
}
Handle<String> Factory::NumberToString(Handle<Object> number,
- bool check_cache) {
- if (number->IsSmi()) return SmiToString(Smi::cast(*number), check_cache);
+ NumberCacheMode mode) {
+ if (number->IsSmi()) return SmiToString(Smi::cast(*number), mode);
double double_value = Handle<HeapNumber>::cast(number)->value();
// Try to canonicalize doubles.
int smi_value;
if (DoubleToSmiInteger(double_value, &smi_value)) {
- return SmiToString(Smi::FromInt(smi_value), check_cache);
+ return SmiToString(Smi::FromInt(smi_value), mode);
}
return HeapNumberToString(Handle<HeapNumber>::cast(number), double_value,
- check_cache);
+ mode);
}
// Must be large enough to fit any double, int, or size_t.
static const int kNumberToStringBufferSize = 32;
Handle<String> Factory::HeapNumberToString(Handle<HeapNumber> number,
- double value, bool check_cache) {
+ double value, NumberCacheMode mode) {
int hash = 0;
- if (check_cache) {
+ if (mode != NumberCacheMode::kIgnore) {
hash = NumberToStringCacheHash(number_string_cache(), value);
+ }
+ if (mode == NumberCacheMode::kBoth) {
Handle<Object> cached = NumberToStringCacheGet(*number, hash);
if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
@@ -2936,14 +2949,16 @@ Handle<String> Factory::HeapNumberToString(Handle<HeapNumber> number,
char arr[kNumberToStringBufferSize];
Vector<char> buffer(arr, arraysize(arr));
const char* string = DoubleToCString(value, buffer);
-
- return NumberToStringCacheSet(number, hash, string, check_cache);
+ Handle<String> result = CharToString(this, string, mode);
+ if (mode != NumberCacheMode::kIgnore) {
+ NumberToStringCacheSet(number, hash, result);
+ }
+ return result;
}
-Handle<String> Factory::SmiToString(Smi number, bool check_cache) {
- int hash = 0;
- if (check_cache) {
- hash = NumberToStringCacheHash(number_string_cache(), number);
+inline Handle<String> Factory::SmiToString(Smi number, NumberCacheMode mode) {
+ int hash = NumberToStringCacheHash(number_string_cache(), number);
+ if (mode == NumberCacheMode::kBoth) {
Handle<Object> cached = NumberToStringCacheGet(number, hash);
if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
@@ -2951,9 +2966,11 @@ Handle<String> Factory::SmiToString(Smi number, bool check_cache) {
char arr[kNumberToStringBufferSize];
Vector<char> buffer(arr, arraysize(arr));
const char* string = IntToCString(number.value(), buffer);
+ Handle<String> result = CharToString(this, string, mode);
+ if (mode != NumberCacheMode::kIgnore) {
+ NumberToStringCacheSet(handle(number, isolate()), hash, result);
+ }
- Handle<String> result = NumberToStringCacheSet(handle(number, isolate()),
- hash, string, check_cache);
// Compute the hash here (rather than letting the caller take care of it) so
// that the "cache hit" case above doesn't have to bother with it.
STATIC_ASSERT(Smi::kMaxValue <= std::numeric_limits<uint32_t>::max());
@@ -2967,15 +2984,16 @@ Handle<String> Factory::SmiToString(Smi number, bool check_cache) {
Handle<String> Factory::SizeToString(size_t value, bool check_cache) {
Handle<String> result;
+ NumberCacheMode cache_mode =
+ check_cache ? NumberCacheMode::kBoth : NumberCacheMode::kIgnore;
if (value <= Smi::kMaxValue) {
int32_t int32v = static_cast<int32_t>(static_cast<uint32_t>(value));
// SmiToString sets the hash when needed, we can return immediately.
- return SmiToString(Smi::FromInt(int32v), check_cache);
+ return SmiToString(Smi::FromInt(int32v), cache_mode);
} else if (value <= kMaxSafeInteger) {
// TODO(jkummerow): Refactor the cache to not require Objects as keys.
double double_value = static_cast<double>(value);
- result =
- HeapNumberToString(NewHeapNumber(double_value), value, check_cache);
+ result = HeapNumberToString(NewHeapNumber(double_value), value, cache_mode);
} else {
char arr[kNumberToStringBufferSize];
Vector<char> buffer(arr, arraysize(arr));
@@ -3023,6 +3041,15 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
+Handle<WasmValue> Factory::NewWasmValue(int value_type, Handle<Object> ref) {
+ DCHECK(value_type == 6 || ref->IsByteArray());
+ Handle<WasmValue> wasm_value =
+ Handle<WasmValue>::cast(NewStruct(WASM_VALUE_TYPE, AllocationType::kOld));
+ wasm_value->set_value_type(value_type);
+ wasm_value->set_bytes_or_ref(*ref);
+ return wasm_value;
+}
+
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
NewStruct(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 81041f7f40..2840c711cd 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -98,6 +98,8 @@ enum FunctionMode {
kWithReadonlyPrototypeBit | kWithNameBit,
};
+enum class NumberCacheMode { kIgnore, kSetOnly, kBoth };
+
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
@@ -184,10 +186,14 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return InternalizeUtf8String(CStrVector(str));
}
- template <typename Char>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<String> InternalizeString(const Vector<const Char>& str,
+ Handle<String> InternalizeString(Vector<const uint8_t> str,
+ bool convert_encoding = false);
+ Handle<String> InternalizeString(Vector<const uint16_t> str,
bool convert_encoding = false);
+ Handle<String> InternalizeString(Vector<const char> str,
+ bool convert_encoding = false) {
+ return InternalizeString(Vector<const uint8_t>::cast(str));
+ }
template <typename SeqString>
Handle<String> InternalizeString(Handle<SeqString>, int from, int length,
@@ -230,7 +236,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
const char (&str)[N],
AllocationType allocation = AllocationType::kYoung) {
DCHECK_EQ(N, strlen(str) + 1);
- return NewStringFromOneByte(StaticCharVector(str), allocation)
+ return NewStringFromOneByte(StaticOneByteVector(str), allocation)
.ToHandleChecked();
}
@@ -370,8 +376,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<CallbackTask> NewCallbackTask(Handle<Foreign> callback,
Handle<Foreign> data);
Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
- Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
- Handle<JSReceiver> thenable, Handle<Context> context);
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> thenable,
+ Handle<JSReceiver> then, Handle<Context> context);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr);
@@ -542,6 +548,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSModuleNamespace> NewJSModuleNamespace();
+ Handle<WasmStruct> NewWasmStruct(Handle<Map> map);
+
Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
Handle<SyntheticModule> NewSyntheticModule(
Handle<String> module_name, Handle<FixedArray> export_names,
@@ -675,10 +683,13 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
DECLARE_ERROR(WasmRuntimeError)
#undef DECLARE_ERROR
- Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
- Handle<String> SmiToString(Smi number, bool check_cache = true);
- Handle<String> HeapNumberToString(Handle<HeapNumber> number, double value,
- bool check_cache = true);
+ Handle<String> NumberToString(Handle<Object> number,
+ NumberCacheMode mode = NumberCacheMode::kBoth);
+ Handle<String> SmiToString(Smi number,
+ NumberCacheMode mode = NumberCacheMode::kBoth);
+ Handle<String> HeapNumberToString(
+ Handle<HeapNumber> number, double value,
+ NumberCacheMode mode = NumberCacheMode::kBoth);
Handle<String> SizeToString(size_t value, bool check_cache = true);
inline Handle<String> Uint32ToString(uint32_t value,
@@ -734,6 +745,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+ Handle<WasmValue> NewWasmValue(int32_t value_type, Handle<Object> ref);
+
// Return a map for given number of properties using the map cache in the
// native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
@@ -801,6 +814,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
+ CodeBuilder& set_inlined_bytecode_size(uint32_t size) {
+ inlined_bytecode_size_ = size;
+ return *this;
+ }
+
CodeBuilder& set_source_position_table(Handle<ByteArray> table) {
DCHECK(!table.is_null());
source_position_table_ = table;
@@ -852,6 +870,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<Object> self_reference_;
int32_t builtin_index_ = Builtins::kNoBuiltinId;
+ uint32_t inlined_bytecode_size_ = 0;
int32_t kind_specific_flags_ = 0;
Handle<ByteArray> source_position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
@@ -927,11 +946,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
- Handle<Object> NumberToStringCacheGet(Object number, int hash);
+ V8_INLINE Handle<Object> NumberToStringCacheGet(Object number, int hash);
// Update the cache with a new number-string pair.
- Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
- const char* string, bool check_cache);
+ V8_INLINE void NumberToStringCacheSet(Handle<Object> number, int hash,
+ Handle<String> js_string);
// Creates a new JSArray with the given backing storage. Performs no
// verification of the backing storage because it may not yet be filled.
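The factory hunks above replace the bool check_cache parameter with the tri-state NumberCacheMode: kBoth reads and writes the number-string cache, kSetOnly skips the lookup but still populates the cache, and kIgnore bypasses it entirely. A hedged, standalone illustration of that mapping, using a plain std::unordered_map in place of the heap-resident cache:

#include <iostream>
#include <string>
#include <unordered_map>

enum class NumberCacheMode { kIgnore, kSetOnly, kBoth };

std::string NumberToString(int number, NumberCacheMode mode,
                           std::unordered_map<int, std::string>& cache) {
  if (mode == NumberCacheMode::kBoth) {
    auto it = cache.find(number);
    if (it != cache.end()) return it->second;  // cache hit
  }
  std::string result = std::to_string(number);
  if (mode != NumberCacheMode::kIgnore) cache[number] = result;  // populate
  return result;
}

int main() {
  std::unordered_map<int, std::string> cache;
  std::cout << NumberToString(7, NumberCacheMode::kSetOnly, cache) << "\n";
  std::cout << NumberToString(7, NumberCacheMode::kBoth, cache) << "\n";
}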
diff --git a/deps/v8/src/heap/finalization-registry-cleanup-task.cc b/deps/v8/src/heap/finalization-registry-cleanup-task.cc
index c1868d4862..2acfa31ffb 100644
--- a/deps/v8/src/heap/finalization-registry-cleanup-task.cc
+++ b/deps/v8/src/heap/finalization-registry-cleanup-task.cc
@@ -37,7 +37,6 @@ void FinalizationRegistryCleanupTask::SlowAssertNoActiveJavaScript() {
void FinalizationRegistryCleanupTask::RunInternal() {
Isolate* isolate = heap_->isolate();
- DCHECK(!isolate->host_cleanup_finalization_group_callback());
SlowAssertNoActiveJavaScript();
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8",
@@ -65,8 +64,21 @@ void FinalizationRegistryCleanupTask::RunInternal() {
// Exceptions are reported via the message handler. This is ensured by the
// verbose TryCatch.
+ //
+ // Cleanup is interrupted if there is an exception. The HTML spec calls for a
+ // microtask checkpoint after each cleanup task, so the task should return
+ // after an exception so the host can perform a microtask checkpoint. In case
+ // of exception, check if the FinalizationRegistry still needs cleanup
+ // and should be requeued.
+ //
+ // TODO(syg): Implement better scheduling for finalizers.
InvokeFinalizationRegistryCleanupFromTask(context, finalization_registry,
callback);
+ if (finalization_registry->NeedsCleanup() &&
+ !finalization_registry->scheduled_for_cleanup()) {
+ auto nop = [](HeapObject, ObjectSlot, Object) {};
+ heap_->EnqueueDirtyJSFinalizationRegistry(*finalization_registry, nop);
+ }
// Repost if there are remaining dirty FinalizationRegistries.
heap_->set_is_finalization_registry_cleanup_task_posted(false);
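The comment added above explains the behaviour: cleanup stops at the first exception so the host can run a microtask checkpoint, and a registry that still has dirty cells is re-enqueued instead of being forgotten. The following is only a rough sketch of that requeue-on-interruption pattern under invented names (ToyRegistry, RunCleanupTask); it is not the V8 scheduling logic.

#include <iostream>
#include <queue>

struct ToyRegistry {
  int dirty_cells = 0;
  bool scheduled = false;
};

void RunCleanupTask(std::queue<ToyRegistry*>& dirty, bool threw) {
  ToyRegistry* registry = dirty.front();
  dirty.pop();
  registry->scheduled = false;
  registry->dirty_cells--;  // pretend one cell was cleaned before the throw
  if (threw && registry->dirty_cells > 0 && !registry->scheduled) {
    dirty.push(registry);   // still needs cleanup: put it back on the queue
    registry->scheduled = true;
  }
}

int main() {
  ToyRegistry registry{2, true};
  std::queue<ToyRegistry*> dirty;
  dirty.push(&registry);
  RunCleanupTask(dirty, /*threw=*/true);
  std::cout << "requeued: " << std::boolalpha << !dirty.empty() << "\n";
}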
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index f023bd7c74..ce682f9c1f 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -253,11 +253,10 @@ void GCTracer::Start(GarbageCollector collector,
current_.reduce_memory = heap_->ShouldReduceMemory();
current_.start_time = start_time;
- current_.start_object_size = heap_->SizeOfObjects();
- current_.start_memory_size = heap_->memory_allocator()->Size();
- current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.young_object_size =
- heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+ current_.start_object_size = 0;
+ current_.start_memory_size = 0;
+ current_.start_holes_size = 0;
+ current_.young_object_size = 0;
current_.incremental_marking_bytes = 0;
current_.incremental_marking_duration = 0;
@@ -281,6 +280,14 @@ void GCTracer::Start(GarbageCollector collector,
}
}
+void GCTracer::StartInSafepoint() {
+ current_.start_object_size = heap_->SizeOfObjects();
+ current_.start_memory_size = heap_->memory_allocator()->Size();
+ current_.start_holes_size = CountTotalHolesSize(heap_);
+ current_.young_object_size =
+ heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+}
+
void GCTracer::ResetIncrementalMarkingCounters() {
incremental_marking_bytes_ = 0;
incremental_marking_duration_ = 0;
@@ -289,6 +296,13 @@ void GCTracer::ResetIncrementalMarkingCounters() {
}
}
+void GCTracer::StopInSafepoint() {
+ current_.end_object_size = heap_->SizeOfObjects();
+ current_.end_memory_size = heap_->memory_allocator()->Size();
+ current_.end_holes_size = CountTotalHolesSize(heap_);
+ current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
+}
+
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
@@ -309,10 +323,6 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
- current_.end_object_size = heap_->SizeOfObjects();
- current_.end_memory_size = heap_->memory_allocator()->Size();
- current_.end_holes_size = CountTotalHolesSize(heap_);
- current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
AddAllocation(current_.end_time);
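The change above splits the tracer bookkeeping: Start/Stop still record wall-clock data, while heap-size counters are now sampled by StartInSafepoint/StopInSafepoint once all threads are parked, so they cannot race with concurrent allocation. A minimal sketch of that split, with placeholder names (ToyHeap, ToyTracer) rather than the actual GCTracer interface:

#include <cstddef>
#include <iostream>

struct ToyHeap {
  std::size_t object_size = 0;
};

struct ToyTracer {
  double start_time = 0;
  std::size_t start_object_size = 0;

  void Start(double now) { start_time = now; }   // outside the safepoint
  void StartInSafepoint(const ToyHeap& heap) {   // inside the safepoint
    start_object_size = heap.object_size;        // sampled without racing mutators
  }
};

int main() {
  ToyHeap heap{1024};
  ToyTracer tracer;
  tracer.Start(/*now=*/1.0);
  // ... stop-the-world safepoint reached ...
  tracer.StartInSafepoint(heap);
  std::cout << tracer.start_object_size << "\n";
}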
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 6ff6e18a59..628e7ca4bb 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -25,19 +25,20 @@ inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
+#define TRACE_GC_CATEGORIES \
+ "devtools.timeline," TRACE_DISABLED_BY_DEFAULT("v8.gc")
+
#define TRACE_GC(tracer, scope_id) \
GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
- GCTracer::Scope::Name(gc_tracer_scope_id))
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id))
#define TRACE_BACKGROUND_GC(tracer, scope_id) \
WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope( \
tracer->worker_thread_runtime_call_stats()); \
GCTracer::BackgroundScope background_scope(tracer, scope_id, \
runtime_call_stats_scope.Get()); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
- GCTracer::BackgroundScope::Name(scope_id))
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::BackgroundScope::Name(scope_id))
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
@@ -218,9 +219,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// Start collecting data.
void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
const char* collector_reason);
+ void StartInSafepoint();
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
+ void StopInSafepoint();
void NotifySweepingCompleted();
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index e618b91058..39f5ec6c66 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,8 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
@@ -376,7 +378,7 @@ void Heap::FinalizeExternalString(String string) {
ExternalBackingStoreType::kExternalString,
ext_string.ExternalPayloadSize());
- ext_string.DisposeResource();
+ ext_string.DisposeResource(isolate());
}
Address Heap::NewSpaceTop() { return new_space_->top(); }
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 518bbcf162..606ba0fe65 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -13,6 +13,7 @@
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/once.h"
+#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
@@ -40,16 +41,18 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -71,7 +74,7 @@
#include "src/objects/slots-inl.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/strings/string-stream.h"
#include "src/strings/unicode-decoder.h"
@@ -202,8 +205,9 @@ Heap::Heap()
: isolate_(isolate()),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- safepoint_(new Safepoint(this)),
- external_string_table_(this) {
+ safepoint_(new GlobalSafepoint(this)),
+ external_string_table_(this),
+ collection_barrier_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -1108,14 +1112,32 @@ void Heap::DeoptMarkedAllocationSites() {
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
-
-void Heap::GarbageCollectionEpilogue() {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
- if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
- ZapFromSpace();
+void Heap::GarbageCollectionEpilogueInSafepoint() {
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+ isolate_->counters()->space##_bytes_available()->Set( \
+ static_cast<int>(space()->Available())); \
+ isolate_->counters()->space##_bytes_committed()->Set( \
+ static_cast<int>(space()->CommittedMemory())); \
+ isolate_->counters()->space##_bytes_used()->Set( \
+ static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+ if (space()->CommittedMemory() > 0) { \
+ isolate_->counters()->external_fragmentation_##space()->AddSample( \
+ static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
+ space()->CommittedMemory())); \
}
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+ UPDATE_COUNTERS_FOR_SPACE(space) \
+ UPDATE_FRAGMENTATION_FOR_SPACE(space)
- AllowHeapAllocation for_the_rest_of_the_epilogue;
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
#ifdef DEBUG
// Old-to-new slot sets must be empty after each collection.
@@ -1133,6 +1155,15 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
+}
+
+void Heap::GarbageCollectionEpilogue() {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
+ if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
+ ZapFromSpace();
+ }
+
+ AllowHeapAllocation for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
@@ -1160,33 +1191,6 @@ void Heap::GarbageCollectionEpilogue() {
static_cast<int>(MaximumCommittedMemory() / KB));
}
-#define UPDATE_COUNTERS_FOR_SPACE(space) \
- isolate_->counters()->space##_bytes_available()->Set( \
- static_cast<int>(space()->Available())); \
- isolate_->counters()->space##_bytes_committed()->Set( \
- static_cast<int>(space()->CommittedMemory())); \
- isolate_->counters()->space##_bytes_used()->Set( \
- static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
- if (space()->CommittedMemory() > 0) { \
- isolate_->counters()->external_fragmentation_##space()->AddSample( \
- static_cast<int>(100 - \
- (space()->SizeOfObjects() * 100.0) / \
- space()->CommittedMemory())); \
- }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
- UPDATE_COUNTERS_FOR_SPACE(space) \
- UPDATE_FRAGMENTATION_FOR_SPACE(space)
-
- UPDATE_COUNTERS_FOR_SPACE(new_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
-#undef UPDATE_COUNTERS_FOR_SPACE
-#undef UPDATE_FRAGMENTATION_FOR_SPACE
-#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
-
#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
@@ -1197,16 +1201,6 @@ void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
-
- if (FLAG_harmony_weak_refs &&
- isolate()->host_cleanup_finalization_group_callback()) {
- HandleScope handle_scope(isolate());
- Handle<JSFinalizationRegistry> finalization_registry;
- while (
- DequeueDirtyJSFinalizationRegistry().ToHandle(&finalization_registry)) {
- isolate()->RunHostCleanupFinalizationGroupCallback(finalization_registry);
- }
- }
}
class GCCallbacksScope {
@@ -1387,16 +1381,15 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
- set_current_gc_flags(kReduceMemoryFootprintMask);
+ set_current_gc_flags(
+ kReduceMemoryFootprintMask |
+ (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
+ : 0));
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
- const v8::GCCallbackFlags callback_flags =
- gc_reason == GarbageCollectionReason::kLowMemoryNotification
- ? v8::kGCCallbackFlagForced
- : v8::kGCCallbackFlagCollectAllAvailableGarbage;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
+ if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -1446,9 +1439,15 @@ void Heap::ReportExternalMemoryPressure() {
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
- if (isolate()->isolate_data()->external_memory_ >
- (isolate()->isolate_data()->external_memory_low_since_mark_compact_ +
- external_memory_hard_limit())) {
+ int64_t current = isolate()->isolate_data()->external_memory_;
+ int64_t baseline =
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_;
+ int64_t limit = isolate()->isolate_data()->external_memory_limit_;
+ TRACE_EVENT2(
+ "devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
+ static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
+ static_cast<int>((limit - baseline) / MB));
+ if (current > baseline + external_memory_hard_limit()) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
@@ -1472,10 +1471,7 @@ void Heap::ReportExternalMemoryPressure() {
const double kMaxStepSize = 10;
const double ms_step = Min(
kMaxStepSize,
- Max(kMinStepSize,
- static_cast<double>(isolate()->isolate_data()->external_memory_) /
- isolate()->isolate_data()->external_memory_limit_ *
- kMinStepSize));
+ Max(kMinStepSize, static_cast<double>(current) / limit * kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
@@ -1516,7 +1512,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
const v8::GCCallbackFlags gc_callback_flags) {
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
+ is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
+ current_gc_flags_ & kForcedGC;
DevToolsTraceEventScope devtools_trace_event_scope(
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
@@ -1558,7 +1555,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- bool next_gc_likely_to_collect_more = false;
+ size_t freed_global_handles = 0;
+
size_t committed_memory_before = 0;
if (collector == MARK_COMPACTOR) {
@@ -1584,18 +1582,69 @@ bool Heap::CollectGarbage(AllocationSpace space,
OptionalTimedHistogramScope histogram_timer_priority_scope(
gc_type_priority_timer, isolate_, mode);
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, gc_callback_flags);
+ if (!IsYoungGenerationCollector(collector)) {
+ PROFILE(isolate_, CodeMovingGCEvent());
+ }
+
+ GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact
+ : kGCTypeScavenge;
+ {
+ GCCallbacksScope scope(this);
+    // Temporarily override any embedder stack state as callbacks may create
+ // their own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ local_embedder_heap_tracer(),
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ }
+ }
+
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ tp_heap_->CollectGarbage();
+ } else {
+ freed_global_handles +=
+ PerformGarbageCollection(collector, gc_callback_flags);
+ }
+ // Clear is_current_gc_forced now that the current GC is complete. Do this
+ // before GarbageCollectionEpilogue() since that could trigger another
+ // unforced GC.
+ is_current_gc_forced_ = false;
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
+ }
+
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
+ }
if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
- // Clear is_current_gc_forced now that the current GC is complete. Do this
- // before GarbageCollectionEpilogue() since that could trigger another
- // unforced GC.
- is_current_gc_forced_ = false;
-
GarbageCollectionEpilogue();
if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
@@ -1610,11 +1659,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Trigger one more GC if
// - this GC decreased committed memory,
// - there is high fragmentation,
- // - there are live detached contexts.
event.next_gc_likely_to_collect_more =
(committed_memory_before > committed_memory_after + MB) ||
- HasHighFragmentation(used_memory_after, committed_memory_after) ||
- (detached_contexts().length() > 0);
+ HasHighFragmentation(used_memory_after, committed_memory_after);
event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
@@ -1634,6 +1681,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
+ collection_barrier_.Increment();
+
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
if (IsYoungGenerationCollector(collector)) {
@@ -1642,7 +1691,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
kGCCallbackScheduleIdleGarbageCollection);
}
- return next_gc_likely_to_collect_more;
+ return freed_global_handles > 0;
}
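
The restructured CollectGarbage above now invokes the external prologue and epilogue callbacks directly around the collection and the weak-handle post-processing. As a rough sketch of the embedder-facing side, the snippet below registers such callbacks through the public v8::Isolate API; it assumes the usual AddGCPrologueCallback/AddGCEpilogueCallback entry points and a v8.h on the include path, and is illustrative rather than part of this patch.

    #include "v8.h"

    // Runs under HEAP_EXTERNAL_PROLOGUE, before the chosen collector starts.
    void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                      v8::GCCallbackFlags flags) {
      // e.g. drop embedder-side caches that reference V8 objects weakly.
    }

    // Runs under HEAP_EXTERNAL_EPILOGUE, after global-handle processing.
    void OnGCEpilogue(v8::Isolate* isolate, v8::GCType type,
                      v8::GCCallbackFlags flags) {
      // e.g. record per-GC statistics.
    }

    void RegisterGCCallbacks(v8::Isolate* isolate) {
      isolate->AddGCPrologueCallback(OnGCPrologue);
      isolate->AddGCEpilogueCallback(OnGCEpilogue);
    }
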
@@ -1659,9 +1708,10 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
if (!isolate()->context().is_null()) {
RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
+ isolate()->raw_native_context().set_retained_maps(
+ ReadOnlyRoots(this).empty_weak_array_list());
}
- number_of_disposed_maps_ = retained_maps().length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1670,6 +1720,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
+ SafepointScope safepoint(this);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
@@ -1692,6 +1743,21 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
}
}
+void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
+ if (!incremental_marking()->IsStopped() ||
+ !incremental_marking()->CanBeActivated()) {
+ return;
+ }
+
+ const size_t old_generation_space_available = OldGenerationSpaceAvailable();
+ const size_t global_memory_available = GlobalMemoryAvailable();
+
+ if (old_generation_space_available < new_space_->Capacity() ||
+ global_memory_available < new_space_->Capacity()) {
+ incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+ }
+}
+
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
@@ -1947,6 +2013,26 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
+void Heap::CollectionBarrier::Increment() {
+ base::MutexGuard guard(&mutex_);
+ requested_ = false;
+ cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::Wait() {
+ base::MutexGuard guard(&mutex_);
+
+ if (!requested_) {
+ heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
+ requested_ = true;
+ }
+
+ while (requested_) {
+ cond_.Wait(&mutex_);
+ }
+}
+
+void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
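
The CollectionBarrier introduced here lets a background thread block until the main thread has finished a collection. A minimal standalone sketch of the same handshake, using the C++ standard library instead of the v8::base primitives; the class name is reused for clarity, but the trigger mechanism is only indicated by a comment.

    #include <condition_variable>
    #include <mutex>

    class CollectionBarrier {
      std::mutex mutex_;
      std::condition_variable cond_;
      bool requested_ = false;

     public:
      // Background thread: the first waiter requests a collection, then all
      // waiters block until the main thread signals completion.
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (!requested_) {
          requested_ = true;
          // ... notify the main thread, e.g. via a memory-pressure signal ...
        }
        cond_.wait(lock, [this] { return !requested_; });
      }

      // Main thread: called once the collection has finished.
      void Increment() {
        std::lock_guard<std::mutex> lock(mutex_);
        requested_ = false;
        cond_.notify_all();
      }
    };
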
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -1970,52 +2056,22 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
tracer()->AddSurvivalRatio(survival_rate);
}
-bool Heap::PerformGarbageCollection(
+size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
-
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return tp_heap_->CollectGarbage();
- }
-
- size_t freed_global_handles = 0;
-
- if (!IsYoungGenerationCollector(collector)) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable(this->isolate());
+ base::Optional<SafepointScope> optional_safepoint_scope;
+ if (FLAG_local_heaps) {
+ optional_safepoint_scope.emplace(this);
+    // Fill and reset all LABs (linear allocation buffers).
+ safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
-#endif
-
- GCType gc_type =
- collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
-
- {
- GCCallbacksScope scope(this);
- // Temporary override any embedder stack state as callbacks may create their
- // own state on the stack and recursively trigger GC.
- EmbedderStackStateScope embedder_scope(
- local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kUnknown);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
- }
-
- if (FLAG_local_heaps) safepoint()->Start();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
+ tracer()->StartInSafepoint();
EnsureFromSpaceIsCommitted();
@@ -2024,33 +2080,13 @@ bool Heap::PerformGarbageCollection(
switch (collector) {
case MARK_COMPACTOR:
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during
- // GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
break;
case MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case SCAVENGER:
- if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size() +
- new_lo_space()->Size()))) {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kFastPromotionDuringScavenge);
- EvacuateYoungGeneration();
- } else {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kRegularScavenge);
-
- Scavenge();
- }
+ Scavenge();
break;
}
@@ -2072,6 +2108,13 @@ bool Heap::PerformGarbageCollection(
isolate_->counters()->objs_since_last_young()->Set(0);
+ isolate_->eternal_handles()->PostGarbageCollectionProcessing();
+
+ // Update relocatables.
+ Relocatable::PostGarbageCollectionProcessing(isolate_);
+
+ size_t freed_global_handles;
+
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
// First round weak callbacks are not supposed to allocate and trigger
@@ -2095,47 +2138,14 @@ bool Heap::PerformGarbageCollection(
Verify();
}
#endif
- if (FLAG_local_heaps) safepoint()->End();
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- gc_post_processing_depth_++;
- {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- freed_global_handles +=
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
- }
- gc_post_processing_depth_--;
- }
-
- isolate_->eternal_handles()->PostGarbageCollectionProcessing();
-
- // Update relocatables.
- Relocatable::PostGarbageCollectionProcessing(isolate_);
RecomputeLimits(collector);
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
- }
+ GarbageCollectionEpilogueInSafepoint();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable(this->isolate());
- }
-#endif
+ tracer()->StopInSafepoint();
- return freed_global_handles > 0;
+ return freed_global_handles;
}
void Heap::RecomputeLimits(GarbageCollector collector) {
@@ -2247,10 +2257,11 @@ void Heap::MarkCompact() {
LOG(isolate_, ResourceEvent("markcompact", "begin"));
- uint64_t size_of_objects_before_gc = SizeOfObjects();
-
CodeSpaceMemoryModificationScope code_modifcation(this);
+ UpdateOldGenerationAllocationCounter();
+ uint64_t size_of_objects_before_gc = SizeOfObjects();
+
mark_compact_collector()->Prepare();
ms_count_++;
@@ -2266,6 +2277,14 @@ void Heap::MarkCompact() {
if (FLAG_allocation_site_pretenuring) {
EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
}
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
+ global_memory_at_last_gc_ = GlobalSizeOfObjects();
}
void Heap::MinorMarkCompact() {
@@ -2380,6 +2399,16 @@ void Heap::EvacuateYoungGeneration() {
}
void Heap::Scavenge() {
+ if ((fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()))) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ return;
+ }
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
+
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
@@ -2813,7 +2842,7 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
return 0;
}
-
+// static
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kTaggedSize;
@@ -2826,24 +2855,28 @@ size_t Heap::GetCodeRangeReservedAreaSize() {
return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}
-HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
- CreateFillerObjectAt(object.address(), filler_size, ClearRecordedSlots::kNo);
+// static
+HeapObject Heap::PrecedeWithFiller(ReadOnlyRoots roots, HeapObject object,
+ int filler_size) {
+ CreateFillerObjectAt(roots, object.address(), filler_size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
return HeapObject::FromAddress(object.address() + filler_size);
}
-HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
- int allocation_size,
+// static
+HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
+ int object_size, int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object.address(), alignment);
if (pre_filler) {
- object = PrecedeWithFiller(object, pre_filler);
+ object = PrecedeWithFiller(roots, object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size) {
- CreateFillerObjectAt(object.address() + object_size, filler_size,
- ClearRecordedSlots::kNo);
+ CreateFillerObjectAt(roots, object.address() + object_size, filler_size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
}
return object;
}
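
The static PrecedeWithFiller/AlignWithFiller pair above pads allocations so that, for example, double-aligned objects start on an 8-byte boundary. A small standalone sketch of the arithmetic, assuming a 4-byte tagged size as with pointer compression; the constants are illustrative.

    #include <cassert>
    #include <cstdint>

    constexpr int kTaggedSize = 4;
    constexpr std::uintptr_t kDoubleAlignmentMask = 8 - 1;

    // Mirrors the kDoubleAligned case: pad by one tagged word when the start
    // address is not already 8-byte aligned.
    int GetFillToAlign(std::uintptr_t address) {
      return (address & kDoubleAlignmentMask) != 0 ? kTaggedSize : 0;
    }

    int main() {
      // An 8-byte object allocated in a 12-byte slot at address 0x1004 gets a
      // 4-byte pre-filler; the object itself then starts aligned at 0x1008.
      assert(GetFillToAlign(0x1004) == kTaggedSize);
      assert(GetFillToAlign(0x1008) == 0);
      return 0;
    }
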
@@ -2929,47 +2962,83 @@ void Heap::FlushNumberStringCache() {
}
}
-HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots clear_slots_mode,
- ClearFreedMemoryMode clear_memory_mode) {
+namespace {
+
+HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return HeapObject();
HeapObject filler = HeapObject::FromAddress(addr);
- bool clear_memory =
- (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
- clear_slots_mode == ClearRecordedSlots::kYes);
if (size == kTaggedSize) {
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
- SKIP_WRITE_BARRIER);
+ filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
+ SKIP_WRITE_BARRIER);
} else if (size == 2 * kTaggedSize) {
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
- SKIP_WRITE_BARRIER);
- if (clear_memory) {
+ filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
+ SKIP_WRITE_BARRIER);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
AtomicSlot slot(ObjectSlot(addr) + 1);
*slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
}
} else {
DCHECK_GT(size, 2 * kTaggedSize);
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
- SKIP_WRITE_BARRIER);
+ filler.set_map_after_allocation(roots.unchecked_free_space_map(),
+ SKIP_WRITE_BARRIER);
FreeSpace::cast(filler).relaxed_write_size(size);
- if (clear_memory) {
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
}
}
- if (clear_slots_mode == ClearRecordedSlots::kYes &&
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- ClearRecordedSlotRange(addr, addr + size);
- }
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
DCHECK((filler.map_slot().contains_value(kNullAddress) &&
- !deserialization_complete_) ||
+ !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
filler.map().IsMap());
+
+ return filler;
+}
+
+#ifdef DEBUG
+void VerifyNoNeedToClearSlots(Address start, Address end) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+ // TODO(ulan): Support verification of large pages.
+ if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+ Space* space = chunk->owner();
+ if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
+ space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
+}
+#else
+void VerifyNoNeedToClearSlots(Address start, Address end) {}
+#endif // DEBUG
+
+} // namespace
+
+// static
+HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
+ int size,
+ ClearFreedMemoryMode clear_memory_mode) {
+ // TODO(leszeks): Verify that no slots need to be recorded.
+ HeapObject filler =
+ CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
+ VerifyNoNeedToClearSlots(addr, addr + size);
+ return filler;
+}
+
+HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots clear_slots_mode) {
+ if (size == 0) return HeapObject();
+ HeapObject filler = CreateFillerObjectAtImpl(
+ ReadOnlyRoots(this), addr, size,
+ clear_slots_mode == ClearRecordedSlots::kYes
+ ? ClearFreedMemoryMode::kClearFreedMemory
+ : ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ if (clear_slots_mode == ClearRecordedSlots::kYes) {
+ ClearRecordedSlotRange(addr, addr + size);
+ } else {
+ VerifyNoNeedToClearSlots(addr, addr + size);
+ }
+ }
return filler;
}
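
Hypothetical call sites contrasting the two filler helpers above: the static overload takes ReadOnlyRoots and therefore cannot touch recorded slots (useful off-thread), while the instance overload may also clear the recorded-slot range. The locals isolate, heap, start and size_in_bytes are assumed to exist in a surrounding heap context; only the function and enum names come from this patch.

    // Off-thread / read-only-roots path: no Heap*, so no slot clearing; memory
    // clearing is opt-in via ClearFreedMemoryMode.
    HeapObject gap = Heap::CreateFillerObjectAt(
        ReadOnlyRoots(isolate), start, size_in_bytes,
        ClearFreedMemoryMode::kDontClearFreedMemory);

    // Main-heap path: ClearRecordedSlots::kYes also clears the freed memory
    // and drops any recorded slots in [start, start + size_in_bytes).
    HeapObject gap2 = heap->CreateFillerObjectAt(start, size_in_bytes,
                                                 ClearRecordedSlots::kYes);
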
@@ -3158,7 +3227,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// to the original FixedArray (which is now the filler object).
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
- IterateRoots(&root_visitor, VISIT_ALL);
+ IterateRoots(&root_visitor, {});
}
#endif // ENABLE_SLOW_DCHECKS
@@ -3440,6 +3509,28 @@ void Heap::FinalizeIncrementalMarkingAtomically(
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
}
+void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+}
+
+void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+}
+
void Heap::FinalizeIncrementalMarkingIncrementally(
GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
@@ -3456,27 +3547,10 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
+ SafepointScope safepoint(this);
+ InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
+ InvokeIncrementalMarkingEpilogueCallbacks();
}
void Heap::RegisterDeserializedObjectsForBlackAllocation(
@@ -3758,19 +3832,15 @@ void Heap::CheckMemoryPressure() {
// the finalizers.
memory_pressure_level_ = MemoryPressureLevel::kNone;
if (memory_pressure_level == MemoryPressureLevel::kCritical) {
+ TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
CollectGarbageOnMemoryPressure();
} else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+ TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
StartIncrementalMarking(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure);
}
}
- if (memory_reducer_) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
- }
}
void Heap::CollectGarbageOnMemoryPressure() {
@@ -3811,6 +3881,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
+ TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
+ static_cast<int>(level));
MemoryPressureLevel previous = memory_pressure_level_;
memory_pressure_level_ = level;
if ((previous != MemoryPressureLevel::kCritical &&
@@ -3830,12 +3902,17 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
void Heap::EagerlyFreeExternalMemory() {
- for (Page* page : *old_space()) {
- if (!page->SweepingDone()) {
- base::MutexGuard guard(page->mutex());
+ if (FLAG_array_buffer_extension) {
+ array_buffer_sweeper()->EnsureFinished();
+ } else {
+ CHECK(!FLAG_local_heaps);
+ for (Page* page : *old_space()) {
if (!page->SweepingDone()) {
- ArrayBufferTracker::FreeDead(
- page, mark_compact_collector()->non_atomic_marking_state());
+ base::MutexGuard guard(page->mutex());
+ if (!page->SweepingDone()) {
+ ArrayBufferTracker::FreeDead(
+ page, mark_compact_collector()->non_atomic_marking_state());
+ }
}
}
}
@@ -4121,7 +4198,7 @@ void Heap::Verify() {
array_buffer_sweeper()->EnsureFinished();
VerifyPointersVisitor visitor(this);
- IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ IterateRoots(&visitor, {});
if (!isolate()->context().is_null() &&
!isolate()->normalized_map_cache()->IsUndefined(isolate())) {
@@ -4143,6 +4220,7 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
new_lo_space_->Verify(isolate());
+ VerifyStringTable(isolate());
}
void Heap::VerifyReadOnlyHeap() {
@@ -4191,10 +4269,14 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(
+ InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(COMPRESSED_OBJECT_SLOT,
+ rinfo->constant_pool_entry_address())) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
}
}
@@ -4308,6 +4390,13 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
+ if (FLAG_local_heaps) {
+ // Ensure heap is iterable
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
+ }
+
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
@@ -4361,20 +4450,13 @@ void Heap::set_builtin(int index, Code builtin) {
isolate()->builtins_table()[index] = builtin.ptr();
}
-void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
- IterateStrongRoots(v, mode);
- IterateWeakRoots(v, mode);
-}
-
-void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
- const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
- mode == VISIT_ALL_IN_MINOR_MC_MARK ||
- mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
+void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
+ DCHECK(!options.contains(SkipRoot::kWeak));
v->VisitRootPointer(Root::kStringTable, nullptr,
FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
- mode != VISIT_FOR_SERIALIZATION) {
+ if (!options.contains(SkipRoot::kExternalStringTable) &&
+ !options.contains(SkipRoot::kUnserializable)) {
// Scavenge collections have special processing for this.
// Do not visit for serialization, since the external string table will
// be populated from scratch upon deserialization.
@@ -4441,10 +4523,7 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
Heap* heap_;
};
-void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
- const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
- mode == VISIT_ALL_IN_MINOR_MC_MARK ||
- mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
+void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
v->VisitRootPointers(Root::kStrongRootList, nullptr,
roots_table().strong_roots_begin(),
roots_table().strong_roots_end());
@@ -4452,11 +4531,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
- if (mode != VISIT_ONLY_STRONG_IGNORE_STACK) {
- isolate_->Iterate(v);
- isolate_->global_handles()->IterateStrongStackRoots(v);
- v->Synchronize(VisitorSynchronization::kTop);
- }
Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
isolate_->debug()->Iterate(v);
@@ -4465,87 +4539,107 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
isolate_->compilation_cache()->Iterate(v);
v->Synchronize(VisitorSynchronization::kCompilationCache);
- // Iterate over local handles in handle scopes.
- FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
- isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
- isolate_->handle_scope_implementer()->Iterate(v);
-
- if (FLAG_local_heaps) {
- safepoint_->Iterate(&left_trim_visitor);
- safepoint_->Iterate(v);
- }
-
- isolate_->IterateDeferredHandles(&left_trim_visitor);
- isolate_->IterateDeferredHandles(v);
- v->Synchronize(VisitorSynchronization::kHandleScope);
-
- // Iterate over the builtin code objects in the heap. Note that it is not
- // necessary to iterate over code objects on scavenge collections.
- if (!isMinorGC) {
+ if (!options.contains(SkipRoot::kOldGeneration)) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
}
- // Iterate over global handles.
- switch (mode) {
- case VISIT_FOR_SERIALIZATION:
- // Global handles are not iterated by the serializer. Values referenced by
- // global handles need to be added manually.
- break;
- case VISIT_ONLY_STRONG:
- case VISIT_ONLY_STRONG_IGNORE_STACK:
- isolate_->global_handles()->IterateStrongRoots(v);
- break;
- case VISIT_ALL_IN_SCAVENGE:
- case VISIT_ALL_IN_MINOR_MC_MARK:
- isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
- break;
- case VISIT_ALL_IN_MINOR_MC_UPDATE:
- isolate_->global_handles()->IterateAllYoungRoots(v);
- break;
- case VISIT_ALL_IN_SWEEP_NEWSPACE:
- case VISIT_ALL:
- isolate_->global_handles()->IterateAllRoots(v);
- break;
- }
- v->Synchronize(VisitorSynchronization::kGlobalHandles);
+ // Iterate over pointers being held by inactive threads.
+ isolate_->thread_manager()->Iterate(v);
+ v->Synchronize(VisitorSynchronization::kThreadManager);
+
+ // Visitors in this block only run when not serializing. These include:
+ //
+ // - Thread-local and stack.
+ // - Handles.
+ // - Microtasks.
+ // - The startup object cache.
+ //
+  // When creating a real startup snapshot, these areas are expected to be empty.
+ // It is also possible to create a snapshot of a *running* isolate for testing
+ // purposes. In this case, these areas are likely not empty and will simply be
+ // skipped.
+ //
+ // The general guideline for adding visitors to this section vs. adding them
+ // above is that non-transient heap state is always visited, transient heap
+ // state is visited only when not serializing.
+ if (!options.contains(SkipRoot::kUnserializable)) {
+ if (!options.contains(SkipRoot::kGlobalHandles)) {
+ if (options.contains(SkipRoot::kWeak)) {
+ if (options.contains(SkipRoot::kOldGeneration)) {
+ // Skip handles that are either weak or old.
+ isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
+ } else {
+ // Skip handles that are weak.
+ isolate_->global_handles()->IterateStrongRoots(v);
+ }
+ } else {
+ // Do not skip weak handles.
+ if (options.contains(SkipRoot::kOldGeneration)) {
+ // Skip handles that are old.
+ isolate_->global_handles()->IterateAllYoungRoots(v);
+ } else {
+ // Do not skip any handles.
+ isolate_->global_handles()->IterateAllRoots(v);
+ }
+ }
+ }
+ v->Synchronize(VisitorSynchronization::kGlobalHandles);
+
+ if (!options.contains(SkipRoot::kStack)) {
+ IterateStackRoots(v);
+ v->Synchronize(VisitorSynchronization::kTop);
+ }
+
+ // Iterate over local handles in handle scopes.
+ FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
+ isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
+ isolate_->handle_scope_implementer()->Iterate(v);
+
+ if (FLAG_local_heaps) {
+ safepoint_->Iterate(&left_trim_visitor);
+ safepoint_->Iterate(v);
+ isolate_->persistent_handles_list()->Iterate(&left_trim_visitor);
+ isolate_->persistent_handles_list()->Iterate(v);
+ }
+
+ isolate_->IterateDeferredHandles(&left_trim_visitor);
+ isolate_->IterateDeferredHandles(v);
+ v->Synchronize(VisitorSynchronization::kHandleScope);
- // Iterate over eternal handles. Eternal handles are not iterated by the
- // serializer. Values referenced by eternal handles need to be added manually.
- if (mode != VISIT_FOR_SERIALIZATION) {
- if (isMinorGC) {
+ if (options.contains(SkipRoot::kOldGeneration)) {
isolate_->eternal_handles()->IterateYoungRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
}
- }
- v->Synchronize(VisitorSynchronization::kEternalHandles);
+ v->Synchronize(VisitorSynchronization::kEternalHandles);
- // Iterate over pointers being held by inactive threads.
- isolate_->thread_manager()->Iterate(v);
- v->Synchronize(VisitorSynchronization::kThreadManager);
+ // Iterate over pending Microtasks stored in MicrotaskQueues.
+ MicrotaskQueue* default_microtask_queue =
+ isolate_->default_microtask_queue();
+ if (default_microtask_queue) {
+ MicrotaskQueue* microtask_queue = default_microtask_queue;
+ do {
+ microtask_queue->IterateMicrotasks(v);
+ microtask_queue = microtask_queue->next();
+ } while (microtask_queue != default_microtask_queue);
+ }
- // Iterate over other strong roots (currently only identity maps).
- for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
- }
- v->Synchronize(VisitorSynchronization::kStrongRoots);
+ // Iterate over other strong roots (currently only identity maps and
+ // deoptimization entries).
+ for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
+ v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
+ }
+ v->Synchronize(VisitorSynchronization::kStrongRoots);
- // Iterate over pending Microtasks stored in MicrotaskQueues.
- MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
- if (default_microtask_queue) {
- MicrotaskQueue* microtask_queue = default_microtask_queue;
- do {
- microtask_queue->IterateMicrotasks(v);
- microtask_queue = microtask_queue->next();
- } while (microtask_queue != default_microtask_queue);
+ // Iterate over the startup object cache unless serializing or
+ // deserializing.
+ SerializerDeserializer::Iterate(isolate_, v);
+ v->Synchronize(VisitorSynchronization::kStartupObjectCache);
}
- // Iterate over the partial snapshot cache unless serializing or
- // deserializing.
- if (mode != VISIT_FOR_SERIALIZATION) {
- SerializerDeserializer::Iterate(isolate_, v);
- v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
+ if (!options.contains(SkipRoot::kWeak)) {
+ IterateWeakRoots(v, options);
}
}
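
With VisitMode gone, callers of IterateRoots express what to prune as a base::EnumSet<SkipRoot>. The fragments below are hypothetical in-tree call sites assuming an existing visitor and Heap pointer; they only illustrate how the options in this patch compose.

    // Visit every root, strong and weak.
    heap->IterateRoots(&visitor, {});

    // Skip weak roots (string table, weak global handles).
    heap->IterateRoots(&visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});

    // Also skip stack scanning, e.g. when the stack is known not to hold
    // interesting pointers for this pass.
    heap->IterateRoots(&visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak,
                                                         SkipRoot::kStack});
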
@@ -4563,6 +4657,11 @@ void Heap::IterateBuiltins(RootVisitor* v) {
STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
}
+void Heap::IterateStackRoots(RootVisitor* v) {
+ isolate_->Iterate(v);
+ isolate_->global_handles()->IterateStrongStackRoots(v);
+}
+
namespace {
size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
const size_t kGlobalMemoryToV8Ratio = 2;
@@ -4866,10 +4965,13 @@ bool Heap::ShouldOptimizeForLoadTime() {
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
+  // Ensure that an allocation retried on a background thread succeeds.
+ if (IsRetryOfFailedAllocation(local_heap)) return true;
+
if (ShouldOptimizeForMemoryUsage()) return false;
if (ShouldOptimizeForLoadTime()) return true;
@@ -4886,6 +4988,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
return true;
}
+bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
+ if (!local_heap) return false;
+ return local_heap->allocation_failed_;
+}
+
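
IsRetryOfFailedAllocation ties into the new CollectionBarrier: a background LocalHeap whose allocation failed requests a collection, waits for it, and is then allowed to expand the old generation on its retry. A rough sketch of that protocol; TryAllocate and the overall flow are illustrative, and only allocation_failed_ and RequestAndWaitForCollection come from this patch.

    // Illustrative sketch, not part of the patch.
    bool AllocateRawBackground(Heap* heap, LocalHeap* local_heap, size_t size) {
      if (TryAllocate(local_heap, size)) return true;  // fast path
      // Mark the failure so ShouldExpandOldGenerationOnSlowAllocation() lets
      // the retry through, then block until the main thread has collected.
      local_heap->allocation_failed_ = true;
      heap->RequestAndWaitForCollection();
      return TryAllocate(local_heap, size);
    }
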
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -4910,6 +5017,22 @@ size_t Heap::GlobalMemoryAvailable() {
: new_space_->Capacity() + 1;
}
+double Heap::PercentToOldGenerationLimit() {
+ double size_at_gc = old_generation_size_at_last_gc_;
+ double size_now = OldGenerationObjectsAndPromotedExternalMemorySize();
+ double current_bytes = size_now - size_at_gc;
+ double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
+}
+
+double Heap::PercentToGlobalMemoryLimit() {
+ double size_at_gc = old_generation_size_at_last_gc_;
+ double size_now = OldGenerationObjectsAndPromotedExternalMemorySize();
+ double current_bytes = size_now - size_at_gc;
+ double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
+}
+
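
PercentToOldGenerationLimit measures how far the heap has moved from its size at the last mark-compact toward the allocation limit; the new --incremental_marking_soft_trigger and --incremental_marking_hard_trigger flags used below compare against that percentage. A standalone sketch with illustrative numbers:

    #include <cstdio>

    enum class Limit { kNoLimit, kSoftLimit, kHardLimit };

    Limit CheckTriggers(double size_at_last_gc_mb, double size_now_mb,
                        double limit_mb, int soft_trigger, int hard_trigger) {
      double current = size_now_mb - size_at_last_gc_mb;
      double total = limit_mb - size_at_last_gc_mb;
      double percent = total > 0 ? (current / total) * 100.0 : 0;
      if (hard_trigger > 0 && percent > hard_trigger) return Limit::kHardLimit;
      if (soft_trigger > 0 && percent > soft_trigger) return Limit::kSoftLimit;
      return Limit::kNoLimit;
    }

    int main() {
      // 40 MB live at the last GC, 85 MB now, 100 MB limit: 75% of the way to
      // the limit, so a soft trigger of 50 fires but a hard trigger of 80
      // does not.
      Limit l = CheckTriggers(40, 85, 100, /*soft_trigger=*/50,
                              /*hard_trigger=*/80);
      std::printf("%d\n", static_cast<int>(l));  // prints 1 (kSoftLimit)
      return 0;
    }
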
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
// The kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
@@ -4937,37 +5060,42 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
if (FLAG_stress_marking > 0) {
- double gained_since_last_gc =
- PromotedSinceLastGC() +
- (isolate()->isolate_data()->external_memory_ -
- isolate()->isolate_data()->external_memory_low_since_mark_compact_);
- double size_before_gc =
- OldGenerationObjectsAndPromotedExternalMemorySize() -
- gained_since_last_gc;
- double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
- if (bytes_to_limit > 0) {
- double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
-
+ int current_percent = static_cast<int>(
+ std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
+ if (current_percent > 0) {
if (FLAG_trace_stress_marking) {
isolate()->PrintWithTimestamp(
- "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
+ "[IncrementalMarking] %d%% of the memory limit reached\n",
current_percent);
}
-
if (FLAG_fuzzer_gc_analysis) {
// Skips values >=100% since they already trigger marking.
- if (current_percent < 100.0) {
+ if (current_percent < 100) {
max_marking_limit_reached_ =
- std::max(max_marking_limit_reached_, current_percent);
+ std::max<double>(max_marking_limit_reached_, current_percent);
}
- } else if (static_cast<int>(current_percent) >=
- stress_marking_percentage_) {
+ } else if (current_percent >= stress_marking_percentage_) {
stress_marking_percentage_ = NextStressMarkingLimit();
return IncrementalMarkingLimit::kHardLimit;
}
}
}
+ if (FLAG_incremental_marking_soft_trigger > 0 ||
+ FLAG_incremental_marking_hard_trigger > 0) {
+ int current_percent = static_cast<int>(
+ std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
+ if (current_percent > FLAG_incremental_marking_hard_trigger &&
+ FLAG_incremental_marking_hard_trigger > 0) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (current_percent > FLAG_incremental_marking_soft_trigger &&
+ FLAG_incremental_marking_soft_trigger > 0) {
+ return IncrementalMarkingLimit::kSoftLimit;
+ }
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+
size_t old_generation_space_available = OldGenerationSpaceAvailable();
const size_t global_memory_available = GlobalMemoryAvailable();
@@ -5173,6 +5301,12 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
+void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
+ CHECK(V8_SHARED_RO_HEAP_BOOL);
+ delete read_only_space_;
+ space_[RO_SPACE] = read_only_space_ = space;
+}
+
void Heap::SetUpSpaces() {
   // Ensure SetUpFromReadOnlyHeap has been run.
DCHECK_NOT_NULL(read_only_space_);
@@ -5322,20 +5456,6 @@ void Heap::NotifyOldGenerationExpansion() {
}
}
-void Heap::NotifyOffThreadSpaceMerged() {
- // TODO(leszeks): Ideally we would do this check during off-thread page
- // allocation too, to proactively do GC. We should also probably do this check
- // before merging rather than after.
- if (!CanExpandOldGeneration(0)) {
- // TODO(leszeks): We should try to invoke the near-heap limit callback and
- // do a last-resort GC first.
- FatalProcessOutOfMemory("Failed to merge off-thread pages into heap.");
- }
- StartIncrementalMarkingIfAllocationLimitIsReached(
- GCFlagsForIncrementalMarking(), kGCCallbackScheduleIdleGarbageCollection);
- NotifyOldGenerationExpansion();
-}
-
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
@@ -5346,8 +5466,11 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
}
EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
- if (ShouldReduceMemory())
+ if (is_current_gc_forced()) {
+ return EmbedderHeapTracer::TraceFlags::kForced;
+ } else if (ShouldReduceMemory()) {
return EmbedderHeapTracer::TraceFlags::kReduceMemory;
+ }
return EmbedderHeapTracer::TraceFlags::kNoFlags;
}
@@ -5376,6 +5499,7 @@ void Heap::StartTearDown() {
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
+ SafepointScope scope(this);
Verify();
}
#endif
@@ -5590,11 +5714,11 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
set_script_list(*scripts);
}
-void Heap::AddRetainedMap(Handle<Map> map) {
+void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
if (map->is_in_retained_map_list()) {
return;
}
- Handle<WeakArrayList> array(retained_maps(), isolate());
+ Handle<WeakArrayList> array(context->retained_maps(), isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
@@ -5603,17 +5727,15 @@ void Heap::AddRetainedMap(Handle<Map> map) {
array = WeakArrayList::AddToEnd(
isolate(), array,
MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
- if (*array != retained_maps()) {
- set_retained_maps(*array);
+ if (*array != context->retained_maps()) {
+ context->set_retained_maps(*array);
}
map->set_is_in_retained_map_list(true);
}
void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
- DCHECK_EQ(retained_maps, this->retained_maps());
int length = retained_maps.length();
int new_length = 0;
- int new_number_of_disposed_maps = 0;
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
MaybeObject maybe_object = retained_maps.Get(i);
@@ -5629,12 +5751,8 @@ void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
retained_maps.Set(new_length, maybe_object);
retained_maps.Set(new_length + 1, age);
}
- if (i < number_of_disposed_maps_) {
- new_number_of_disposed_maps += 2;
- }
new_length += 2;
}
- number_of_disposed_maps_ = new_number_of_disposed_maps;
HeapObject undefined = ReadOnlyRoots(this).undefined_value();
for (int i = new_length; i < length; i++) {
retained_maps.Set(i, HeapObjectReference::Strong(undefined));
@@ -5722,6 +5840,15 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
#endif
}
+
+void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
+ Page* page = Page::FromAddress(start);
+ DCHECK(!page->IsLargePage());
+ DCHECK(!page->InYoungGeneration());
+ RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
+#endif
+}
#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
@@ -5875,7 +6002,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, {});
visitor.TransitiveClosure();
}
@@ -6071,13 +6198,16 @@ void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
set_builtins_constants_table(cache);
}
+void Heap::SetDetachedContexts(WeakArrayList detached_contexts) {
+ set_detached_contexts(detached_contexts);
+}
+
void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code.builtin_index());
set_interpreter_entry_trampoline_for_profiling(code);
}
void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
- DCHECK(!isolate()->host_cleanup_finalization_group_callback());
// Only one cleanup task is posted at a time.
if (!HasDirtyJSFinalizationRegistries() ||
is_finalization_registry_cleanup_task_posted_) {
@@ -6109,10 +6239,9 @@ void Heap::EnqueueDirtyJSFinalizationRegistry(
JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
dirty_js_finalization_registries_list_tail());
tail.set_next_dirty(finalization_registry);
- gc_notify_updated_slot(tail,
- finalization_registry.RawField(
- JSFinalizationRegistry::kNextDirtyOffset),
- finalization_registry);
+ gc_notify_updated_slot(
+ tail, tail.RawField(JSFinalizationRegistry::kNextDirtyOffset),
+ finalization_registry);
}
set_dirty_js_finalization_registries_list_tail(finalization_registry);
// dirty_js_finalization_registries_list_tail_ is rescanned by
@@ -6138,7 +6267,6 @@ MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
if (!FLAG_harmony_weak_refs) return;
- if (isolate()->host_cleanup_finalization_group_callback()) return;
DisallowHeapAllocation no_gc;
@@ -6252,6 +6380,17 @@ std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
return result;
}
+std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
+ std::vector<WeakArrayList> result;
+ Object context = native_contexts_list();
+ while (!context.IsUndefined(isolate())) {
+ NativeContext native_context = NativeContext::cast(context);
+ result.push_back(native_context.retained_maps());
+ context = native_context.next_context_link();
+ }
+ return result;
+}
+
size_t Heap::NumberOfDetachedContexts() {
// The detached_contexts() array has two entries per detached context.
return detached_contexts().length() / 2;
@@ -6609,12 +6748,11 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = COMPRESSED_OBJECT_SLOT;
} else {
- // Constant pools don't currently support compressed objects, as
- // their values are all pointer sized (though this could change
- // therefore we have a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = OBJECT_SLOT;
+ slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 6d6eddf61a..888d174c02 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -17,6 +17,8 @@
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/base/atomic-utils.h"
+#include "src/base/enum-set.h"
+#include "src/base/platform/condition-variable.h"
#include "src/builtins/accessors.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
@@ -66,6 +68,7 @@ class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+class GlobalSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
@@ -74,19 +77,21 @@ class JSFinalizationRegistry;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MemoryAllocator;
+class MemoryChunk;
class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
+class OffThreadHeap;
class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
-class Safepoint;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
+class SharedReadOnlySpace;
class Space;
class StressScavengeObserver;
class TimedHistogram;
@@ -160,6 +165,15 @@ enum class YoungGenerationHandling {
enum class GCIdleTimeAction : uint8_t;
+enum class SkipRoot {
+ kExternalStringTable,
+ kGlobalHandles,
+ kOldGeneration,
+ kStack,
+ kUnserializable,
+ kWeak
+};
+
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
@@ -252,7 +266,7 @@ class Heap {
// object references.
base::Mutex* relocation_mutex() { return &relocation_mutex_; }
- // Support for partial snapshots. After calling this we have a linear
+ // Support for context snapshots. After calling this we have a linear
// space to write objects in each space.
struct Chunk {
uint32_t size;
@@ -298,6 +312,9 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
+  // GCs that are forced, either through testing configurations (requiring
+  // --expose-gc) or through DevTools (using LowMemoryNotification).
+ static const int kForcedGC = 2;
// The minimum size of a HeapObject on the heap.
static const int kMinObjectSizeInTaggedWords = 2;
@@ -434,9 +451,6 @@ class Heap {
void NotifyOldGenerationExpansion();
- // Notifies the heap that an off-thread space has been merged into it.
- void NotifyOffThreadSpaceMerged();
-
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
@@ -456,13 +470,9 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo. If the memory after the object header of
- // the filler should be cleared, pass in kClearFreedMemory. The default is
- // kDontClearFreedMemory.
+ // pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
- Address addr, int size, ClearRecordedSlots clear_slots_mode,
- ClearFreedMemoryMode clear_memory_mode =
- ClearFreedMemoryMode::kDontClearFreedMemory);
+ Address addr, int size, ClearRecordedSlots clear_slots_mode);
template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -596,6 +606,8 @@ class Heap {
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
+ void RequestAndWaitForCollection();
+
//
// Support for the API.
//
@@ -620,7 +632,7 @@ class Heap {
void AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension);
- Safepoint* safepoint() { return safepoint_.get(); }
+ GlobalSafepoint* safepoint() { return safepoint_.get(); }
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
@@ -668,7 +680,8 @@ class Heap {
void CompactWeakArrayLists(AllocationType allocation);
- V8_EXPORT_PRIVATE void AddRetainedMap(Handle<Map> map);
+ V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
+ Handle<Map> map);
// This event is triggered after successful allocation of a new object made
// by runtime. Allocations of target space for object evacuation do not
@@ -714,6 +727,8 @@ class Heap {
// Sets read-only heap and space.
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+ void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);
+
// Sets up the heap memory without creating any objects.
void SetUpSpaces();
@@ -808,6 +823,7 @@ class Heap {
void UnregisterStrongRoots(FullObjectSlot start);
void SetBuiltinsConstantsTable(FixedArray cache);
+ void SetDetachedContexts(WeakArrayList detached_contexts);
// A full copy of the interpreter entry trampoline, used as a template to
// create copies of the builtin at runtime. The copies are used to create
@@ -916,18 +932,15 @@ class Heap {
// (de)serialization or heap verification.
// Iterates over the strong roots and the weak roots.
- void IterateRoots(RootVisitor* v, VisitMode mode);
- // Iterates over the strong roots.
- void IterateStrongRoots(RootVisitor* v, VisitMode mode);
+ void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
// Iterates over weak string tables.
- void IterateWeakRoots(RootVisitor* v, VisitMode mode);
- // Iterates over weak global handles.
+ void IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateWeakGlobalHandles(RootVisitor* v);
- // Iterates over builtins.
void IterateBuiltins(RootVisitor* v);
+ void IterateStackRoots(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
@@ -950,6 +963,7 @@ class Heap {
#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
+ void VerifySlotRangeHasNoRecordedSlots(Address start, Address end);
#endif
// ===========================================================================
@@ -976,6 +990,7 @@ class Heap {
void StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
+ void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
// Synchronously finalizes incremental marking.
@@ -1323,15 +1338,15 @@ class Heap {
// ===========================================================================
// Creates a filler object and returns a heap object immediately after it.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT HeapObject
- PrecedeWithFiller(HeapObject object, int filler_size);
+ V8_EXPORT_PRIVATE static V8_WARN_UNUSED_RESULT HeapObject
+ PrecedeWithFiller(ReadOnlyRoots roots, HeapObject object, int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
- V8_WARN_UNUSED_RESULT HeapObject
- AlignWithFiller(HeapObject object, int object_size, int allocation_size,
- AllocationAlignment alignment);
+ static V8_WARN_UNUSED_RESULT HeapObject
+ AlignWithFiller(ReadOnlyRoots roots, HeapObject object, int object_size,
+ int allocation_size, AllocationAlignment alignment);
// Allocate an external backing store with the given allocation callback.
// If the callback fails (indicated by a nullptr result) then this function
@@ -1520,6 +1535,19 @@ class Heap {
DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
+ class CollectionBarrier {
+ Heap* heap_;
+ base::Mutex mutex_;
+ base::ConditionVariable cond_;
+ bool requested_;
+
+ public:
+ explicit CollectionBarrier(Heap* heap) : heap_(heap), requested_(false) {}
+
+ void Increment();
+ void Wait();
+ };
+
struct StrongRootsList;
struct StringTypeTable {
@@ -1607,10 +1635,9 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
- // Performs garbage collection
- // Returns whether there is a chance another major GC could
- // collect more garbage.
- bool PerformGarbageCollection(
+ // Performs garbage collection in a safepoint.
+ // Returns the number of freed global handles.
+ size_t PerformGarbageCollection(
GarbageCollector collector,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1633,6 +1660,15 @@ class Heap {
V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
int size_in_bytes);
+ // Initialize a filler object to keep the ability to iterate over the heap
+ // when introducing gaps within pages. If the memory after the object header
+ // of the filler should be cleared, pass in kClearFreedMemory. The default is
+ // kDontClearFreedMemory.
+ V8_EXPORT_PRIVATE static HeapObject CreateFillerObjectAt(
+ ReadOnlyRoots roots, Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode =
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+
// Range write barrier implementation.
template <int kModeMask, typename TSlot>
V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
@@ -1704,6 +1740,9 @@ class Heap {
void FinalizeIncrementalMarkingIncrementally(
GarbageCollectionReason gc_reason);
+ void InvokeIncrementalMarkingPrologueCallbacks();
+ void InvokeIncrementalMarkingEpilogueCallbacks();
+
// Returns the timer used for a given GC type.
// - GCScavenger: young generation GC
// - GCCompactor: full GC
@@ -1733,6 +1772,7 @@ class Heap {
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue();
void GarbageCollectionEpilogue();
+ void GarbageCollectionEpilogueInSafepoint();
// Performs a major collection in the whole heap.
void MarkCompact();
@@ -1812,10 +1852,14 @@ class Heap {
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
- bool ShouldExpandOldGenerationOnSlowAllocation();
+ bool ShouldExpandOldGenerationOnSlowAllocation(
+ LocalHeap* local_heap = nullptr);
+ bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
HeapGrowingMode CurrentHeapGrowingMode();
+ double PercentToOldGenerationLimit();
+ double PercentToGlobalMemoryLimit();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
@@ -1922,6 +1966,7 @@ class Heap {
#endif // DEBUG
std::vector<Handle<NativeContext>> FindAllNativeContexts();
+ std::vector<WeakArrayList> FindAllRetainedMaps();
MemoryMeasurement* memory_measurement() { return memory_measurement_.get(); }
// The amount of memory that has been freed concurrently.
@@ -1978,11 +2023,6 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_ = 0;
- // The length of the retained_maps array at the time of context disposal.
- // This separates maps in the retained_maps array that were created before
- // and after context disposal.
- int number_of_disposed_maps_ = 0;
-
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
@@ -2136,6 +2176,9 @@ class Heap {
// The size of objects in old generation after the last MarkCompact GC.
size_t old_generation_size_at_last_gc_ = 0;
+ // The size of global memory after the last MarkCompact GC.
+ size_t global_memory_at_last_gc_ = 0;
+
// The feedback storage is used to store allocation sites (keys) and how often
// they have been visited (values) by finding a memento behind an object. The
// storage is only alive temporary during a GC. The invariant is that all
@@ -2166,7 +2209,7 @@ class Heap {
GCCallbackFlags current_gc_callback_flags_ =
GCCallbackFlags::kNoGCCallbackFlags;
- std::unique_ptr<Safepoint> safepoint_;
+ std::unique_ptr<GlobalSafepoint> safepoint_;
bool is_current_gc_forced_ = false;
@@ -2174,6 +2217,8 @@ class Heap {
base::Mutex relocation_mutex_;
+ CollectionBarrier collection_barrier_;
+
int gc_callbacks_depth_ = 0;
bool deserialization_complete_ = false;
@@ -2223,6 +2268,7 @@ class Heap {
friend class ScavengeTaskObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
+ friend class OffThreadHeap;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
@@ -2243,6 +2289,7 @@ class Heap {
// The allocator interface.
friend class Factory;
+ friend class OffThreadFactory;
// The Isolate constructs us.
friend class Isolate;
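
The CollectionBarrier added to Heap above is a small rendezvous: a background thread that fails to allocate can block on it until the main thread reports that a collection has run. Below is a standalone sketch of that pattern using std::mutex and std::condition_variable instead of V8's base:: wrappers; the exact request/wake protocol and the class and method names are illustrative assumptions, not V8's implementation.

#include <condition_variable>
#include <mutex>

// Illustrative analogue of the CollectionBarrier idea above.
class CollectionBarrierSketch {
 public:
  // Background thread: flag that a GC is wanted and block until the main
  // thread signals that a collection has completed.
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    requested_ = true;
    cond_.wait(lock, [this] { return !requested_; });
  }

  // Main thread, after a GC finished: clear the request and wake all waiters.
  void Increment() {
    std::lock_guard<std::mutex> lock(mutex_);
    requested_ = false;
    cond_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool requested_ = false;
};
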
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 63e47ca313..e1979a2aea 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -4,6 +4,7 @@
#include "src/heap/incremental-marking-job.h"
+#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
@@ -47,15 +48,18 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
- if (!IsTaskPending(task_type) && !heap->IsTearingDown()) {
+ base::MutexGuard guard(&mutex_);
+
+ if (!IsTaskPending(task_type) && !heap->IsTearingDown() &&
+ FLAG_incremental_marking_task) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
const EmbedderHeapTracer::EmbedderStackState stack_state =
taskrunner->NonNestableTasksEnabled()
- ? EmbedderHeapTracer::EmbedderStackState::kEmpty
- : EmbedderHeapTracer::EmbedderStackState::kUnknown;
+ ? EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers
+ : EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
auto task =
std::make_unique<Task>(heap->isolate(), this, stack_state, task_type);
if (task_type == TaskType::kNormal) {
@@ -111,8 +115,11 @@ void IncrementalMarkingJob::Task::RunInternal() {
}
// Clear this flag after StartIncrementalMarking call to avoid
- // scheduling a new task when startining incremental marking.
- job_->SetTaskPending(task_type_, false);
+ // scheduling a new task when starting incremental marking.
+ {
+ base::MutexGuard guard(&job_->mutex_);
+ job_->SetTaskPending(task_type_, false);
+ }
if (!incremental_marking->IsStopped()) {
StepResult step_result = Step(heap);
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index ed133e88e5..63c700be4f 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -28,15 +28,15 @@ class IncrementalMarkingJob final {
double CurrentTimeToTask(Heap* heap) const;
+ private:
+ class Task;
+ static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+
bool IsTaskPending(TaskType task_type) const {
return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_;
}
- private:
- class Task;
- static constexpr double kDelayInSeconds = 10.0 / 1000.0;
-
void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) {
normal_task_pending_ = value;
@@ -45,6 +45,7 @@ class IncrementalMarkingJob final {
}
}
+ base::Mutex mutex_;
double scheduled_time_ = 0.0;
bool normal_task_pending_ = false;
bool delayed_task_pending_ = false;
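
The base::Mutex added to IncrementalMarkingJob above guards the task-pending flags so that ScheduleTask and the task's own flag clearing can race safely. A minimal standalone sketch of a mutex-guarded pending flag follows, assuming the only invariant needed is "at most one task posted at a time"; the names are illustrative.

#include <mutex>

// Several threads may try to schedule the job; only the first one past the
// lock actually posts a task, and the task clears the flag under the same lock.
class PendingTaskFlag {
 public:
  // Returns true if the caller should post a new task.
  bool TrySchedule() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (pending_) return false;
    pending_ = true;
    return true;
  }

  // Called from the task itself once it starts running.
  void Clear() {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_ = false;
  }

 private:
  std::mutex mutex_;
  bool pending_ = false;
};
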
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 76fdbc80c8..8fb1492fe1 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -16,9 +16,11 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/safepoint.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/numbers/conversions.h"
@@ -321,6 +323,9 @@ void IncrementalMarking::StartMarking() {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Start marking\n");
}
+
+ heap_->InvokeIncrementalMarkingPrologueCallbacks();
+
is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
collector_->StartMarking();
@@ -351,6 +356,8 @@ void IncrementalMarking::StartMarking() {
heap_->local_embedder_heap_tracer()->TracePrologue(
heap_->flags_for_embedder_tracer());
}
+
+ heap_->InvokeIncrementalMarkingEpilogueCallbacks();
}
void IncrementalMarking::StartBlackAllocation() {
@@ -407,7 +414,8 @@ void IncrementalMarking::MarkRoots() {
DCHECK(IsMarking());
IncrementalMarkingRootMarkingVisitor visitor(this);
- heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG_IGNORE_STACK);
+ heap_->IterateRoots(
+ &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
}
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
@@ -432,42 +440,41 @@ void IncrementalMarking::RetainMaps() {
// - GC is requested by tests or dev-tools (abort_incremental_marking_).
bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
FLAG_retain_maps_for_n_gc == 0;
- WeakArrayList retained_maps = heap()->retained_maps();
- int length = retained_maps.length();
- // The number_of_disposed_maps separates maps in the retained_maps
- // array that were created before and after context disposal.
- // We do not age and retain disposed maps to avoid memory leaks.
- int number_of_disposed_maps = heap()->number_of_disposed_maps_;
- for (int i = 0; i < length; i += 2) {
- MaybeObject value = retained_maps.Get(i);
- HeapObject map_heap_object;
- if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
- continue;
- }
- int age = retained_maps.Get(i + 1).ToSmi().value();
- int new_age;
- Map map = Map::cast(map_heap_object);
- if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
- marking_state()->IsWhite(map)) {
- if (ShouldRetainMap(map, age)) {
- WhiteToGreyAndPush(map);
+ std::vector<WeakArrayList> retained_maps_list = heap()->FindAllRetainedMaps();
+
+ for (WeakArrayList retained_maps : retained_maps_list) {
+ int length = retained_maps.length();
+
+ for (int i = 0; i < length; i += 2) {
+ MaybeObject value = retained_maps.Get(i);
+ HeapObject map_heap_object;
+ if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
+ continue;
}
- Object prototype = map.prototype();
- if (age > 0 && prototype.IsHeapObject() &&
- marking_state()->IsWhite(HeapObject::cast(prototype))) {
- // The prototype is not marked, age the map.
- new_age = age - 1;
+ int age = retained_maps.Get(i + 1).ToSmi().value();
+ int new_age;
+ Map map = Map::cast(map_heap_object);
+ if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
+ if (ShouldRetainMap(map, age)) {
+ WhiteToGreyAndPush(map);
+ }
+ Object prototype = map.prototype();
+ if (age > 0 && prototype.IsHeapObject() &&
+ marking_state()->IsWhite(HeapObject::cast(prototype))) {
+ // The prototype is not marked, age the map.
+ new_age = age - 1;
+ } else {
+ // The prototype and the constructor are marked, this map keeps only
+ // transition tree alive, not JSObjects. Do not age the map.
+ new_age = age;
+ }
} else {
- // The prototype and the constructor are marked, this map keeps only
- // transition tree alive, not JSObjects. Do not age the map.
- new_age = age;
+ new_age = FLAG_retain_maps_for_n_gc;
+ }
+ // Compact the array and update the age.
+ if (new_age != age) {
+ retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
}
- } else {
- new_age = FLAG_retain_maps_for_n_gc;
- }
- // Compact the array and update the age.
- if (new_age != age) {
- retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
}
}
}
@@ -656,6 +663,27 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
DCHECK(!Heap::InYoungGeneration(candidate));
});
#endif
+
+ if (FLAG_harmony_weak_refs) {
+ weak_objects_->js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+
+#ifdef DEBUG
+ // TODO(syg, marja): Support WeakCells in the young generation.
+ weak_objects_->weak_cells.Iterate([](WeakCell weak_cell) {
+ DCHECK(!Heap::InYoungGeneration(weak_cell));
+ });
+#endif
+ }
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -677,34 +705,33 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
return StepResult::kNoImmediateWork;
}
- constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
+ constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
const double start = heap_->MonotonicallyIncreasingTimeInMs();
const double deadline = start + expected_duration_ms;
- double current;
bool empty_worklist;
- bool remote_tracing_done = false;
- do {
- {
- LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
- HeapObject object;
- size_t cnt = 0;
- empty_worklist = true;
- while (marking_worklists()->PopEmbedder(&object)) {
- scope.TracePossibleWrapper(JSObject::cast(object));
- if (++cnt == kObjectsToProcessBeforeInterrupt) {
- cnt = 0;
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
+ HeapObject object;
+ size_t cnt = 0;
+ empty_worklist = true;
+ while (marking_worklists()->PopEmbedder(&object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
+ if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
empty_worklist = false;
break;
}
+ cnt = 0;
}
}
- remote_tracing_done = local_tracer->Trace(deadline);
- current = heap_->MonotonicallyIncreasingTimeInMs();
- } while (!empty_worklist && !remote_tracing_done && (current < deadline));
- local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
+ }
+ bool remote_tracing_done =
+ local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
+ double current = heap_->MonotonicallyIncreasingTimeInMs();
+ local_tracer->SetEmbedderWorklistEmpty(true);
*duration_ms = current - start;
return (empty_worklist && remote_tracing_done)
? StepResult::kNoImmediateWork
@@ -931,6 +958,11 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
+#ifdef DEBUG
+ // Enforce safepoint here such that background threads cannot allocate between
+ // completing sweeping and VerifyCountersAfterSweeping().
+ SafepointScope scope(heap());
+#endif
if (collector_->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
!collector_->sweeper()->AreSweeperTasksRunning())) {
@@ -939,6 +971,8 @@ void IncrementalMarking::FinalizeSweeping() {
if (!collector_->sweeping_in_progress()) {
#ifdef DEBUG
heap_->VerifyCountersAfterSweeping();
+#else
+ SafepointScope scope(heap());
#endif
StartMarking();
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 04000be352..7d06c08649 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -28,7 +28,7 @@ enum class StepResult {
class V8_EXPORT_PRIVATE IncrementalMarking final {
public:
- enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
+ enum State : uint8_t { STOPPED, SWEEPING, MARKING, COMPLETE };
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
@@ -317,7 +317,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
size_t bytes_marked_concurrently_ = 0;
// Must use SetState() above to update state_
- State state_;
+ // Atomic since main thread can complete marking (= changing state), while a
+ // background thread's slow allocation path will check whether incremental
+ // marking is currently running.
+ std::atomic<State> state_;
bool is_compacting_ = false;
bool was_activated_ = false;
@@ -325,7 +328,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool finalize_marking_completed_ = false;
IncrementalMarkingJob incremental_marking_job_;
- GCRequestType request_type_ = NONE;
+ std::atomic<GCRequestType> request_type_{NONE};
Observer new_generation_observer_;
Observer old_generation_observer_;
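
Turning state_ and request_type_ into std::atomic above lets a background thread's slow allocation path read the marking state while the main thread transitions it. A sketch of that single-writer, many-reader pattern is below; the use of memory_order_relaxed is an assumption, V8 may simply rely on the default sequentially consistent ordering.

#include <atomic>
#include <cstdint>

// The main thread owns the transitions; background threads only need a
// well-defined (if slightly stale) read of the current state.
enum class MarkingState : uint8_t { kStopped, kSweeping, kMarking, kComplete };

class MarkingStateHolder {
 public:
  void SetState(MarkingState s) {  // main thread only
    state_.store(s, std::memory_order_relaxed);
  }
  bool IsMarking() const {  // any thread
    return state_.load(std::memory_order_relaxed) == MarkingState::kMarking;
  }

 private:
  std::atomic<MarkingState> state_{MarkingState::kStopped};
};
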
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 9f29af218b..b3655aaad8 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
+
#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
new file mode 100644
index 0000000000..4036391949
--- /dev/null
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -0,0 +1,547 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/large-spaces.h"
+
+#include "src/execution/isolate.h"
+#include "src/heap/combined-heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/list.h"
+#include "src/heap/marking.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/remembered-set-inl.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces-inl.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/sanitizer/msan.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// This check is here to ensure that the lower 32 bits of any real heap object
+// can't overlap with the lower 32 bits of cleared weak reference value and
+// therefore it's enough to compare only the lower 32 bits of a MaybeObject in
+// order to figure out if it's a cleared weak reference or not.
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable) {
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ LargePage* page = static_cast<LargePage*>(chunk);
+ page->SetFlag(MemoryChunk::LARGE_PAGE);
+ page->list_node().Initialize();
+ return page;
+}
+
+size_t LargeObjectSpace::Available() {
+ // We return zero here since we cannot take advantage of already allocated
+ // large object memory.
+ return 0;
+}
+
+Address LargePage::GetAddressToShrink(Address object_address,
+ size_t object_size) {
+ if (executable() == EXECUTABLE) {
+ return 0;
+ }
+ size_t used_size = ::RoundUp((object_address - address()) + object_size,
+ MemoryAllocator::GetCommitPageSize());
+ if (used_size < CommittedPhysicalMemory()) {
+ return address() + used_size;
+ }
+ return 0;
+}
+
+void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+ DCHECK_NULL(this->sweeping_slot_set());
+ RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
+ RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
+}
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpaceObjectIterator
+
+LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
+ LargeObjectSpace* space) {
+ current_ = space->first_page();
+}
+
+HeapObject LargeObjectSpaceObjectIterator::Next() {
+ if (current_ == nullptr) return HeapObject();
+
+ HeapObject object = current_->GetObject();
+ current_ = current_->next_page();
+ return object;
+}
+
+// -----------------------------------------------------------------------------
+// OldLargeObjectSpace
+
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+ : Space(heap, id, new NoFreeList()),
+ size_(0),
+ page_count_(0),
+ objects_size_(0) {}
+
+void LargeObjectSpace::TearDown() {
+ while (!memory_chunk_list_.Empty()) {
+ LargePage* page = first_page();
+ LOG(heap()->isolate(),
+ DeleteEvent("LargeObjectChunk",
+ reinterpret_cast<void*>(page->address())));
+ memory_chunk_list_.Remove(page);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+ }
+}
+
+AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
+ return AllocateRaw(object_size, NOT_EXECUTABLE);
+}
+
+AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
+ // Check if we want to force a GC before growing the old space further.
+ // If so, fail the allocation.
+ if (!heap()->CanExpandOldGeneration(object_size) ||
+ !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
+ return AllocationResult::Retry(identity());
+ }
+
+ LargePage* page = AllocateLargePage(object_size, executable);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ HeapObject object = page->GetObject();
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ if (heap()->incremental_marking()->black_allocation()) {
+ heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ }
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
+ page->InitializationMemoryFence();
+ heap()->NotifyOldGenerationExpansion();
+ AllocationStep(object_size, object.address(), object_size);
+ return object;
+}
+
+LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
+ Executability executable) {
+ LargePage* page = heap()->memory_allocator()->AllocateLargePage(
+ object_size, this, executable);
+ if (page == nullptr) return nullptr;
+ DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
+
+ AddPage(page, object_size);
+
+ HeapObject object = page->GetObject();
+
+ heap()->CreateFillerObjectAt(object.address(), object_size,
+ ClearRecordedSlots::kNo);
+ return page;
+}
+
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+ // On a platform that provides lazy committing of memory, we over-account
+ // the actually committed memory. There is no easy way right now to support
+ // precise accounting of committed memory in large object space.
+ return CommittedMemory();
+}
+
+LargePage* CodeLargeObjectSpace::FindPage(Address a) {
+ const Address key = MemoryChunk::FromAddress(a)->address();
+ auto it = chunk_map_.find(key);
+ if (it != chunk_map_.end()) {
+ LargePage* page = it->second;
+ CHECK(page->Contains(a));
+ return page;
+ }
+ return nullptr;
+}
+
+void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ LargeObjectSpaceObjectIterator it(this);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ if (marking_state->IsBlackOrGrey(obj)) {
+ Marking::MarkWhite(marking_state->MarkBitFrom(obj));
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ chunk->ResetProgressBar();
+ marking_state->SetLiveBytes(chunk, 0);
+ }
+ DCHECK(marking_state->IsWhite(obj));
+ }
+}
+
+void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
+ for (Address current = reinterpret_cast<Address>(page);
+ current < reinterpret_cast<Address>(page) + page->size();
+ current += MemoryChunk::kPageSize) {
+ chunk_map_[current] = page;
+ }
+}
+
+void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+ for (Address current = page->address();
+ current < reinterpret_cast<Address>(page) + page->size();
+ current += MemoryChunk::kPageSize) {
+ chunk_map_.erase(current);
+ }
+}
+
+void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
+ DCHECK(page->IsLargePage());
+ DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
+ DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
+ size_t object_size = static_cast<size_t>(page->GetObject().Size());
+ static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
+ page->ClearFlag(MemoryChunk::FROM_PAGE);
+ AddPage(page, object_size);
+}
+
+void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
+ size_ += static_cast<int>(page->size());
+ AccountCommitted(page->size());
+ objects_size_ += object_size;
+ page_count_++;
+ memory_chunk_list_.PushBack(page);
+ page->set_owner(this);
+ page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+}
+
+void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
+ size_ -= static_cast<int>(page->size());
+ AccountUncommitted(page->size());
+ objects_size_ -= object_size;
+ page_count_--;
+ memory_chunk_list_.Remove(page);
+ page->set_owner(nullptr);
+}
+
+void LargeObjectSpace::FreeUnmarkedObjects() {
+ LargePage* current = first_page();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
+ size_t surviving_object_size = 0;
+ while (current) {
+ LargePage* next_current = current->next_page();
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ size_t size = static_cast<size_t>(object.Size());
+ if (marking_state->IsBlack(object)) {
+ Address free_start;
+ surviving_object_size += size;
+ if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
+ 0) {
+ DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
+ current->ClearOutOfLiveRangeSlots(free_start);
+ const size_t bytes_to_free =
+ current->size() - (free_start - current->address());
+ heap()->memory_allocator()->PartialFreeMemory(
+ current, free_start, bytes_to_free,
+ current->area_start() + object.Size());
+ size_ -= bytes_to_free;
+ AccountUncommitted(bytes_to_free);
+ }
+ } else {
+ RemovePage(current, size);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+ current);
+ }
+ current = next_current;
+ }
+ objects_size_ = surviving_object_size;
+}
+
+bool LargeObjectSpace::Contains(HeapObject object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+
+ bool owned = (chunk->owner() == this);
+
+ SLOW_DCHECK(!owned || ContainsSlow(object.address()));
+
+ return owned;
+}
+
+bool LargeObjectSpace::ContainsSlow(Address addr) {
+ for (LargePage* page : *this) {
+ if (page->Contains(addr)) return true;
+ }
+ return false;
+}
+
+std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
+ Heap* heap) {
+ return std::unique_ptr<ObjectIterator>(
+ new LargeObjectSpaceObjectIterator(this));
+}
+
+#ifdef VERIFY_HEAP
+// We do not assume that the large object iterator works, because it depends
+// on the invariants we are checking during verification.
+void LargeObjectSpace::Verify(Isolate* isolate) {
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (LargePage* chunk = first_page(); chunk != nullptr;
+ chunk = chunk->next_page()) {
+ // Each chunk contains an object that starts at the large object page's
+ // object area start.
+ HeapObject object = chunk->GetObject();
+ Page* page = Page::FromHeapObject(object);
+ CHECK(object.address() == page->area_start());
+
+ // The first word should be a map, and we expect all map pointers to be
+ // in map space or read-only space.
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+
+ // We have only the following types in the large object space:
+ if (!(object.IsAbstractCode() || object.IsSeqString() ||
+ object.IsExternalString() || object.IsThinString() ||
+ object.IsFixedArray() || object.IsFixedDoubleArray() ||
+ object.IsWeakFixedArray() || object.IsWeakArrayList() ||
+ object.IsPropertyArray() || object.IsByteArray() ||
+ object.IsFeedbackVector() || object.IsBigInt() ||
+ object.IsFreeSpace() || object.IsFeedbackMetadata() ||
+ object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
+ object.IsPreparseData()) &&
+ !FLAG_young_generation_large_objects) {
+ FATAL("Found invalid Object (instance_type=%i) in large object space.",
+ object.map().instance_type());
+ }
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ if (!FLAG_verify_heap_skip_remembered_set) {
+ heap()->VerifyRememberedSetFor(object);
+ }
+
+ // Byte arrays and strings don't have interior pointers.
+ if (object.IsAbstractCode()) {
+ VerifyPointersVisitor code_visitor(heap());
+ object.IterateBody(map, object.Size(), &code_visitor);
+ } else if (object.IsFixedArray()) {
+ FixedArray array = FixedArray::cast(object);
+ for (int j = 0; j < array.length(); j++) {
+ Object element = array.get(j);
+ if (element.IsHeapObject()) {
+ HeapObject element_object = HeapObject::cast(element);
+ CHECK(IsValidHeapObject(heap(), element_object));
+ CHECK(element_object.map().IsMap());
+ }
+ }
+ } else if (object.IsPropertyArray()) {
+ PropertyArray array = PropertyArray::cast(object);
+ for (int j = 0; j < array.length(); j++) {
+ Object property = array.get(j);
+ if (property.IsHeapObject()) {
+ HeapObject property_object = HeapObject::cast(property);
+ CHECK(heap()->Contains(property_object));
+ CHECK(property_object.map().IsMap());
+ }
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
+ }
+}
+#endif
+
+#ifdef DEBUG
+void LargeObjectSpace::Print() {
+ StdoutStream os;
+ LargeObjectSpaceObjectIterator it(this);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ obj.Print(os);
+ }
+}
+#endif // DEBUG
+
+OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, LO_SPACE) {}
+
+OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
+ : LargeObjectSpace(heap, id) {}
+
+void OldLargeObjectSpace::MergeOffThreadSpace(
+ OffThreadLargeObjectSpace* other) {
+ DCHECK(identity() == other->identity());
+
+ while (!other->memory_chunk_list().Empty()) {
+ LargePage* page = other->first_page();
+ HeapObject object = page->GetObject();
+ int size = object.Size();
+ other->RemovePage(page, size);
+ AddPage(page, size);
+
+ // TODO(leszeks): Here we should AllocationStep, see the TODO in
+ // PagedSpace::MergeOffThreadSpace.
+
+ if (heap()->incremental_marking()->black_allocation()) {
+ heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ }
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(object));
+ }
+}
+
+NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
+ : LargeObjectSpace(heap, NEW_LO_SPACE),
+ pending_object_(0),
+ capacity_(capacity) {}
+
+AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ // Do not allocate more objects if promoting the existing object would exceed
+ // the old generation capacity.
+ if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
+ return AllocationResult::Retry(identity());
+ }
+
+ // Allocation for the first object must succeed independent from the capacity.
+ if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
+ return AllocationResult::Retry(identity());
+ }
+
+ LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+
+ // The size of the first object may exceed the capacity.
+ capacity_ = Max(capacity_, SizeOfObjects());
+
+ HeapObject result = page->GetObject();
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetFlag(MemoryChunk::TO_PAGE);
+ pending_object_.store(result.address(), std::memory_order_relaxed);
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ heap()
+ ->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->ClearLiveness(page);
+ }
+#endif // ENABLE_MINOR_MC
+ page->InitializationMemoryFence();
+ DCHECK(page->IsLargePage());
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
+ AllocationStep(object_size, result.address(), object_size);
+ return result;
+}
+
+size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
+
+void NewLargeObjectSpace::Flip() {
+ for (LargePage* chunk = first_page(); chunk != nullptr;
+ chunk = chunk->next_page()) {
+ chunk->SetFlag(MemoryChunk::FROM_PAGE);
+ chunk->ClearFlag(MemoryChunk::TO_PAGE);
+ }
+}
+
+void NewLargeObjectSpace::FreeDeadObjects(
+ const std::function<bool(HeapObject)>& is_dead) {
+ bool is_marking = heap()->incremental_marking()->IsMarking();
+ size_t surviving_object_size = 0;
+ bool freed_pages = false;
+ for (auto it = begin(); it != end();) {
+ LargePage* page = *it;
+ it++;
+ HeapObject object = page->GetObject();
+ size_t size = static_cast<size_t>(object.Size());
+ if (is_dead(object)) {
+ freed_pages = true;
+ RemovePage(page, size);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ if (FLAG_concurrent_marking && is_marking) {
+ heap()->concurrent_marking()->ClearMemoryChunkData(page);
+ }
+ } else {
+ surviving_object_size += size;
+ }
+ }
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
+ objects_size_ = surviving_object_size;
+ if (freed_pages) {
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ }
+}
+
+void NewLargeObjectSpace::SetCapacity(size_t capacity) {
+ capacity_ = Max(capacity, SizeOfObjects());
+}
+
+CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
+ : OldLargeObjectSpace(heap, CODE_LO_SPACE),
+ chunk_map_(kInitialChunkMapCapacity) {}
+
+AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
+ return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
+}
+
+void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
+ OldLargeObjectSpace::AddPage(page, object_size);
+ InsertChunkMapEntries(page);
+ heap()->isolate()->AddCodeMemoryChunk(page);
+}
+
+void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
+ RemoveChunkMapEntries(page);
+ heap()->isolate()->RemoveCodeMemoryChunk(page);
+ OldLargeObjectSpace::RemovePage(page, object_size);
+}
+
+OffThreadLargeObjectSpace::OffThreadLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, LO_SPACE) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ // OffThreadLargeObjectSpace doesn't work with third-party heap.
+ UNREACHABLE();
+#endif
+}
+
+AllocationResult OffThreadLargeObjectSpace::AllocateRaw(int object_size) {
+ LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
+ if (page == nullptr) return AllocationResult::Retry(identity());
+
+ return page->GetObject();
+}
+
+void OffThreadLargeObjectSpace::FreeUnmarkedObjects() {
+ // We should never try to free objects in this space.
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
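
Large-object pages each hold exactly one object, which is why LargeObjectSpaceObjectIterator above can walk the page list and return GetObject() once per page. A toy sketch of the same one-object-per-page walk, with placeholder types standing in for LargePage and HeapObject:

// PageSketch and LargeObjectIteratorSketch are illustrative stand-ins only.
struct PageSketch {
  PageSketch* next = nullptr;
  void* object = nullptr;  // the single object on this large page
};

class LargeObjectIteratorSketch {
 public:
  explicit LargeObjectIteratorSketch(PageSketch* first) : current_(first) {}

  // Returns the next object, or nullptr when the page list is exhausted.
  void* Next() {
    if (current_ == nullptr) return nullptr;
    void* obj = current_->object;
    current_ = current_->next;
    return obj;
  }

 private:
  PageSketch* current_;
};
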
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
new file mode 100644
index 0000000000..207b76a093
--- /dev/null
+++ b/deps/v8/src/heap/large-spaces.h
@@ -0,0 +1,232 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_LARGE_SPACES_H_
+#define V8_HEAP_LARGE_SPACES_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <unordered_map>
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+class LargePage : public MemoryChunk {
+ public:
+ // A limit to guarantee that we do not overflow typed slot offset in the old
+ // to old remembered set. Note that this limit is higher than what assembler
+ // already imposes on x64 and ia32 architectures.
+ static const int kMaxCodePageSize = 512 * MB;
+
+ static LargePage* FromHeapObject(HeapObject o) {
+ return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
+ }
+
+ HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }
+
+ LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }
+
+ // Uncommit memory that is not in use anymore by the object. If the object
+ // cannot be shrunk 0 is returned.
+ Address GetAddressToShrink(Address object_address, size_t object_size);
+
+ void ClearOutOfLiveRangeSlots(Address free_start);
+
+ private:
+ static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable);
+
+ friend class MemoryAllocator;
+};
+
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
+// -----------------------------------------------------------------------------
+// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
+// the large object space. Large objects do not move during garbage collections.
+
+class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
+ public:
+ using iterator = LargePageIterator;
+
+ ~LargeObjectSpace() override { TearDown(); }
+
+ // Releases internal resources, frees objects in this space.
+ void TearDown();
+
+ // Available bytes for objects in this space.
+ size_t Available() override;
+
+ size_t Size() override { return size_; }
+ size_t SizeOfObjects() override { return objects_size_; }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
+
+ int PageCount() { return page_count_; }
+
+ // Frees unmarked objects.
+ virtual void FreeUnmarkedObjects();
+
+ // Checks whether a heap object is in this space; O(1).
+ bool Contains(HeapObject obj);
+ // Checks whether an address is in the object area in this space. Iterates all
+ // objects in the space. May be slow.
+ bool ContainsSlow(Address addr);
+
+ // Checks whether the space is empty.
+ bool IsEmpty() { return first_page() == nullptr; }
+
+ virtual void AddPage(LargePage* page, size_t object_size);
+ virtual void RemovePage(LargePage* page, size_t object_size);
+
+ LargePage* first_page() {
+ return reinterpret_cast<LargePage*>(Space::first_page());
+ }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+ virtual bool is_off_thread() const { return false; }
+
+#ifdef VERIFY_HEAP
+ virtual void Verify(Isolate* isolate);
+#endif
+
+#ifdef DEBUG
+ void Print() override;
+#endif
+
+ protected:
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
+
+ LargePage* AllocateLargePage(int object_size, Executability executable);
+
+ size_t size_; // allocated bytes
+ int page_count_; // number of chunks
+ size_t objects_size_; // size of objects
+
+ private:
+ friend class LargeObjectSpaceObjectIterator;
+};
+
+class OffThreadLargeObjectSpace;
+
+class OldLargeObjectSpace : public LargeObjectSpace {
+ public:
+ explicit OldLargeObjectSpace(Heap* heap);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
+
+ // Clears the marking state of live objects.
+ void ClearMarkingStateOfLiveObjects();
+
+ void PromoteNewLargeObject(LargePage* page);
+
+ V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);
+
+ protected:
+ explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
+};
+
+class NewLargeObjectSpace : public LargeObjectSpace {
+ public:
+ NewLargeObjectSpace(Heap* heap, size_t capacity);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
+
+ // Available bytes for objects in this space.
+ size_t Available() override;
+
+ void Flip();
+
+ void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
+
+ void SetCapacity(size_t capacity);
+
+ // The last allocated object that is not guaranteed to be initialized when the
+ // concurrent marker visits it.
+ Address pending_object() {
+ return pending_object_.load(std::memory_order_relaxed);
+ }
+
+ void ResetPendingObject() { pending_object_.store(0); }
+
+ private:
+ std::atomic<Address> pending_object_;
+ size_t capacity_;
+};
+
+class CodeLargeObjectSpace : public OldLargeObjectSpace {
+ public:
+ explicit CodeLargeObjectSpace(Heap* heap);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
+
+ // Finds a large object page containing the given address, returns nullptr if
+ // such a page doesn't exist.
+ LargePage* FindPage(Address a);
+
+ protected:
+ void AddPage(LargePage* page, size_t object_size) override;
+ void RemovePage(LargePage* page, size_t object_size) override;
+
+ private:
+ static const size_t kInitialChunkMapCapacity = 1024;
+ void InsertChunkMapEntries(LargePage* page);
+ void RemoveChunkMapEntries(LargePage* page);
+
+ // Page-aligned addresses to their corresponding LargePage.
+ std::unordered_map<Address, LargePage*> chunk_map_;
+};
+
+class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
+ public:
+ explicit OffThreadLargeObjectSpace(Heap* heap);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
+
+ void FreeUnmarkedObjects() override;
+
+ bool is_off_thread() const override { return true; }
+
+ protected:
+ // OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
+ friend class OldLargeObjectSpace;
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
+};
+
+class LargeObjectSpaceObjectIterator : public ObjectIterator {
+ public:
+ explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
+
+ HeapObject Next() override;
+
+ private:
+ LargePage* current_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_LARGE_SPACES_H_
diff --git a/deps/v8/src/base/list.h b/deps/v8/src/heap/list.h
index 18e45318a2..5ab9a03610 100644
--- a/deps/v8/src/base/list.h
+++ b/deps/v8/src/heap/list.h
@@ -2,20 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASE_LIST_H_
-#define V8_BASE_LIST_H_
+#ifndef V8_HEAP_LIST_H_
+#define V8_HEAP_LIST_H_
#include <atomic>
#include "src/base/logging.h"
namespace v8 {
-namespace base {
+namespace internal {
+namespace heap {
template <class T>
class List {
public:
List() : front_(nullptr), back_(nullptr) {}
+ List(List&& other) V8_NOEXCEPT : front_(std::exchange(other.front_, nullptr)),
+ back_(std::exchange(other.back_, nullptr)) {}
+ List& operator=(List&& other) V8_NOEXCEPT {
+ front_ = std::exchange(other.front_, nullptr);
+ back_ = std::exchange(other.back_, nullptr);
+ return *this;
+ }
+
+ void ShallowCopyTo(List* other) const {
+ other->front_ = front_;
+ other->back_ = back_;
+ }
void PushBack(T* element) {
DCHECK(!element->list_node().next());
@@ -130,7 +143,8 @@ class ListNode {
friend class List<T>;
};
-} // namespace base
+} // namespace heap
+} // namespace internal
} // namespace v8
-#endif // V8_BASE_LIST_H_
+#endif // V8_HEAP_LIST_H_
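
The move constructor and move assignment added to heap::List above steal both end pointers with std::exchange, leaving the moved-from list empty rather than dangling. A compact sketch of that pattern with a placeholder node type (the list is intrusive and does not own its nodes, so no cleanup is needed on assignment):

#include <utility>

struct Node;  // intrusive node type; details irrelevant here

class ListSketch {
 public:
  ListSketch() = default;
  // Moving steals both pointers and resets the source to the empty state.
  ListSketch(ListSketch&& other) noexcept
      : front_(std::exchange(other.front_, nullptr)),
        back_(std::exchange(other.back_, nullptr)) {}
  ListSketch& operator=(ListSketch&& other) noexcept {
    front_ = std::exchange(other.front_, nullptr);
    back_ = std::exchange(other.back_, nullptr);
    return *this;
  }

 private:
  Node* front_ = nullptr;
  Node* back_ = nullptr;
};
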
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index 10d6ce7370..0f6f7e5453 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -12,10 +12,10 @@
namespace v8 {
namespace internal {
-AllocationResult LocalAllocator::Allocate(AllocationSpace space,
- int object_size,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
+AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
+ int object_size,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
switch (space) {
case NEW_SPACE:
return AllocateInNewSpace(object_size, origin, alignment);
@@ -30,8 +30,8 @@ AllocationResult LocalAllocator::Allocate(AllocationSpace space,
}
}
-void LocalAllocator::FreeLast(AllocationSpace space, HeapObject object,
- int object_size) {
+void EvacuationAllocator::FreeLast(AllocationSpace space, HeapObject object,
+ int object_size) {
switch (space) {
case NEW_SPACE:
FreeLastInNewSpace(object, object_size);
@@ -45,7 +45,8 @@ void LocalAllocator::FreeLast(AllocationSpace space, HeapObject object,
}
}
-void LocalAllocator::FreeLastInNewSpace(HeapObject object, int object_size) {
+void EvacuationAllocator::FreeLastInNewSpace(HeapObject object,
+ int object_size) {
if (!new_space_lab_.TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size,
@@ -53,7 +54,8 @@ void LocalAllocator::FreeLastInNewSpace(HeapObject object, int object_size) {
}
}
-void LocalAllocator::FreeLastInOldSpace(HeapObject object, int object_size) {
+void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
+ int object_size) {
if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size,
@@ -61,8 +63,8 @@ void LocalAllocator::FreeLastInOldSpace(HeapObject object, int object_size) {
}
}
-AllocationResult LocalAllocator::AllocateInLAB(int object_size,
- AllocationAlignment alignment) {
+AllocationResult EvacuationAllocator::AllocateInLAB(
+ int object_size, AllocationAlignment alignment) {
AllocationResult allocation;
if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
return AllocationResult::Retry(OLD_SPACE);
@@ -79,22 +81,24 @@ AllocationResult LocalAllocator::AllocateInLAB(int object_size,
return allocation;
}
-bool LocalAllocator::NewLocalAllocationBuffer() {
+bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
- LocalAllocationBuffer saved_lab_ = new_space_lab_;
AllocationResult result =
new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ if (result.IsRetry()) {
+ lab_allocation_will_fail_ = true;
+ return false;
+ }
+ LocalAllocationBuffer saved_lab = std::move(new_space_lab_);
new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
- if (new_space_lab_.IsValid()) {
- new_space_lab_.TryMerge(&saved_lab_);
- return true;
+ DCHECK(new_space_lab_.IsValid());
+ if (!new_space_lab_.TryMerge(&saved_lab)) {
+ saved_lab.CloseAndMakeIterable();
}
- new_space_lab_ = saved_lab_;
- lab_allocation_will_fail_ = true;
- return false;
+ return true;
}
-AllocationResult LocalAllocator::AllocateInNewSpace(
+AllocationResult EvacuationAllocator::AllocateInNewSpace(
int object_size, AllocationOrigin origin, AllocationAlignment alignment) {
if (object_size > kMaxLabObjectSize) {
return new_space_->AllocateRawSynchronized(object_size, alignment, origin);
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 5ecfe7d8c1..ba8cd2e610 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -12,27 +12,28 @@
namespace v8 {
namespace internal {
-// Allocator encapsulating thread-local allocation. Assumes that all other
-// allocations also go through LocalAllocator.
-class LocalAllocator {
+// Allocator encapsulating thread-local allocation during collection. Assumes
+// that all other allocations also go through EvacuationAllocator.
+class EvacuationAllocator {
public:
static const int kLabSize = 32 * KB;
static const int kMaxLabObjectSize = 8 * KB;
- explicit LocalAllocator(Heap* heap, LocalSpaceKind local_space_kind)
+ explicit EvacuationAllocator(Heap* heap, LocalSpaceKind local_space_kind)
: heap_(heap),
new_space_(heap->new_space()),
compaction_spaces_(heap, local_space_kind),
new_space_lab_(LocalAllocationBuffer::InvalidBuffer()),
lab_allocation_will_fail_(false) {}
- // Needs to be called from the main thread to finalize this LocalAllocator.
+ // Needs to be called from the main thread to finalize this
+ // EvacuationAllocator.
void Finalize() {
heap_->old_space()->MergeLocalSpace(compaction_spaces_.Get(OLD_SPACE));
heap_->code_space()->MergeLocalSpace(compaction_spaces_.Get(CODE_SPACE));
- // Give back remaining LAB space if this LocalAllocator's new space LAB
+ // Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
- const LinearAllocationArea info = new_space_lab_.Close();
+ const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
const Address top = new_space_->top();
if (info.limit() != kNullAddress && info.limit() == top) {
DCHECK_NE(info.top(), kNullAddress);
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 392b343236..3aea67411d 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -3,30 +3,62 @@
// found in the LICENSE file.
#include "src/heap/local-heap.h"
+
+#include <memory>
+
+#include "src/base/platform/mutex.h"
#include "src/handles/local-handles.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/safepoint.h"
namespace v8 {
namespace internal {
-LocalHeap::LocalHeap(Heap* heap)
+LocalHeap::LocalHeap(Heap* heap,
+ std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap),
state_(ThreadState::Running),
safepoint_requested_(false),
+ allocation_failed_(false),
prev_(nullptr),
next_(nullptr),
- handles_(new LocalHandles) {
+ handles_(new LocalHandles),
+ persistent_handles_(std::move(persistent_handles)),
+ old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this);
+ if (persistent_handles_) {
+ persistent_handles_->Attach(this);
+ }
}
LocalHeap::~LocalHeap() {
+ // Give up LAB before parking thread
+ old_space_allocator_.FreeLinearAllocationArea();
+
// Park thread since removing the local heap could block.
EnsureParkedBeforeDestruction();
heap_->safepoint()->RemoveLocalHeap(this);
}
+Handle<Object> LocalHeap::NewPersistentHandle(Address value) {
+ if (!persistent_handles_) {
+ persistent_handles_.reset(
+ heap_->isolate()->NewPersistentHandles().release());
+ }
+ return persistent_handles_->NewHandle(value);
+}
+
+std::unique_ptr<PersistentHandles> LocalHeap::DetachPersistentHandles() {
+ if (persistent_handles_) persistent_handles_->Detach();
+ return std::move(persistent_handles_);
+}
+
+bool LocalHeap::IsParked() {
+ base::MutexGuard guard(&state_mutex_);
+ return state_ == ThreadState::Parked;
+}
+
void LocalHeap::Park() {
base::MutexGuard guard(&state_mutex_);
CHECK(state_ == ThreadState::Running);
@@ -67,5 +99,13 @@ void LocalHeap::ClearSafepointRequested() {
void LocalHeap::EnterSafepoint() { heap_->safepoint()->EnterFromThread(this); }
+void LocalHeap::FreeLinearAllocationArea() {
+ old_space_allocator_.FreeLinearAllocationArea();
+}
+
+void LocalHeap::MakeLinearAllocationAreaIterable() {
+ old_space_allocator_.MakeLinearAllocationAreaIterable();
+}
+
} // namespace internal
} // namespace v8
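
LocalHeap::NewPersistentHandle above lazily creates a PersistentHandles block the first time a background thread needs one, and DetachPersistentHandles moves ownership back out to the caller. A sketch of that lazily-created, detachable resource pattern; HandleBlock and the method names are placeholders, not V8 API:

#include <memory>
#include <utility>

struct HandleBlock {};  // stand-in for a persistent-handles block

class LocalHeapSketch {
 public:
  // Create the block on first use and reuse it afterwards.
  HandleBlock* EnsureHandles() {
    if (!handles_) handles_ = std::make_unique<HandleBlock>();
    return handles_.get();
  }

  // Hand ownership back to the caller; the local heap is left without one.
  std::unique_ptr<HandleBlock> Detach() { return std::move(handles_); }

 private:
  std::unique_ptr<HandleBlock> handles_;
};
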
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index a6eed1d928..31c66bc2be 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -10,6 +10,8 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/execution/isolate.h"
+#include "src/heap/concurrent-allocator.h"
namespace v8 {
namespace internal {
@@ -17,10 +19,13 @@ namespace internal {
class Heap;
class Safepoint;
class LocalHandles;
+class PersistentHandles;
class LocalHeap {
public:
- V8_EXPORT_PRIVATE explicit LocalHeap(Heap* heap);
+ V8_EXPORT_PRIVATE explicit LocalHeap(
+ Heap* heap,
+ std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
V8_EXPORT_PRIVATE ~LocalHeap();
// Invoked by main thread to signal this thread that it needs to halt in a
@@ -33,6 +38,16 @@ class LocalHeap {
LocalHandles* handles() { return handles_.get(); }
+ V8_EXPORT_PRIVATE Handle<Object> NewPersistentHandle(Address value);
+ V8_EXPORT_PRIVATE std::unique_ptr<PersistentHandles>
+ DetachPersistentHandles();
+
+ bool IsParked();
+
+ Heap* heap() { return heap_; }
+
+ ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -53,6 +68,9 @@ class LocalHeap {
void EnterSafepoint();
+ void FreeLinearAllocationArea();
+ void MakeLinearAllocationAreaIterable();
+
Heap* heap_;
base::Mutex state_mutex_;
@@ -61,14 +79,20 @@ class LocalHeap {
std::atomic<bool> safepoint_requested_;
+ bool allocation_failed_;
+
LocalHeap* prev_;
LocalHeap* next_;
std::unique_ptr<LocalHandles> handles_;
+ std::unique_ptr<PersistentHandles> persistent_handles_;
+
+ ConcurrentAllocator old_space_allocator_;
friend class Heap;
- friend class Safepoint;
+ friend class GlobalSafepoint;
friend class ParkedScope;
+ friend class ConcurrentAllocator;
};
class ParkedScope {
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 295982beb5..7c06286f97 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -5,14 +5,13 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
-#include "src/heap/mark-compact.h"
-
#include "src/base/bits.h"
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/slots-inl.h"
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 7a87adb5f6..a7e1c93e1f 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -20,6 +20,7 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
+#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
@@ -29,6 +30,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -96,7 +98,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
- void VerifyRoots(VisitMode mode);
+ void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
@@ -105,8 +107,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
Heap* heap_;
};
-void MarkingVerifier::VerifyRoots(VisitMode mode) {
- heap_->IterateStrongRoots(this, mode);
+void MarkingVerifier::VerifyRoots() {
+ heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
@@ -177,7 +179,7 @@ class FullMarkingVerifier : public MarkingVerifier {
heap->mark_compact_collector()->non_atomic_marking_state()) {}
void Run() override {
- VerifyRoots(VISIT_ONLY_STRONG);
+ VerifyRoots();
VerifyMarking(heap_->new_space());
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
@@ -273,7 +275,7 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
- void VerifyRoots(VisitMode mode);
+ void VerifyRoots();
void VerifyEvacuationOnPage(Address start, Address end);
void VerifyEvacuation(NewSpace* new_space);
void VerifyEvacuation(PagedSpace* paged_space);
@@ -281,8 +283,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
Heap* heap_;
};
-void EvacuationVerifier::VerifyRoots(VisitMode mode) {
- heap_->IterateStrongRoots(this, mode);
+void EvacuationVerifier::VerifyRoots() {
+ heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
@@ -323,7 +325,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
void Run() override {
- VerifyRoots(VISIT_ALL);
+ VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
@@ -1323,7 +1325,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
src.set_map_word(MapWord::FromForwardingAddress(dst));
}
- EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
+ EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(local_allocator),
@@ -1382,7 +1384,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#endif // VERIFY_HEAP
Heap* heap_;
- LocalAllocator* local_allocator_;
+ EvacuationAllocator* local_allocator_;
RecordMigratedSlotVisitor* record_visitor_;
std::vector<MigrationObserver*> observers_;
MigrateFunction migration_function_;
@@ -1391,7 +1393,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(
- Heap* heap, LocalAllocator* local_allocator,
+ Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
bool always_promote_young)
@@ -1545,7 +1547,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
public:
- EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
+ EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
@@ -1602,7 +1604,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
+ heap()->IterateRoots(root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
// Custom marking for string table and top optimized frame.
MarkStringTable(custom_root_body_visitor);
@@ -2535,9 +2537,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
- if (!isolate()->host_cleanup_finalization_group_callback()) {
- heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
- }
+ heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
}
void MarkCompactCollector::AbortWeakObjects() {
@@ -2575,11 +2575,11 @@ MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = COMPRESSED_OBJECT_SLOT;
} else {
- // Constant pools don't support compressed values at this time
- // (this may change, therefore use a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = OBJECT_SLOT;
+ slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
@@ -2869,7 +2869,7 @@ class Evacuator : public Malloced {
}
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- LocalAllocator* local_allocator, bool always_promote_young)
+ EvacuationAllocator* local_allocator, bool always_promote_young)
: heap_(heap),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(heap_, local_allocator, record_visitor,
@@ -2927,7 +2927,7 @@ class Evacuator : public Malloced {
EvacuateOldSpaceVisitor old_space_visitor_;
// Locally cached collector data.
- LocalAllocator* local_allocator_;
+ EvacuationAllocator* local_allocator_;
// Book keeping info.
double duration_;
@@ -3015,7 +3015,7 @@ class FullEvacuator : public Evacuator {
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
EphemeronRememberedSet ephemeron_remembered_set_;
RecordMigratedSlotVisitor record_visitor_;
- LocalAllocator local_allocator_;
+ EvacuationAllocator local_allocator_;
MarkCompactCollector* collector_;
};
@@ -3877,7 +3877,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+ // The external string table is updated at the end.
+ heap_->IterateRoots(&updating_visitor, base::EnumSet<SkipRoot>{
+ SkipRoot::kExternalStringTable});
}
{
@@ -4131,7 +4133,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyRoots();
VerifyMarking(heap_->new_space());
}
@@ -4181,7 +4183,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
: EvacuationVerifier(heap) {}
void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyRoots();
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
@@ -4461,7 +4463,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ heap()->IterateRoots(&updating_visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
+ SkipRoot::kOldGeneration});
}
{
TRACE_GC(heap()->tracer(),
@@ -4915,7 +4919,15 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
&JSObject::IsUnmodifiedApiObject);
- heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+ // MinorMC treats all weak roots except for global handles as strong.
+ // That is why we don't set skip_weak = true here and instead visit
+ // global handles separately.
+ heap()->IterateRoots(
+ root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
+ SkipRoot::kGlobalHandles,
+ SkipRoot::kOldGeneration});
+ isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
+ root_visitor);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&job, &slots](MemoryChunk* chunk) {
@@ -5049,7 +5061,7 @@ class YoungGenerationEvacuator : public Evacuator {
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
YoungGenerationRecordMigratedSlotVisitor record_visitor_;
- LocalAllocator local_allocator_;
+ EvacuationAllocator local_allocator_;
MinorMarkCompactCollector* collector_;
};
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 7e13db2145..30723ede38 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -24,6 +24,7 @@ class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
+class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index e162268e30..ba45bf1674 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -323,26 +323,23 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
this->VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
- if (weak_cell.target().IsHeapObject()) {
- HeapObject target = HeapObject::cast(weak_cell.target());
- HeapObject unregister_token =
- HeapObject::cast(weak_cell.unregister_token());
- concrete_visitor()->SynchronizePageAccess(target);
- concrete_visitor()->SynchronizePageAccess(unregister_token);
- if (concrete_visitor()->marking_state()->IsBlackOrGrey(target) &&
- concrete_visitor()->marking_state()->IsBlackOrGrey(unregister_token)) {
- // Record the slots inside the WeakCell, since the IterateBody above
- // didn't visit it.
- ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
- concrete_visitor()->RecordSlot(weak_cell, slot, target);
- slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
- concrete_visitor()->RecordSlot(weak_cell, slot, unregister_token);
- } else {
- // WeakCell points to a potentially dead object or a dead unregister
- // token. We have to process them when we know the liveness of the whole
- // transitive closure.
- weak_objects_->weak_cells.Push(task_id_, weak_cell);
- }
+ HeapObject target = weak_cell.relaxed_target();
+ HeapObject unregister_token = HeapObject::cast(weak_cell.unregister_token());
+ concrete_visitor()->SynchronizePageAccess(target);
+ concrete_visitor()->SynchronizePageAccess(unregister_token);
+ if (concrete_visitor()->marking_state()->IsBlackOrGrey(target) &&
+ concrete_visitor()->marking_state()->IsBlackOrGrey(unregister_token)) {
+ // Record the slots inside the WeakCell, since the IterateBody above
+ // didn't visit it.
+ ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
+ concrete_visitor()->RecordSlot(weak_cell, slot, target);
+ slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
+ concrete_visitor()->RecordSlot(weak_cell, slot, unregister_token);
+ } else {
+ // WeakCell points to a potentially dead object or a dead unregister
+ // token. We have to process them when we know the liveness of the whole
+ // transitive closure.
+ weak_objects_->weak_cells.Push(task_id_, weak_cell);
}
return size;
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index a94978a8b2..a4c2a9f522 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -8,6 +8,7 @@
#include "src/common/globals.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
index ceda7d68d2..af76bbb12e 100644
--- a/deps/v8/src/heap/marking.cc
+++ b/deps/v8/src/heap/marking.cc
@@ -7,6 +7,8 @@
namespace v8 {
namespace internal {
+const size_t Bitmap::kSize = Bitmap::CellsCount() * Bitmap::kBytesPerCell;
+
template <>
bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
uint32_t start_index, uint32_t end_index) {
@@ -77,7 +79,7 @@ class CellPrinter {
public:
CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
- void Print(uint32_t pos, uint32_t cell) {
+ void Print(size_t pos, uint32_t cell) {
if (cell == seq_type) {
seq_length++;
return;
@@ -92,14 +94,14 @@ class CellPrinter {
return;
}
- PrintF("%d: ", pos);
+ PrintF("%zu: ", pos);
PrintWord(cell);
PrintF("\n");
}
void Flush() {
if (seq_length > 0) {
- PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+ PrintF("%zu: %dx%zu\n", seq_start, seq_type == 0 ? 0 : 1,
seq_length * Bitmap::kBitsPerCell);
seq_length = 0;
}
@@ -108,9 +110,9 @@ class CellPrinter {
static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
private:
- uint32_t seq_start;
+ size_t seq_start;
uint32_t seq_type;
- uint32_t seq_length;
+ size_t seq_length;
};
} // anonymous namespace
@@ -118,7 +120,7 @@ class CellPrinter {
template <>
void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print() {
CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
+ for (size_t i = 0; i < CellsCount(); i++) {
printer.Print(i, cells()[i]);
}
printer.Flush();
@@ -127,7 +129,7 @@ void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print() {
template <>
bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
+ for (size_t i = 0; i < CellsCount(); i++) {
if (cells()[i] != 0) {
return false;
}
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 6d73b0c4b4..fc87041d4e 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -98,16 +98,18 @@ class V8_EXPORT_PRIVATE Bitmap {
static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
- static const size_t kLength = (1 << kPageSizeBits) >> (kTaggedSizeLog2);
-
- static const size_t kSize = (1 << kPageSizeBits) >>
- (kTaggedSizeLog2 + kBitsPerByteLog2);
-
- static int CellsForLength(int length) {
+ // The length is the number of bits in this bitmap. (+1) accounts for
+ // the case where the markbits are queried for a one-word filler at the
+ // end of the page.
+ static const size_t kLength = ((1 << kPageSizeBits) >> kTaggedSizeLog2) + 1;
+ // The size of the bitmap in bytes is CellsCount() * kBytesPerCell.
+ static const size_t kSize;
+
+ static constexpr size_t CellsForLength(int length) {
return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
}
- int CellsCount() { return CellsForLength(kLength); }
+ static constexpr size_t CellsCount() { return CellsForLength(kLength); }
V8_INLINE static uint32_t IndexToCell(uint32_t index) {
return index >> kBitsPerCellLog2;
diff --git a/deps/v8/src/heap/memory-chunk-inl.h b/deps/v8/src/heap/memory-chunk-inl.h
new file mode 100644
index 0000000000..045de5ea37
--- /dev/null
+++ b/deps/v8/src/heap/memory-chunk-inl.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_CHUNK_INL_H_
+#define V8_HEAP_MEMORY_CHUNK_INL_H_
+
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void MemoryChunk::IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+#ifndef V8_ENABLE_THIRD_PARTY_HEAP
+ base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
+ owner()->IncrementExternalBackingStoreBytes(type, amount);
+#endif
+}
+
+void MemoryChunk::DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType type, size_t amount) {
+#ifndef V8_ENABLE_THIRD_PARTY_HEAP
+ base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
+ owner()->DecrementExternalBackingStoreBytes(type, amount);
+#endif
+}
+
+void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
+ MemoryChunk* from,
+ MemoryChunk* to,
+ size_t amount) {
+ DCHECK_NOT_NULL(from->owner());
+ DCHECK_NOT_NULL(to->owner());
+ base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
+ base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
+ Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
+ amount);
+}
+
+AllocationSpace MemoryChunk::owner_identity() const {
+ if (InReadOnlySpace()) return RO_SPACE;
+ return owner()->identity();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_CHUNK_INL_H_
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
new file mode 100644
index 0000000000..865e6f1a72
--- /dev/null
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -0,0 +1,157 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-chunk.h"
+
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
+ base::AddressRegion memory_area =
+ MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
+ if (memory_area.size() != 0) {
+ MemoryAllocator* memory_allocator = heap_->memory_allocator();
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(executable());
+ CHECK(page_allocator->DiscardSystemPages(
+ reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
+ }
+}
+
+size_t MemoryChunkLayout::CodePageGuardStartOffset() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::CodePageGuardSize() {
+ return MemoryAllocator::GetCommitPageSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
+ // We are guarding code pages: the last OS page will be protected as
+ // non-writable.
+ return Page::kPageSize -
+ static_cast<int>(MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
+ size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
+ return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
+}
+
+size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return ObjectStartOffsetInCodePage();
+ }
+ return ObjectStartOffsetInDataPage();
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
+ size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return AllocatableMemoryInCodePage();
+ }
+ return AllocatableMemoryInDataPage();
+}
+
+#ifdef THREAD_SANITIZER
+void MemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
+}
+#endif
+
+void MemoryChunk::InitializationMemoryFence() {
+ base::SeqCst_MemoryFence();
+#ifdef THREAD_SANITIZER
+ // Since TSAN does not process memory fences, we use the following annotation
+ // to tell TSAN that there is no data race when emitting an
+ // InitializationMemoryFence. Note that the other thread still needs to
+ // perform MemoryChunk::synchronized_heap().
+ base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
+ reinterpret_cast<base::AtomicWord>(heap_));
+#endif
+}
+
+void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::Permission permission) {
+ DCHECK(permission == PageAllocator::kRead ||
+ permission == PageAllocator::kReadExecute);
+ DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
+ // Decrementing the write_unprotect_counter_ and changing the page
+ // protection mode has to be atomic.
+ base::MutexGuard guard(page_protection_change_mutex_);
+ if (write_unprotect_counter_ == 0) {
+ // This is a corner case that may happen when we have a
+ // CodeSpaceMemoryModificationScope open and this page was newly
+ // added.
+ return;
+ }
+ write_unprotect_counter_--;
+ DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+ if (write_unprotect_counter_ == 0) {
+ Address protect_start =
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAligned(protect_start, page_size));
+ size_t protect_size = RoundUp(area_size(), page_size);
+ CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
+ }
+}
+
+void MemoryChunk::SetReadable() {
+ DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
+}
+
+void MemoryChunk::SetReadAndExecutable() {
+ DCHECK(!FLAG_jitless);
+ DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::kReadExecute);
+}
+
+void MemoryChunk::SetReadAndWritable() {
+ DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
+ // Incrementing the write_unprotect_counter_ and changing the page
+ // protection mode has to be atomic.
+ base::MutexGuard guard(page_protection_change_mutex_);
+ write_unprotect_counter_++;
+ DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+ if (write_unprotect_counter_ == 1) {
+ Address unprotect_start =
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAligned(unprotect_start, page_size));
+ size_t unprotect_size = RoundUp(area_size(), page_size);
+ CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
+ PageAllocator::kReadWrite));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
new file mode 100644
index 0000000000..4381a229ab
--- /dev/null
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -0,0 +1,471 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_CHUNK_H_
+#define V8_HEAP_MEMORY_CHUNK_H_
+
+#include <set>
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/heap.h"
+#include "src/heap/invalidated-slots.h"
+#include "src/heap/list.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeObjectRegistry;
+class FreeListCategory;
+class LocalArrayBufferTracker;
+
+class V8_EXPORT_PRIVATE MemoryChunkLayout {
+ public:
+ static size_t CodePageGuardStartOffset();
+ static size_t CodePageGuardSize();
+ static intptr_t ObjectStartOffsetInCodePage();
+ static intptr_t ObjectEndOffsetInCodePage();
+ static size_t AllocatableMemoryInCodePage();
+ static intptr_t ObjectStartOffsetInDataPage();
+ static size_t AllocatableMemoryInDataPage();
+ static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
+ static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+};
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk : public BasicMemoryChunk {
+ public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(MemoryChunk* const chunk) const {
+ return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
+ }
+ };
+
+ using Flags = uintptr_t;
+
+ static const Flags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const Flags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+ static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+
+ static const Flags kIsLargePageMask = LARGE_PAGE;
+
+ static const Flags kSkipEvacuationSlotsRecordingMask =
+ kEvacuationCandidateMask | kIsInYoungGenerationMask;
+
+ // |kDone|: The page state when sweeping is complete or sweeping must not be
+ // performed on that page. Sweeper threads that are done with their work
+ // will set this value and not touch the page anymore.
+ // |kPending|: This page is ready for parallel sweeping.
+ // |kInProgress|: This page is currently swept by a sweeper thread.
+ enum class ConcurrentSweepingState : intptr_t {
+ kDone,
+ kPending,
+ kInProgress,
+ };
+
+ static const size_t kHeaderSize =
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ + kSystemPointerSize // Address owner_
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ + kSystemPointerSize *
+ NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ + kSystemPointerSize *
+ NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ + kSystemPointerSize // base::Mutex* mutex_
+ + kSystemPointerSize // std::atomic<ConcurrentSweepingState>
+ // concurrent_sweeping_
+ + kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ + kSizetSize * ExternalBackingStoreType::kNumTypes
+ // std::atomic<size_t> external_backing_store_bytes_
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ + kSystemPointerSize // Bitmap* young_generation_bitmap_
+ + kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ + kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
+
+ // Page size in bytes. This must be a multiple of the OS page size.
+ static const int kPageSize = 1 << kPageSizeBits;
+
+ // Maximum number of nested code memory modification scopes.
+ static const int kMaxWriteUnprotectCounter = 3;
+
+ // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+ static MemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
+ }
+ // Only works if the object is in the first kPageSize of the MemoryChunk.
+ static MemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
+ }
+
+ void SetOldGenerationPageFlags(bool is_marking);
+ void SetYoungGenerationPageFlags(bool is_marking);
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == kNullAddress) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationAreaAddress.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+ while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(
+ old_mark, new_mark, std::memory_order_acq_rel)) {
+ }
+ }
+
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
+ size_t amount);
+
+ void DiscardUnusedMemory(Address addr, size_t size);
+
+ base::Mutex* mutex() { return mutex_; }
+
+ void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
+ concurrent_sweeping_ = state;
+ }
+
+ ConcurrentSweepingState concurrent_sweeping_state() {
+ return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
+ }
+
+ bool SweepingDone() {
+ return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
+ }
+
+ inline Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
+
+ template <RememberedSetType type>
+ bool ContainsSlots() {
+ return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
+ invalidated_slots<type>() != nullptr;
+ }
+
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
+ SlotSet* slot_set() {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
+ return slot_set_[type];
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ SlotSet* sweeping_slot_set() {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
+ return sweeping_slot_set_;
+ }
+
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
+ TypedSlotSet* typed_slot_set() {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
+ return typed_slot_set_[type];
+ }
+
+ template <RememberedSetType type>
+ V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
+ SlotSet* AllocateSweepingSlotSet();
+ SlotSet* AllocateSlotSet(SlotSet** slot_set);
+
+ // Not safe to be called concurrently.
+ template <RememberedSetType type>
+ void ReleaseSlotSet();
+ void ReleaseSlotSet(SlotSet** slot_set);
+ void ReleaseSweepingSlotSet();
+ template <RememberedSetType type>
+ TypedSlotSet* AllocateTypedSlotSet();
+ // Not safe to be called concurrently.
+ template <RememberedSetType type>
+ void ReleaseTypedSlotSet();
+
+ template <RememberedSetType type>
+ InvalidatedSlots* AllocateInvalidatedSlots();
+ template <RememberedSetType type>
+ void ReleaseInvalidatedSlots();
+ template <RememberedSetType type>
+ V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
+ void InvalidateRecordedSlots(HeapObject object);
+ template <RememberedSetType type>
+ bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
+ template <RememberedSetType type>
+ InvalidatedSlots* invalidated_slots() {
+ return invalidated_slots_[type];
+ }
+
+ void ReleaseLocalTracker();
+
+ void AllocateYoungGenerationBitmap();
+ void ReleaseYoungGenerationBitmap();
+
+ int FreeListsLength();
+
+ // Approximate amount of physical memory committed for this chunk.
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
+
+ Address HighWaterMark() { return address() + high_water_mark_; }
+
+ size_t ProgressBar() {
+ DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+ return progress_bar_.load(std::memory_order_acquire);
+ }
+
+ bool TrySetProgressBar(size_t old_value, size_t new_value) {
+ DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
+ return progress_bar_.compare_exchange_strong(old_value, new_value,
+ std::memory_order_acq_rel);
+ }
+
+ void ResetProgressBar() {
+ if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ progress_bar_.store(0, std::memory_order_release);
+ }
+ }
+
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
+ return external_backing_store_bytes_[type];
+ }
+
+ // Some callers rely on the fact that this can operate on both
+ // tagged and aligned object addresses.
+ inline uint32_t AddressToMarkbitIndex(Address addr) const {
+ return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
+ }
+
+ inline Address MarkbitIndexToAddress(uint32_t index) const {
+ return this->address() + (index << kTaggedSizeLog2);
+ }
+
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+ IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+ return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool ShouldSkipEvacuationSlotRecording() {
+ uintptr_t flags = GetFlags<access_mode>();
+ return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+ ((flags & COMPACTION_WAS_ABORTED) == 0);
+ }
+
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
+
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
+ bool InYoungGeneration() const {
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
+ }
+ bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+ bool InNewLargeObjectSpace() const {
+ return InYoungGeneration() && IsLargePage();
+ }
+ bool InOldSpace() const;
+ V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+
+ // Gets the chunk's owner or null if the space has been detached.
+ Space* owner() const { return owner_; }
+
+ void set_owner(Space* space) { owner_ = space; }
+
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
+
+ // Gets the chunk's allocation space, potentially dealing with a null owner_
+ // (like read-only chunks have).
+ inline AllocationSpace owner_identity() const;
+
+ // Emits a memory barrier. For TSAN builds the other thread needs to perform
+ // MemoryChunk::synchronized_heap() to simulate the barrier.
+ void InitializationMemoryFence();
+
+ V8_EXPORT_PRIVATE void SetReadable();
+ V8_EXPORT_PRIVATE void SetReadAndExecutable();
+ V8_EXPORT_PRIVATE void SetReadAndWritable();
+
+ void SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+ }
+
+ heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
+
+ CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
+
+ PossiblyEmptyBuckets* possibly_empty_buckets() {
+ return &possibly_empty_buckets_;
+ }
+
+ // Release memory allocated by the chunk, except that which is needed by
+ // read-only space chunks.
+ void ReleaseAllocatedMemoryNeededForWritableChunk();
+
+ protected:
+ static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ Executability executable, Space* owner,
+ VirtualMemory reservation);
+
+ // Release all memory allocated by the chunk. Should be called when memory
+ // chunk is about to be freed.
+ void ReleaseAllAllocatedMemory();
+
+ // Sets the requested page permissions only if the write unprotect counter
+ // has reached 0.
+ void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+ PageAllocator::Permission permission);
+
+ VirtualMemory* reserved_memory() { return &reservation_; }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* marking_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+ }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* young_generation_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
+ }
+
+ // If the chunk needs to remember its memory reservation, it is stored here.
+ VirtualMemory reservation_;
+
+ // The space owning this memory chunk.
+ std::atomic<Space*> owner_;
+
+ // Used by the incremental marker to keep track of the scanning progress in
+ // large objects that have a progress bar and are scanned in increments.
+ std::atomic<size_t> progress_bar_;
+
+ // Count of bytes marked black on page.
+ intptr_t live_byte_count_;
+
+ // A single slot set for small pages (of size kPageSize) or an array of slot
+ // set for large pages. In the latter case the number of entries in the array
+ // is ceil(size() / kPageSize).
+ SlotSet* sweeping_slot_set_;
+ TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
+
+ // Assuming the initial allocation on a page is sequential,
+ // count highest number of bytes ever allocated on the page.
+ std::atomic<intptr_t> high_water_mark_;
+
+ base::Mutex* mutex_;
+
+ std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
+
+ base::Mutex* page_protection_change_mutex_;
+
+ // This field is only relevant for code pages. It depicts the number of
+ // times a component requested this page to be read+writeable. The
+ // counter is decremented when a component resets to read+executable.
+ // If Value() == 0 => The memory is read and executable.
+ // If Value() >= 1 => The memory is read and writable (and maybe executable).
+ // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
+ // excessive nesting of scopes.
+ // All executable MemoryChunks are allocated rw based on the assumption that
+ // they will be used immediately for an allocation. They are initialized
+ // with the number of open CodeSpaceMemoryModificationScopes. The caller
+ // that triggers the page allocation is responsible for decrementing the
+ // counter.
+ uintptr_t write_unprotect_counter_;
+
+ // Bytes allocated on the page, including all objects on the page
+ // and the linear allocation area.
+ size_t allocated_bytes_;
+
+ // Tracks off-heap memory used by this memory chunk.
+ std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
+
+ // Freed memory that was not added to the free list.
+ size_t wasted_memory_;
+
+ heap::ListNode<MemoryChunk> list_node_;
+
+ FreeListCategory** categories_;
+
+ LocalArrayBufferTracker* local_tracker_;
+
+ std::atomic<intptr_t> young_generation_live_byte_count_;
+ Bitmap* young_generation_bitmap_;
+
+ CodeObjectRegistry* code_object_registry_;
+
+ PossiblyEmptyBuckets possibly_empty_buckets_;
+
+ private:
+ void InitializeReservedMemory() { reservation_.Reset(); }
+
+ friend class ConcurrentMarkingState;
+ friend class MajorMarkingState;
+ friend class MajorAtomicMarkingState;
+ friend class MajorNonAtomicMarkingState;
+ friend class MemoryAllocator;
+ friend class MinorMarkingState;
+ friend class MinorNonAtomicMarkingState;
+ friend class PagedSpace;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_CHUNK_H_
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 04a7cee33b..bd15b50b96 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -821,8 +821,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
ObjectStats::STRING_SPLIT_CACHE_TYPE);
RecordSimpleVirtualObjectStats(HeapObject(), heap_->regexp_multiple_cache(),
ObjectStats::REGEXP_MULTIPLE_CACHE_TYPE);
- RecordSimpleVirtualObjectStats(HeapObject(), heap_->retained_maps(),
- ObjectStats::RETAINED_MAPS_TYPE);
// WeakArrayList.
RecordSimpleVirtualObjectStats(HeapObject(),
@@ -1075,6 +1073,9 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context.IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
+ RecordSimpleVirtualObjectStats(context, context.retained_maps(),
+ ObjectStats::RETAINED_MAPS_TYPE);
+
} else if (context.IsFunctionContext()) {
RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context.Size());
} else {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 03df9a175d..30e72d18b6 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -18,7 +18,6 @@
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
#include "src/wasm/wasm-objects.h"
-#include "torque-generated/objects-body-descriptors-tq-inl.h"
namespace v8 {
namespace internal {
@@ -61,8 +60,6 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
return visitor->VisitStruct(map, object);
case kVisitFreeSpace:
return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
- case kVisitWeakArray:
- return visitor->VisitWeakArray(map, object);
case kDataOnlyVisitorIdCount:
case kVisitorIdCount:
UNREACHABLE();
@@ -169,19 +166,6 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
return static_cast<ResultType>(object.size());
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
- Map map, HeapObject object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = WeakArrayBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object);
- }
- WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
- return size;
-}
-
template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
NativeContext object) {
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index ea70f36ac0..c962e6eaaf 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -9,6 +9,7 @@
#include "src/objects/map.h"
#include "src/objects/objects.h"
#include "src/objects/visitors.h"
+#include "torque-generated/field-offsets-tq.h"
namespace v8 {
namespace internal {
@@ -21,7 +22,6 @@ namespace internal {
V(Cell) \
V(Code) \
V(CodeDataContainer) \
- V(ConsString) \
V(Context) \
V(CoverageInfo) \
V(DataHandler) \
@@ -31,7 +31,6 @@ namespace internal {
V(FeedbackCell) \
V(FeedbackMetadata) \
V(FeedbackVector) \
- V(FixedArray) \
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSDataView) \
@@ -48,23 +47,21 @@ namespace internal {
V(PropertyArray) \
V(PropertyCell) \
V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
V(SharedFunctionInfo) \
- V(SlicedString) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
V(SourceTextModule) \
V(Symbol) \
V(SyntheticModule) \
- V(ThinString) \
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
+ V(WasmArray) \
V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
- V(WasmInstanceObject)
+ V(WasmInstanceObject) \
+ V(WasmStruct)
#define FORWARD_DECLARE(TypeName) class TypeName;
TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
@@ -112,7 +109,6 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE ResultType VisitJSApiObject(Map map, JSObject object);
V8_INLINE ResultType VisitStruct(Map map, HeapObject object);
V8_INLINE ResultType VisitFreeSpace(Map map, FreeSpace object);
- V8_INLINE ResultType VisitWeakArray(Map map, HeapObject object);
template <typename T>
static V8_INLINE T Cast(HeapObject object);
diff --git a/deps/v8/src/heap/off-thread-factory.cc b/deps/v8/src/heap/off-thread-factory.cc
index d8cab5df69..053e9a0ce7 100644
--- a/deps/v8/src/heap/off-thread-factory.cc
+++ b/deps/v8/src/heap/off-thread-factory.cc
@@ -4,192 +4,19 @@
#include "src/heap/off-thread-factory.h"
-#include "src/ast/ast-value-factory.h"
-#include "src/ast/ast.h"
-#include "src/base/logging.h"
#include "src/common/globals.h"
-#include "src/execution/isolate.h"
#include "src/handles/handles.h"
-#include "src/heap/spaces-inl.h"
-#include "src/heap/spaces.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
-#include "src/objects/map-inl.h"
-#include "src/objects/objects-body-descriptors-inl.h"
-#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/objects/visitors.h"
#include "src/roots/roots-inl.h"
-#include "src/roots/roots.h"
-#include "src/tracing/trace-event.h"
+#include "src/strings/string-hasher.h"
namespace v8 {
namespace internal {
-OffThreadFactory::OffThreadFactory(Isolate* isolate)
- : roots_(isolate), space_(isolate->heap()), lo_space_(isolate->heap()) {}
-
-namespace {
-
-class StringSlotCollectingVisitor : public ObjectVisitor {
- public:
- explicit StringSlotCollectingVisitor(ReadOnlyRoots roots) : roots_(roots) {}
-
- void VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override {
- for (ObjectSlot slot = start; slot != end; ++slot) {
- Object obj = *slot;
- if (obj.IsInternalizedString() &&
- !ReadOnlyHeap::Contains(HeapObject::cast(obj))) {
- string_slots.emplace_back(host.ptr(), slot.address() - host.ptr());
- }
- }
- }
- void VisitPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- for (MaybeObjectSlot slot = start; slot != end; ++slot) {
- MaybeObject maybe_obj = *slot;
- HeapObject obj;
- if (maybe_obj.GetHeapObjectIfStrong(&obj)) {
- if (obj.IsInternalizedString() && !ReadOnlyHeap::Contains(obj)) {
- string_slots.emplace_back(host.ptr(), slot.address() - host.ptr());
- }
- }
- }
- }
-
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- UNREACHABLE();
- }
-
- std::vector<RelativeSlot> string_slots;
-
- private:
- ReadOnlyRoots roots_;
-};
-
-} // namespace
-
-void OffThreadFactory::FinishOffThread() {
- DCHECK(!is_finished);
-
- StringSlotCollectingVisitor string_slot_collector(read_only_roots());
-
- // First iterate all objects in the spaces to find string slots. At this point
- // all string slots have to point to off-thread strings or read-only strings.
- {
- PagedSpaceObjectIterator it(&space_);
- for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- obj.IterateBodyFast(&string_slot_collector);
- }
- }
- {
- LargeObjectSpaceObjectIterator it(&lo_space_);
- for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- obj.IterateBodyFast(&string_slot_collector);
- }
- }
-
- string_slots_ = std::move(string_slot_collector.string_slots);
-
- is_finished = true;
-}
-
-void OffThreadFactory::Publish(Isolate* isolate) {
- DCHECK(is_finished);
-
- HandleScope handle_scope(isolate);
-
- // First, handlify all the string slot holder objects, so that we can keep
- // track of them if they move.
- //
- // TODO(leszeks): We might be able to create a HandleScope-compatible
- // structure off-thread and merge it into the current handle scope all in one
- // go (DeferredHandles maybe?).
- std::vector<Handle<HeapObject>> heap_object_handles;
- std::vector<Handle<Script>> script_handles;
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.CollectHandles");
- heap_object_handles.reserve(string_slots_.size());
- for (RelativeSlot relative_slot : string_slots_) {
- // TODO(leszeks): Group slots in the same parent object to avoid creating
- // multiple duplicate handles.
- heap_object_handles.push_back(handle(
- HeapObject::cast(Object(relative_slot.object_address)), isolate));
-
- // De-internalize the string so that we can re-internalize it later.
- ObjectSlot slot(relative_slot.object_address + relative_slot.slot_offset);
- String string = String::cast(slot.Acquire_Load());
- bool one_byte = string.IsOneByteRepresentation();
- Map map = one_byte ? read_only_roots().one_byte_string_map()
- : read_only_roots().string_map();
- string.set_map_no_write_barrier(map);
- }
-
- script_handles.reserve(script_list_.size());
- for (Script script : script_list_) {
- script_handles.push_back(handle(script, isolate));
- }
- }
-
- // Then merge the spaces. At this point, we are allowed to point between (no
- // longer) off-thread pages and main-thread heap pages, and objects in the
- // previously off-thread page can move.
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.Merge");
- isolate->heap()->old_space()->MergeLocalSpace(&space_);
- isolate->heap()->lo_space()->MergeOffThreadSpace(&lo_space_);
- }
-
- // Iterate the string slots, as an offset from the holders we have handles to.
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.UpdateHandles");
- for (size_t i = 0; i < string_slots_.size(); ++i) {
- int slot_offset = string_slots_[i].slot_offset;
-
- // There's currently no cases where the holder object could have been
- // resized.
- DCHECK_LT(slot_offset, heap_object_handles[i]->Size());
-
- ObjectSlot slot(heap_object_handles[i]->ptr() + slot_offset);
-
- String string = String::cast(slot.Acquire_Load());
- if (string.IsThinString()) {
- // We may have already internalized this string via another slot.
- slot.Release_Store(ThinString::cast(string).GetUnderlying());
- } else {
- HandleScope handle_scope(isolate);
-
- Handle<String> string_handle = handle(string, isolate);
- Handle<String> internalized_string =
- isolate->factory()->InternalizeString(string_handle);
-
- // Recalculate the slot in case there was GC and the holder moved.
- ObjectSlot slot(heap_object_handles[i]->ptr() + slot_offset);
-
- DCHECK(string_handle->IsThinString() ||
- string_handle->IsInternalizedString());
- if (*string_handle != *internalized_string) {
- slot.Release_Store(*internalized_string);
- }
- }
- }
-
- // Merge the recorded scripts into the isolate's script list.
- // This for loop may seem expensive, but practically there's unlikely to be
- // more than one script in the OffThreadFactory.
- Handle<WeakArrayList> scripts = isolate->factory()->script_list();
- for (Handle<Script> script_handle : script_handles) {
- scripts = WeakArrayList::Append(isolate, scripts,
- MaybeObjectHandle::Weak(script_handle));
- }
- isolate->heap()->SetRootScriptList(*scripts);
- }
-}
+OffThreadFactory::OffThreadFactory(Isolate* isolate) : roots_(isolate) {}
// Hacky method for creating a simple object with a slot pointing to a string.
// TODO(leszeks): Remove once we have full FixedArray support.
@@ -237,21 +64,12 @@ Handle<String> OffThreadFactory::InternalizeString(
}
void OffThreadFactory::AddToScriptList(Handle<Script> shared) {
- script_list_.push_back(*shared);
+ isolate()->heap()->AddToScriptList(shared);
}
HeapObject OffThreadFactory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
- DCHECK(!is_finished);
-
- DCHECK_EQ(allocation, AllocationType::kOld);
- AllocationResult result;
- if (size > kMaxRegularHeapObjectSize) {
- result = lo_space_.AllocateRaw(size);
- } else {
- result = space_.AllocateRaw(size, alignment);
- }
- return result.ToObjectChecked();
+ return isolate()->heap()->AllocateRaw(size, allocation, alignment);
}
} // namespace internal
diff --git a/deps/v8/src/heap/off-thread-factory.h b/deps/v8/src/heap/off-thread-factory.h
index f297bd30c6..738511f772 100644
--- a/deps/v8/src/heap/off-thread-factory.h
+++ b/deps/v8/src/heap/off-thread-factory.h
@@ -28,15 +28,6 @@ class AstRawString;
class AstConsString;
class OffThreadIsolate;
-struct RelativeSlot {
- RelativeSlot() = default;
- RelativeSlot(Address object_address, int slot_offset)
- : object_address(object_address), slot_offset(slot_offset) {}
-
- Address object_address;
- int slot_offset;
-};
-
class V8_EXPORT_PRIVATE OffThreadFactory
: public FactoryBase<OffThreadFactory> {
public:
@@ -56,9 +47,6 @@ class V8_EXPORT_PRIVATE OffThreadFactory
Handle<String> InternalizeString(const Vector<const uint8_t>& string);
Handle<String> InternalizeString(const Vector<const uint16_t>& string);
- void FinishOffThread();
- void Publish(Isolate* isolate);
-
// The parser shouldn't allow the OffThreadFactory to get into a state where
// it generates errors.
Handle<Object> NewInvalidStringLengthError() { UNREACHABLE(); }
@@ -93,11 +81,6 @@ class V8_EXPORT_PRIVATE OffThreadFactory
// ------
ReadOnlyRoots roots_;
- OffThreadSpace space_;
- OffThreadLargeObjectSpace lo_space_;
- std::vector<RelativeSlot> string_slots_;
- std::vector<Script> script_list_;
- bool is_finished = false;
};
} // namespace internal
diff --git a/deps/v8/src/heap/off-thread-heap.cc b/deps/v8/src/heap/off-thread-heap.cc
new file mode 100644
index 0000000000..fec93f8068
--- /dev/null
+++ b/deps/v8/src/heap/off-thread-heap.cc
@@ -0,0 +1,241 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/off-thread-heap.h"
+
+#include "src/heap/spaces-inl.h"
+#include "src/heap/spaces.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/roots/roots.h"
+
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+OffThreadHeap::OffThreadHeap(Heap* heap) : space_(heap), lo_space_(heap) {}
+
+class OffThreadHeap::StringSlotCollectingVisitor : public ObjectVisitor {
+ public:
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot slot = start; slot != end; ++slot) {
+ Object obj = *slot;
+ if (obj.IsInternalizedString() &&
+ !ReadOnlyHeap::Contains(HeapObject::cast(obj))) {
+ string_slots.emplace_back(host.address(),
+ slot.address() - host.address());
+ }
+ }
+ }
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ for (MaybeObjectSlot slot = start; slot != end; ++slot) {
+ MaybeObject maybe_obj = *slot;
+ HeapObject obj;
+ if (maybe_obj.GetHeapObjectIfStrong(&obj)) {
+ if (obj.IsInternalizedString() && !ReadOnlyHeap::Contains(obj)) {
+ string_slots.emplace_back(host.address(),
+ slot.address() - host.address());
+ }
+ }
+ }
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ UNREACHABLE();
+ }
+
+ std::vector<RelativeSlot> string_slots;
+};
+
+void OffThreadHeap::FinishOffThread() {
+ DCHECK(!is_finished);
+
+ StringSlotCollectingVisitor string_slot_collector;
+
+ // First iterate all objects in the spaces to find string slots. At this point
+ // all string slots have to point to off-thread strings or read-only strings.
+ {
+ PagedSpaceObjectIterator it(&space_);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ obj.IterateBodyFast(&string_slot_collector);
+ }
+ }
+ {
+ LargeObjectSpaceObjectIterator it(&lo_space_);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+ obj.IterateBodyFast(&string_slot_collector);
+ }
+ }
+
+ string_slots_ = std::move(string_slot_collector.string_slots);
+
+ is_finished = true;
+}
+
+void OffThreadHeap::Publish(Heap* heap) {
+ DCHECK(is_finished);
+ Isolate* isolate = heap->isolate();
+ ReadOnlyRoots roots(isolate);
+
+ HandleScope handle_scope(isolate);
+
+ // First, handlify all the string slot holder objects, so that we can keep
+ // track of them if they move.
+ //
+ // TODO(leszeks): We might be able to create a HandleScope-compatible
+ // structure off-thread and merge it into the current handle scope all in one
+ // go (DeferredHandles maybe?).
+ std::vector<Handle<HeapObject>> heap_object_handles;
+ std::vector<Handle<Script>> script_handles;
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.CollectHandles");
+ heap_object_handles.reserve(string_slots_.size());
+ for (RelativeSlot relative_slot : string_slots_) {
+ // TODO(leszeks): Group slots in the same parent object to avoid creating
+ // multiple duplicate handles.
+ HeapObject obj = HeapObject::FromAddress(relative_slot.object_address);
+ heap_object_handles.push_back(handle(obj, isolate));
+
+ // De-internalize the string so that we can re-internalize it later.
+ String string =
+ String::cast(RELAXED_READ_FIELD(obj, relative_slot.slot_offset));
+ bool one_byte = string.IsOneByteRepresentation();
+ Map map = one_byte ? roots.one_byte_string_map() : roots.string_map();
+ string.set_map_no_write_barrier(map);
+ }
+
+ script_handles.reserve(script_list_.size());
+ for (Script script : script_list_) {
+ script_handles.push_back(handle(script, isolate));
+ }
+ }
+
+ // Then merge the spaces. At this point, we are allowed to point between (no
+ // longer) off-thread pages and main-thread heap pages, and objects in the
+ // previously off-thread page can move.
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.Merge");
+ Heap* heap = isolate->heap();
+
+ // Ensure that the old-space can expand to the size needed for the
+ // off-thread objects. Use capacity rather than size since we're adding
+ // entire pages.
+ size_t off_thread_size = space_.Capacity() + lo_space_.Size();
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->InvokeNearHeapLimitCallback();
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->FatalProcessOutOfMemory(
+ "Can't expand old-space enough to merge off-thread pages.");
+ }
+ }
+ }
+
+ heap->old_space()->MergeLocalSpace(&space_);
+ heap->lo_space()->MergeOffThreadSpace(&lo_space_);
+
+ DCHECK(heap->CanExpandOldGeneration(0));
+ heap->NotifyOldGenerationExpansion();
+
+ // Possibly trigger a GC if we're close to exhausting the old generation.
+ // TODO(leszeks): Adjust the heuristics here.
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+
+ if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
+ !heap->CanExpandOldGeneration(1 * MB)) {
+ heap->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kAllocationFailure);
+ }
+ }
+
+ // Iterate the string slots, as an offset from the holders we have handles to.
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.UpdateHandles");
+ for (size_t i = 0; i < string_slots_.size(); ++i) {
+ HeapObject obj = *heap_object_handles[i];
+ int slot_offset = string_slots_[i].slot_offset;
+
+ // There are currently no cases where the holder object could have been
+ // resized.
+ DCHECK_LT(slot_offset, obj.Size());
+
+ String string = String::cast(RELAXED_READ_FIELD(obj, slot_offset));
+ if (string.IsThinString()) {
+ // We may have already internalized this string via another slot.
+ String value = ThinString::cast(string).GetUnderlying();
+ RELAXED_WRITE_FIELD(obj, slot_offset, value);
+ WRITE_BARRIER(obj, slot_offset, value);
+ } else {
+ HandleScope handle_scope(isolate);
+
+ Handle<String> string_handle = handle(string, isolate);
+ Handle<String> internalized_string =
+ isolate->factory()->InternalizeString(string_handle);
+
+ DCHECK(string_handle->IsThinString() ||
+ string_handle->IsInternalizedString());
+ if (*string_handle != *internalized_string) {
+ // Re-read the object from the handle in case there was GC during
+ // internalization and it moved.
+ HeapObject obj = *heap_object_handles[i];
+ String value = *internalized_string;
+ RELAXED_WRITE_FIELD(obj, slot_offset, value);
+ WRITE_BARRIER(obj, slot_offset, value);
+ }
+ }
+ }
+
+ // Merge the recorded scripts into the isolate's script list.
+ // This for loop may seem expensive, but practically there's unlikely to be
+ // more than one script in the OffThreadFactory.
+ Handle<WeakArrayList> scripts = isolate->factory()->script_list();
+ for (Handle<Script> script_handle : script_handles) {
+ scripts = WeakArrayList::Append(isolate, scripts,
+ MaybeObjectHandle::Weak(script_handle));
+ }
+ heap->SetRootScriptList(*scripts);
+ }
+}
+
+void OffThreadHeap::AddToScriptList(Handle<Script> shared) {
+ script_list_.push_back(*shared);
+}
+
+HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
+ AllocationAlignment alignment) {
+ DCHECK(!is_finished);
+
+ DCHECK_EQ(allocation, AllocationType::kOld);
+ AllocationResult result;
+ if (size > kMaxRegularHeapObjectSize) {
+ result = lo_space_.AllocateRaw(size);
+ } else {
+ result = space_.AllocateRaw(size, alignment);
+ }
+ return result.ToObjectChecked();
+}
+
+HeapObject OffThreadHeap::CreateFillerObjectAt(
+ Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+ ReadOnlyRoots roots(this);
+ HeapObject filler =
+ Heap::CreateFillerObjectAt(roots, addr, size, clear_memory_mode);
+ return filler;
+}
+
+} // namespace internal
+} // namespace v8
+
+// Undefine the heap manipulation macros.
+#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/heap/off-thread-heap.h b/deps/v8/src/heap/off-thread-heap.h
new file mode 100644
index 0000000000..de902be52f
--- /dev/null
+++ b/deps/v8/src/heap/off-thread-heap.h
@@ -0,0 +1,52 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_OFF_THREAD_HEAP_H_
+#define V8_HEAP_OFF_THREAD_HEAP_H_
+
+#include <vector>
+#include "src/common/globals.h"
+#include "src/heap/large-spaces.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE OffThreadHeap {
+ public:
+ explicit OffThreadHeap(Heap* heap);
+
+ HeapObject AllocateRaw(int size, AllocationType allocation,
+ AllocationAlignment alignment = kWordAligned);
+ void AddToScriptList(Handle<Script> shared);
+
+ HeapObject CreateFillerObjectAt(Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode);
+
+ void FinishOffThread();
+ void Publish(Heap* heap);
+
+ private:
+ class StringSlotCollectingVisitor;
+
+ struct RelativeSlot {
+ RelativeSlot() = default;
+ RelativeSlot(Address object_address, int slot_offset)
+ : object_address(object_address), slot_offset(slot_offset) {}
+
+ Address object_address;
+ int slot_offset;
+ };
+
+ OffThreadSpace space_;
+ OffThreadLargeObjectSpace lo_space_;
+ std::vector<RelativeSlot> string_slots_;
+ std::vector<Script> script_list_;
+ bool is_finished = false;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_OFF_THREAD_HEAP_H_
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 5b0e29bf12..e2387984cc 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -6,11 +6,14 @@
#include <cstring>
+#include "include/v8.h"
+#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
-#include "src/base/once.h"
-#include "src/heap/heap-inl.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/heap/spaces.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/heap/third-party/heap-api.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
@@ -20,7 +23,28 @@ namespace v8 {
namespace internal {
#ifdef V8_SHARED_RO_HEAP
-V8_DECLARE_ONCE(setup_ro_heap_once);
+namespace {
+// Mutex used to ensure that ReadOnlyArtifacts creation is only done once.
+base::LazyMutex read_only_heap_creation_mutex_ = LAZY_MUTEX_INITIALIZER;
+
+// Weak pointer holding ReadOnlyArtifacts. ReadOnlyHeap::SetUp creates a
+// std::shared_ptr from this when it attempts to reuse it. Since all Isolates
+// hold a std::shared_ptr to this, the object is destroyed when no Isolates
+// remain.
+base::LazyInstance<std::weak_ptr<ReadOnlyArtifacts>>::type
+ read_only_artifacts_ = LAZY_INSTANCE_INITIALIZER;
+
+std::shared_ptr<ReadOnlyArtifacts> InitializeSharedReadOnlyArtifacts() {
+ auto artifacts = std::make_shared<ReadOnlyArtifacts>();
+ *read_only_artifacts_.Pointer() = artifacts;
+ return artifacts;
+}
+} // namespace
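+
+// A minimal sketch of the reuse idiom this enables (mirroring the SetUp code
+// below, using the declarations above):
+//
+//   std::shared_ptr<ReadOnlyArtifacts> artifacts =
+//       read_only_artifacts_.Get().lock();
+//   if (!artifacts) artifacts = InitializeSharedReadOnlyArtifacts();
+//
+// Locking the weak_ptr yields the existing artifacts while some Isolate still
+// holds them; otherwise a fresh instance is created, which also re-seeds the
+// weak pointer.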
+
+// This ReadOnlyHeap instance will only be accessed by Isolates that are already
+// set up. As such it doesn't need to be guarded by a mutex or shared_ptrs,
+// since an already set up Isolate will hold a shared_ptr to
+// read_only_artifacts_.
ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
@@ -28,45 +52,54 @@ ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
- bool call_once_ran = false;
- base::Optional<uint32_t> des_checksum;
-#ifdef DEBUG
- if (des != nullptr) des_checksum = des->GetChecksum();
-#endif // DEBUG
- base::CallOnce(&setup_ro_heap_once,
- [isolate, des, des_checksum, &call_once_ran]() {
- USE(des_checksum);
- shared_ro_heap_ = CreateAndAttachToIsolate(isolate);
- if (des != nullptr) {
+ bool read_only_heap_created = false;
+
+ if (des != nullptr) {
+ base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
+ std::shared_ptr<ReadOnlyArtifacts> artifacts =
+ read_only_artifacts_.Get().lock();
+ if (!artifacts) {
+ artifacts = InitializeSharedReadOnlyArtifacts();
+ shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
#ifdef DEBUG
- shared_ro_heap_->read_only_blob_checksum_ = des_checksum;
+ shared_ro_heap_->read_only_blob_checksum_ = des->GetChecksum();
#endif // DEBUG
- shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
- }
- call_once_ran = true;
- });
+ shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
+ read_only_heap_created = true;
+ } else {
+ isolate->SetUpFromReadOnlyArtifacts(artifacts);
+ }
+ } else {
+ // This path should only be taken in mksnapshot. It should only be run once
+ // before tearing down the Isolate that holds this ReadOnlyArtifacts, and it
+ // is not thread-safe.
+ std::shared_ptr<ReadOnlyArtifacts> artifacts =
+ read_only_artifacts_.Get().lock();
+ CHECK(!artifacts);
+ artifacts = InitializeSharedReadOnlyArtifacts();
+ shared_ro_heap_ = CreateAndAttachToIsolate(isolate, artifacts);
+ read_only_heap_created = true;
+ }
- USE(call_once_ran);
- USE(des_checksum);
#ifdef DEBUG
const base::Optional<uint32_t> last_checksum =
shared_ro_heap_->read_only_blob_checksum_;
if (last_checksum) {
// The read-only heap was set up from a snapshot. Make sure it's always
// the same snapshot.
- CHECK_WITH_MSG(des_checksum,
- "Attempt to create the read-only heap after "
- "already creating from a snapshot.");
- CHECK_EQ(last_checksum, des_checksum);
+ CHECK_WITH_MSG(des->GetChecksum(),
+ "Attempt to create the read-only heap after already "
+ "creating from a snapshot.");
+ CHECK_EQ(last_checksum, des->GetChecksum());
} else {
// The read-only heap objects were created. Make sure this happens only
// once, during this call.
- CHECK(call_once_ran);
+ CHECK(read_only_heap_created);
}
#endif // DEBUG
+ USE(read_only_heap_created);
- isolate->SetUpFromReadOnlyHeap(shared_ro_heap_);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
@@ -74,7 +107,8 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
kEntriesCount * sizeof(Address));
}
#else
- auto* ro_heap = CreateAndAttachToIsolate(isolate);
+ auto artifacts = std::make_shared<ReadOnlyArtifacts>();
+ auto* ro_heap = CreateAndAttachToIsolate(isolate, artifacts);
if (des != nullptr) ro_heap->DeseralizeIntoIsolate(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
@@ -92,24 +126,30 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
}
// static
-ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
- auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
- isolate->SetUpFromReadOnlyHeap(ro_heap);
- return ro_heap;
+ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
+ Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts) {
+ std::unique_ptr<ReadOnlyHeap> ro_heap(
+ new ReadOnlyHeap(new ReadOnlySpace(isolate->heap())));
+ artifacts->set_read_only_heap(std::move(ro_heap));
+ isolate->SetUpFromReadOnlyArtifacts(artifacts);
+ return artifacts->read_only_heap();
}
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
read_only_space_->ShrinkImmortalImmovablePages();
#ifdef V8_SHARED_RO_HEAP
+ std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
+ read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
+ read_only_space_ = artifacts->shared_read_only_space();
+
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
- // N.B. Since pages are manually allocated with mmap, Lsan doesn't track
- // their pointers. Seal explicitly ignores the necessary objects.
+ // N.B. Since pages are manually allocated with mmap, Lsan doesn't track their
+ // pointers. Seal explicitly ignores the necessary objects.
LSAN_IGNORE_OBJECT(this);
- read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
#endif
@@ -119,24 +159,26 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
void ReadOnlyHeap::OnHeapTearDown() {
#ifndef V8_SHARED_RO_HEAP
delete read_only_space_;
- delete this;
#endif
}
-#ifdef V8_SHARED_RO_HEAP
// static
-const ReadOnlyHeap* ReadOnlyHeap::Instance() { return shared_ro_heap_; }
-#endif
-
-// static
-void ReadOnlyHeap::ClearSharedHeapForTest() {
+void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
+ SharedMemoryStatistics* statistics) {
+ statistics->read_only_space_size_ = 0;
+ statistics->read_only_space_used_size_ = 0;
+ statistics->read_only_space_physical_size_ = 0;
#ifdef V8_SHARED_RO_HEAP
- DCHECK_NOT_NULL(shared_ro_heap_);
- // TODO(v8:7464): Just leak read-only space for now. The paged-space heap
- // is null so there isn't a nice way to do this.
- shared_ro_heap_ = nullptr;
- setup_ro_heap_once = 0;
-#endif
+ std::shared_ptr<ReadOnlyArtifacts> artifacts =
+ read_only_artifacts_.Get().lock();
+ if (artifacts) {
+ auto ro_space = artifacts->shared_read_only_space();
+ statistics->read_only_space_size_ = ro_space->CommittedMemory();
+ statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
+ statistics->read_only_space_physical_size_ =
+ ro_space->CommittedPhysicalMemory();
+ }
+#endif // V8_SHARED_RO_HEAP
}
// static
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index 27d34b4c03..ed10521129 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_READ_ONLY_HEAP_H_
#define V8_HEAP_READ_ONLY_HEAP_H_
+#include <memory>
#include <utility>
#include "src/base/macros.h"
@@ -14,10 +15,14 @@
#include "src/roots/roots.h"
namespace v8 {
+
+class SharedMemoryStatistics;
+
namespace internal {
class Isolate;
class Page;
+class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
class ReadOnlySpace;
@@ -28,23 +33,27 @@ class ReadOnlyHeap final {
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
- // If necessary creates read-only heap and initializes its artifacts (if
- // the deserializer is provided). Then attaches the read-only heap to the
- // isolate.
+ // If necessary creates read-only heap and initializes its artifacts (if the
+ // deserializer is provided). Then attaches the read-only heap to the isolate.
+ // If the deserializer is not provided, then the read-only heap will only
+ // finish initializing when initial heap object creation in the Isolate is
+ // completed, which is signalled by calling OnCreateHeapObjectsComplete. When
+ // V8_SHARED_RO_HEAP is enabled, a lock will be held until that method is
+ // called.
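+ // Illustrative call sequences (a sketch inferred from the behaviour described
+ // above, not a full contract): snapshot-less setup, e.g. in mksnapshot, calls
+ // SetUp(isolate, nullptr) and later OnCreateHeapObjectsComplete(isolate);
+ // snapshot-based setup calls SetUp(isolate, deserializer) only.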
// TODO(v8:7464): Ideally we'd create this without needing a heap.
static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
// Indicates that the isolate has been set up and all read-only space objects
- // have been created and will not be written to. This is not thread safe, and
- // should really only be used during snapshot creation or when read-only heap
- // sharing is disabled.
+ // have been created and will not be written to. This should only be called if
+ // a deserializer was not previously provided to SetUp. When V8_SHARED_RO_HEAP
+ // is enabled, this releases the ReadOnlyHeap creation lock.
void OnCreateHeapObjectsComplete(Isolate* isolate);
// Indicates that the current isolate no longer requires the read-only heap
// and it may be safely disposed of.
void OnHeapTearDown();
-
-#ifdef V8_SHARED_RO_HEAP
- static const ReadOnlyHeap* Instance();
-#endif
+ // If the read-only heap is shared, then populate |statistics| with its stats,
+ // otherwise the read-only heap stats are set to 0.
+ static void PopulateReadOnlySpaceStatistics(
+ SharedMemoryStatistics* statistics);
// Returns whether the address is within the read-only space.
V8_EXPORT_PRIVATE static bool Contains(Address address);
@@ -56,10 +65,6 @@ class ReadOnlyHeap final {
V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
HeapObject object);
- // Clears any shared read-only heap artifacts for testing, forcing read-only
- // heap to be re-created on next set up.
- V8_EXPORT_PRIVATE static void ClearSharedHeapForTest();
-
// Extends the read-only object cache with new zero smi and returns a
// reference to it.
Object* ExtendReadOnlyObjectCache();
@@ -71,14 +76,15 @@ class ReadOnlyHeap final {
private:
// Creates a new read-only heap and attaches it to the provided isolate.
- static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
- // Runs the read-only deserailizer and calls InitFromIsolate to complete
+ static ReadOnlyHeap* CreateAndAttachToIsolate(
+ Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
+ // Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
- // further writes, marks it as read-only and detaches it from the heap (unless
- // sharing is disabled).
+ // further writes, marks it as read-only and detaches it from the heap
+ // (unless sharing is disabled).
void InitFromIsolate(Isolate* isolate);
bool init_complete_ = false;
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
new file mode 100644
index 0000000000..a2e7295258
--- /dev/null
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -0,0 +1,172 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/read-only-spaces.h"
+
+#include "src/base/lsan.h"
+#include "src/execution/isolate.h"
+#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-heap.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/string.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ReadOnlySpace implementation
+
+ReadOnlySpace::ReadOnlySpace(Heap* heap)
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
+ is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
+}
+
+ReadOnlyArtifacts::~ReadOnlyArtifacts() {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
+ MemoryChunk* next_chunk;
+ for (MemoryChunk* chunk = pages_.front(); chunk != nullptr;
+ chunk = next_chunk) {
+ void* chunk_address = reinterpret_cast<void*>(chunk->address());
+ page_allocator->SetPermissions(chunk_address, chunk->size(),
+ PageAllocator::kReadWrite);
+ next_chunk = chunk->list_node().next();
+ size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
+ CHECK(page_allocator->FreePages(chunk_address, size));
+ }
+}
+
+void ReadOnlyArtifacts::set_read_only_heap(
+ std::unique_ptr<ReadOnlyHeap> read_only_heap) {
+ read_only_heap_ = std::move(read_only_heap);
+}
+
+SharedReadOnlySpace::~SharedReadOnlySpace() {
+ // Clear the memory chunk list before the space is deleted, so that the
+ // inherited destructors don't try to destroy the MemoryChunks themselves.
+ memory_chunk_list_ = heap::List<MemoryChunk>();
+}
+
+SharedReadOnlySpace::SharedReadOnlySpace(
+ Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
+ : ReadOnlySpace(heap) {
+ artifacts->pages().ShallowCopyTo(&memory_chunk_list_);
+ is_marked_read_only_ = true;
+ accounting_stats_ = artifacts->accounting_stats();
+}
+
+void ReadOnlySpace::DetachPagesAndAddToArtifacts(
+ std::shared_ptr<ReadOnlyArtifacts> artifacts) {
+ Heap* heap = ReadOnlySpace::heap();
+ Seal(SealMode::kDetachFromHeapAndForget);
+ artifacts->set_accounting_stats(accounting_stats_);
+ artifacts->TransferPages(std::move(memory_chunk_list_));
+ artifacts->set_shared_read_only_space(
+ std::make_unique<SharedReadOnlySpace>(heap, artifacts));
+ heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
+}
+
+void ReadOnlyPage::MakeHeaderRelocatable() {
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ // Detached read-only space needs to have a valid marking bitmap. Instruct
+ // Lsan to ignore it if required.
+ LSAN_IGNORE_OBJECT(marking_bitmap_);
+ heap_ = nullptr;
+ owner_ = nullptr;
+}
+
+void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
+ PageAllocator::Permission access) {
+ for (Page* p : *this) {
+ // Read-only pages don't have a valid reservation object, so we get the
+ // proper page allocator manually.
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(p->executable());
+ CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
+ }
+}
+
+// After we have booted, we have created a map which represents free space
+// on the heap. If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
+// fix them.
+void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
+ free_list_->RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Those free spaces still contain null as their map pointer.
+ // Overwrite them with new fillers.
+ for (Page* page : *this) {
+ int size = static_cast<int>(page->wasted_memory());
+ if (size == 0) {
+ // If there is no wasted memory then all free space is in the free list.
+ continue;
+ }
+ Address start = page->HighWaterMark();
+ Address end = page->area_end();
+ if (start < end - size) {
+ // A region at the high watermark is already in the free list.
+ HeapObject filler = HeapObject::FromAddress(start);
+ CHECK(filler.IsFreeSpaceOrFiller());
+ start += filler.Size();
+ }
+ CHECK_EQ(size, static_cast<int>(end - start));
+ heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
+ }
+}
+
+void ReadOnlySpace::ClearStringPaddingIfNeeded() {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ // TODO(ulan): Revisit this once third-party heap supports iteration.
+ return;
+ }
+ if (is_string_padding_cleared_) return;
+
+ ReadOnlyHeapObjectIterator iterator(this);
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
+ if (o.IsSeqOneByteString()) {
+ SeqOneByteString::cast(o).clear_padding();
+ } else if (o.IsSeqTwoByteString()) {
+ SeqTwoByteString::cast(o).clear_padding();
+ }
+ }
+ is_string_padding_cleared_ = true;
+}
+
+void ReadOnlySpace::Seal(SealMode ro_mode) {
+ DCHECK(!is_marked_read_only_);
+
+ FreeLinearAllocationArea();
+ is_marked_read_only_ = true;
+ auto* memory_allocator = heap()->memory_allocator();
+
+ if (ro_mode == SealMode::kDetachFromHeapAndForget) {
+ DetachFromHeap();
+ for (Page* p : *this) {
+ memory_allocator->UnregisterMemory(p);
+ static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
+ }
+ } else {
+ for (Page* p : *this) {
+ p->ReleaseAllocatedMemoryNeededForWritableChunk();
+ }
+ }
+
+ free_list_.reset();
+
+ SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
+}
+
+void ReadOnlySpace::Unseal() {
+ DCHECK(is_marked_read_only_);
+ if (HasPages()) {
+ SetPermissionsForPages(heap()->memory_allocator(),
+ PageAllocator::kReadWrite);
+ }
+ is_marked_read_only_ = false;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
new file mode 100644
index 0000000000..dd82182b7f
--- /dev/null
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -0,0 +1,125 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_READ_ONLY_SPACES_H_
+#define V8_HEAP_READ_ONLY_SPACES_H_
+
+#include <memory>
+#include <utility>
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+#include "src/heap/list.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+class ReadOnlyHeap;
+
+class ReadOnlyPage : public Page {
+ public:
+ // Clears any pointers in the header that point out of the page that would
+ // otherwise make the header non-relocatable.
+ void MakeHeaderRelocatable();
+
+ private:
+ friend class ReadOnlySpace;
+};
+
+// -----------------------------------------------------------------------------
+// Artifacts used to construct a new SharedReadOnlySpace
+class ReadOnlyArtifacts {
+ public:
+ ~ReadOnlyArtifacts();
+
+ void set_accounting_stats(const AllocationStats& stats) { stats_ = stats; }
+
+ void set_shared_read_only_space(
+ std::unique_ptr<SharedReadOnlySpace> shared_space) {
+ shared_read_only_space_ = std::move(shared_space);
+ }
+ SharedReadOnlySpace* shared_read_only_space() {
+ return shared_read_only_space_.get();
+ }
+
+ heap::List<MemoryChunk>& pages() { return pages_; }
+ void TransferPages(heap::List<MemoryChunk>&& pages) {
+ pages_ = std::move(pages);
+ }
+
+ const AllocationStats& accounting_stats() const { return stats_; }
+
+ void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
+ ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
+
+ private:
+ heap::List<MemoryChunk> pages_;
+ AllocationStats stats_;
+ std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
+ std::unique_ptr<ReadOnlyHeap> read_only_heap_;
+};
+
+// -----------------------------------------------------------------------------
+// Read Only space for all Immortal Immovable and Immutable objects
+class ReadOnlySpace : public PagedSpace {
+ public:
+ explicit ReadOnlySpace(Heap* heap);
+
+ // Detach the pages and add them to the artifacts for use in creating a
+ // SharedReadOnlySpace.
+ void DetachPagesAndAddToArtifacts(
+ std::shared_ptr<ReadOnlyArtifacts> artifacts);
+
+ ~ReadOnlySpace() override { Unseal(); }
+
+ bool writable() const { return !is_marked_read_only_; }
+
+ bool Contains(Address a) = delete;
+ bool Contains(Object o) = delete;
+
+ V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
+
+ enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
+
+ // Seal the space by marking it read-only, optionally detaching it
+ // from the heap and forgetting it for memory bookkeeping purposes (e.g.
+ // to prevent the space's memory from registering as leaked).
+ void Seal(SealMode ro_mode);
+
+ // During boot the free_space_map is created, and afterwards we may need
+ // to write it into the free list nodes that were already created.
+ void RepairFreeListsAfterDeserialization();
+
+ size_t Available() override { return 0; }
+
+ protected:
+ void SetPermissionsForPages(MemoryAllocator* memory_allocator,
+ PageAllocator::Permission access);
+
+ bool is_marked_read_only_ = false;
+
+ private:
+ // Unseal the space after it has been sealed, by making it writable.
+ // TODO(v8:7464): Only possible if the space hasn't been detached.
+ void Unseal();
+
+ // String padding must be cleared just before serialization and therefore the
+ // string padding in the space will already have been cleared if the space was
+ // deserialized.
+ bool is_string_padding_cleared_;
+};
+
+class SharedReadOnlySpace : public ReadOnlySpace {
+ public:
+ SharedReadOnlySpace(Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts);
+ ~SharedReadOnlySpace() override;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_READ_ONLY_SPACES_H_
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set-inl.h
index b5e92d6ec6..034e98a06f 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set-inl.h
@@ -2,14 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_REMEMBERED_SET_H_
-#define V8_HEAP_REMEMBERED_SET_H_
+#ifndef V8_HEAP_REMEMBERED_SET_INL_H_
+#define V8_HEAP_REMEMBERED_SET_INL_H_
#include <memory>
+#include "src/base/bounds.h"
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -35,8 +39,8 @@ class RememberedSetOperations {
SlotSet::EmptyBucketMode mode) {
int slots = 0;
if (slot_set != nullptr) {
- slots +=
- slot_set->Iterate(chunk->address(), chunk->buckets(), callback, mode);
+ slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(),
+ callback, mode);
}
return slots;
}
@@ -59,6 +63,25 @@ class RememberedSetOperations {
mode);
}
}
+
+ static void CheckNoneInRange(SlotSet* slot_set, MemoryChunk* chunk,
+ Address start, Address end) {
+ if (slot_set != nullptr) {
+ size_t start_bucket = SlotSet::BucketForSlot(start - chunk->address());
+ // Both 'end' and 'end_bucket' are exclusive limits, so do some index
+ // juggling to make sure we get the right bucket even if the end address
+ // is at the start of a bucket.
+ size_t end_bucket =
+ SlotSet::BucketForSlot(end - chunk->address() - kTaggedSize) + 1;
+ slot_set->Iterate(
+ chunk->address(), start_bucket, end_bucket,
+ [start, end](MaybeObjectSlot slot) {
+ CHECK(!base::IsInRange(slot.address(), start, end + 1));
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ }
+ }
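+
+ // Worked example of the index juggling above, with illustrative constants
+ // only (kTaggedSize == 8 and 8 KB of slot addresses per bucket): with
+ // chunk->address() == 0, start == 0x1FF8 and end == 0x2000, the last slot in
+ // range is end - kTaggedSize == 0x1FF8, which lies in bucket 0, so end_bucket
+ // becomes 0 + 1 == 1 and only bucket 0 is iterated, even though 'end' itself
+ // falls on the first slot of bucket 1.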
};
// TODO(ulan): Investigate performance of de-templatizing this class.
@@ -89,6 +112,11 @@ class RememberedSet : public AllStatic {
return slot_set->Contains(offset);
}
+ static void CheckNoneInRange(MemoryChunk* chunk, Address start, Address end) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::CheckNoneInRange(slot_set, chunk, start, end);
+ }
+
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
@@ -159,8 +187,9 @@ class RememberedSet : public AllStatic {
if (slot_set != nullptr) {
PossiblyEmptyBuckets* possibly_empty_buckets =
chunk->possibly_empty_buckets();
- slots += slot_set->IterateAndTrackEmptyBuckets(
- chunk->address(), chunk->buckets(), callback, possibly_empty_buckets);
+ slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
+ chunk->buckets(), callback,
+ possibly_empty_buckets);
if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
}
return slots;
@@ -286,7 +315,18 @@ class UpdateTypedSlotHelper {
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case OBJECT_SLOT: {
+ case COMPRESSED_OBJECT_SLOT: {
+ HeapObject old_target = HeapObject::cast(Object(DecompressTaggedAny(
+ heap->isolate(), base::Memory<Tagged_t>(addr))));
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
+ }
+ return result;
+ }
+ case FULL_OBJECT_SLOT: {
return callback(FullMaybeObjectSlot(addr));
}
case CLEARED_SLOT:
@@ -398,4 +438,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_REMEMBERED_SET_H_
+#endif // V8_HEAP_REMEMBERED_SET_INL_H_
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index f524b30e74..e6ccf642c0 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -5,19 +5,21 @@
#include "src/heap/safepoint.h"
#include "src/handles/local-handles.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
namespace v8 {
namespace internal {
-Safepoint::Safepoint(Heap* heap) : heap_(heap), local_heaps_head_(nullptr) {}
+GlobalSafepoint::GlobalSafepoint(Heap* heap)
+ : heap_(heap), local_heaps_head_(nullptr), is_active_(false) {}
-void Safepoint::Start() { StopThreads(); }
+void GlobalSafepoint::Start() { StopThreads(); }
-void Safepoint::End() { ResumeThreads(); }
+void GlobalSafepoint::End() { ResumeThreads(); }
-void Safepoint::StopThreads() {
+void GlobalSafepoint::StopThreads() {
local_heaps_mutex_.Lock();
barrier_.Arm();
@@ -35,9 +37,13 @@ void Safepoint::StopThreads() {
current->state_change_.Wait(&current->state_mutex_);
}
}
+
+ is_active_ = true;
}
-void Safepoint::ResumeThreads() {
+void GlobalSafepoint::ResumeThreads() {
+ is_active_ = false;
+
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
current->state_mutex_.Unlock();
@@ -48,7 +54,7 @@ void Safepoint::ResumeThreads() {
local_heaps_mutex_.Unlock();
}
-void Safepoint::EnterFromThread(LocalHeap* local_heap) {
+void GlobalSafepoint::EnterFromThread(LocalHeap* local_heap) {
{
base::MutexGuard guard(&local_heap->state_mutex_);
local_heap->state_ = LocalHeap::ThreadState::Safepoint;
@@ -63,20 +69,20 @@ void Safepoint::EnterFromThread(LocalHeap* local_heap) {
}
}
-void Safepoint::Barrier::Arm() {
+void GlobalSafepoint::Barrier::Arm() {
base::MutexGuard guard(&mutex_);
CHECK(!armed_);
armed_ = true;
}
-void Safepoint::Barrier::Disarm() {
+void GlobalSafepoint::Barrier::Disarm() {
base::MutexGuard guard(&mutex_);
CHECK(armed_);
armed_ = false;
cond_.NotifyAll();
}
-void Safepoint::Barrier::Wait() {
+void GlobalSafepoint::Barrier::Wait() {
base::MutexGuard guard(&mutex_);
while (armed_) {
cond_.Wait(&mutex_);
@@ -84,12 +90,14 @@ void Safepoint::Barrier::Wait() {
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- safepoint_->StopThreads();
+ if (FLAG_local_heaps) safepoint_->StopThreads();
}
-SafepointScope::~SafepointScope() { safepoint_->ResumeThreads(); }
+SafepointScope::~SafepointScope() {
+ if (FLAG_local_heaps) safepoint_->ResumeThreads();
+}
-void Safepoint::AddLocalHeap(LocalHeap* local_heap) {
+void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
local_heap->prev_ = nullptr;
@@ -97,7 +105,7 @@ void Safepoint::AddLocalHeap(LocalHeap* local_heap) {
local_heaps_head_ = local_heap;
}
-void Safepoint::RemoveLocalHeap(LocalHeap* local_heap) {
+void GlobalSafepoint::RemoveLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
if (local_heap->prev_)
@@ -106,7 +114,7 @@ void Safepoint::RemoveLocalHeap(LocalHeap* local_heap) {
local_heaps_head_ = local_heap->next_;
}
-bool Safepoint::ContainsLocalHeap(LocalHeap* local_heap) {
+bool GlobalSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
@@ -118,12 +126,13 @@ bool Safepoint::ContainsLocalHeap(LocalHeap* local_heap) {
return false;
}
-bool Safepoint::ContainsAnyLocalHeap() {
+bool GlobalSafepoint::ContainsAnyLocalHeap() {
base::MutexGuard guard(&local_heaps_mutex_);
return local_heaps_head_ != nullptr;
}
-void Safepoint::Iterate(RootVisitor* visitor) {
+void GlobalSafepoint::Iterate(RootVisitor* visitor) {
+ DCHECK(IsActive());
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
current->handles()->Iterate(visitor);
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 4b0036c047..3ba96e11d5 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -7,6 +7,9 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/handles/persistent-handles.h"
+#include "src/heap/local-heap.h"
+#include "src/objects/visitors.h"
namespace v8 {
namespace internal {
@@ -15,9 +18,11 @@ class Heap;
class LocalHeap;
class RootVisitor;
-class Safepoint {
+// Used to bring all background threads with heap access to a safepoint such
+// that e.g. a garbage collection can be performed.
+class GlobalSafepoint {
public:
- explicit Safepoint(Heap* heap);
+ explicit GlobalSafepoint(Heap* heap);
// Enter the safepoint from a thread
void EnterFromThread(LocalHeap* local_heap);
@@ -28,10 +33,22 @@ class Safepoint {
// Iterate handles in local heaps
void Iterate(RootVisitor* visitor);
+ // Iterate local heaps
+ template <typename Callback>
+ void IterateLocalHeaps(Callback callback) {
+ DCHECK(IsActive());
+ for (LocalHeap* current = local_heaps_head_; current;
+ current = current->next_) {
+ callback(current);
+ }
+ }
+
// Use these methods now instead of the more intrusive SafepointScope
void Start();
void End();
+ bool IsActive() { return is_active_; }
+
private:
class Barrier {
base::Mutex mutex_;
@@ -58,8 +75,11 @@ class Safepoint {
base::Mutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
+ bool is_active_;
+
friend class SafepointScope;
friend class LocalHeap;
+ friend class PersistentHandles;
};
class SafepointScope {
@@ -68,7 +88,7 @@ class SafepointScope {
V8_EXPORT_PRIVATE ~SafepointScope();
private:
- Safepoint* safepoint_;
+ GlobalSafepoint* safepoint_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index a731e37be0..89451be076 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -30,8 +30,7 @@ class ScavengeJob::Task : public CancelableTask {
};
size_t ScavengeJob::YoungGenerationTaskTriggerSize(Heap* heap) {
- static constexpr double kTaskTriggerFactor = 0.8;
- return heap->new_space()->Capacity() * kTaskTriggerFactor;
+ return heap->new_space()->Capacity() * FLAG_scavenge_task_trigger / 100;
}
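// Illustrative arithmetic (assuming FLAG_scavenge_task_trigger defaults to 80,
// matching the removed 0.8 factor): a new-space capacity of 16 MB yields a
// task trigger of 16 MB * 80 / 100 = 12.8 MB.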
bool ScavengeJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
@@ -39,7 +38,7 @@ bool ScavengeJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
}
void ScavengeJob::ScheduleTaskIfNeeded(Heap* heap) {
- if (!task_pending_ && !heap->IsTearingDown() &&
+ if (FLAG_scavenge_task && !task_pending_ && !heap->IsTearingDown() &&
YoungGenerationSizeTaskTriggerReached(heap)) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
auto taskrunner =
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index de80d2a290..3b3cc77b31 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_
-#include "src/heap/scavenger.h"
-
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 08f32e037c..d0d0a30fb1 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -12,6 +12,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
@@ -295,7 +296,18 @@ void ScavengerCollector::CollectGarbage() {
{
// Copy roots.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
- heap_->IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ // Scavenger treats all weak roots except for global handles as strong.
+ // That is why we don't set skip_weak = true here and instead visit
+ // global handles separately.
+ base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
+ SkipRoot::kGlobalHandles,
+ SkipRoot::kOldGeneration});
+ if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
+ options.Add(SkipRoot::kStack);
+ }
+ heap_->IterateRoots(&root_scavenge_visitor, options);
+ isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
+ &root_scavenge_visitor);
}
{
// Parallel phase scavenging all copied and promoted objects.
@@ -304,6 +316,14 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
+
+ if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
+ IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
+ num_scavenge_tasks, kMainThreadId);
+ DCHECK(copied_list.IsEmpty());
+ DCHECK(promotion_list.IsEmpty());
+ }
+
{
// Scavenge weak global handles.
TRACE_GC(heap_->tracer(),
@@ -395,6 +415,39 @@ void ScavengerCollector::CollectGarbage() {
heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}
+void ScavengerCollector::IterateStackAndScavenge(
+ RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
+ int num_scavenge_tasks, int main_thread_id) {
+ // Scan the stack, scavenge the newly discovered objects, and report
+ // the survival statistics before and after the stack scanning.
+ // This code is not intended for production.
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
+ size_t survived_bytes_before = 0;
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ survived_bytes_before +=
+ scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ }
+ heap_->IterateStackRoots(root_scavenge_visitor);
+ scavengers[main_thread_id]->Process();
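+ // Records a string slot as the address of its holder object plus the slot's
+ // offset within that object, so the slot can be located again after the
+ // off-thread pages have been merged into the main-thread heap and the holder
+ // may have moved.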
+ size_t survived_bytes_after = 0;
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ survived_bytes_after +=
+ scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ }
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCScavengerStackScanning", "survived_bytes_before",
+ survived_bytes_before, "survived_bytes_after",
+ survived_bytes_after);
+ if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
+ isolate_->PrintWithTimestamp(
+ "Scavenge stack scanning: survived_before=%4zuKB, "
+ "survived_after=%4zuKB delta=%.1f%%\n",
+ survived_bytes_before / KB, survived_bytes_after / KB,
+ (survived_bytes_after - survived_bytes_before) * 100.0 /
+ survived_bytes_after);
+ }
+}
+
void ScavengerCollector::SweepArrayBufferExtensions() {
heap_->array_buffer_sweeper()->RequestSweepYoung();
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 189bd490ce..d96219fd51 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -15,6 +15,8 @@ namespace v8 {
namespace internal {
class OneshotBarrier;
+class RootScavengeVisitor;
+class Scavenger;
enum class CopyAndForwardResult {
SUCCESS_YOUNG_GENERATION,
@@ -53,6 +55,9 @@ class ScavengerCollector {
void SweepArrayBufferExtensions();
+ void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
+ Scavenger** scavengers, int num_scavenge_tasks,
+ int main_thread_id);
Isolate* const isolate_;
Heap* const heap_;
base::Semaphore parallel_scavenge_semaphore_;
@@ -216,7 +221,7 @@ class Scavenger {
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
- LocalAllocator allocator_;
+ EvacuationAllocator allocator_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
EphemeronRememberedSet ephemeron_remembered_set_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 8c9d8cd456..b62dd5c7fd 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/setup-isolate.h"
-
#include "src/builtins/accessors.h"
#include "src/codegen/compilation-cache.h"
#include "src/execution/isolate.h"
@@ -12,6 +10,7 @@
#include "src/heap/heap-inl.h"
#include "src/ic/handler-configuration.h"
#include "src/init/heap-symbols.h"
+#include "src/init/setup-isolate.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/arguments.h"
#include "src/objects/cell-inl.h"
@@ -46,11 +45,27 @@
#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
#include "torque-generated/class-definitions-tq.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
+namespace {
+
+Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
+ Isolate* isolate, Builtins::Name builtin_id, int len,
+ FunctionKind kind = FunctionKind::kNormalFunction) {
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate->factory()->empty_string(), builtin_id, kind);
+ shared->set_internal_formal_parameter_count(len);
+ shared->set_length(len);
+ return shared;
+}
+
+} // namespace
+
bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
return heap->CreateHeapObjects();
}
@@ -348,7 +363,7 @@ bool Heap::CreateInitialMaps() {
}
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
- ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+ ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
constructor_function_index) \
@@ -403,17 +418,15 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
small_ordered_name_dictionary)
-#define TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR(V, NAME, Name, name) \
+#define TORQUE_ALLOCATE_MAP(NAME, Name, name) \
ALLOCATE_MAP(NAME, Name::kSize, name)
- TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(
- TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR, _);
-#undef TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR
+ TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
+#undef TORQUE_ALLOCATE_MAP
-#define TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR(V, NAME, Name, name) \
+#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
ALLOCATE_VARSIZE_MAP(NAME, name)
- TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(
- TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR, _);
-#undef TORQUE_INTERNAL_CLASS_LIST_MAP_ALLOCATOR
+ TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
+#undef TORQUE_ALLOCATE_VARSIZE_MAP
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
@@ -436,14 +449,14 @@ bool Heap::CreateInitialMaps() {
// The "no closures" and "one closure" FeedbackCell maps need
// to be marked unstable because their objects can change maps.
- ALLOCATE_MAP(
- FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, no_closures_cell)
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+ no_closures_cell)
roots.no_closures_cell_map().mark_unstable();
- ALLOCATE_MAP(
- FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, one_closure_cell)
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+ one_closure_cell)
roots.one_closure_cell_map().mark_unstable();
- ALLOCATE_MAP(
- FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, many_closures_cell)
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+ many_closures_cell)
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
@@ -776,7 +789,6 @@ void Heap::CreateInitialObjects() {
}
set_detached_contexts(roots.empty_weak_array_list());
- set_retained_maps(roots.empty_weak_array_list());
set_retaining_path_targets(roots.empty_weak_array_list());
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
@@ -918,6 +930,13 @@ void Heap::CreateInitialObjects() {
Handle<PropertyCell> cell =
factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
+ set_regexp_species_protector(*cell);
+ }
+
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Protectors::kProtectorValid));
set_string_iterator_protector(*cell);
}
@@ -984,6 +1003,123 @@ void Heap::CreateInitialObjects() {
// Initialize compilation cache.
isolate_->compilation_cache()->Clear();
+
+ // Create internal SharedFunctionInfos.
+
+ // Async functions:
+ {
+ Handle<SharedFunctionInfo> info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncFunctionAwaitRejectClosure, 1);
+ set_async_function_await_reject_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncFunctionAwaitResolveClosure, 1);
+ set_async_function_await_resolve_shared_fun(*info);
+ }
+
+ // Async generators:
+ {
+ Handle<SharedFunctionInfo> info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorAwaitResolveClosure, 1);
+ set_async_generator_await_resolve_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorAwaitRejectClosure, 1);
+ set_async_generator_await_reject_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorYieldResolveClosure, 1);
+ set_async_generator_yield_resolve_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorReturnResolveClosure, 1);
+ set_async_generator_return_resolve_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorReturnClosedResolveClosure, 1);
+ set_async_generator_return_closed_resolve_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate(), Builtins::kAsyncGeneratorReturnClosedRejectClosure, 1);
+ set_async_generator_return_closed_reject_shared_fun(*info);
+ }
+
+ // AsyncIterator:
+ {
+ Handle<SharedFunctionInfo> info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kAsyncIteratorValueUnwrap, 1);
+ set_async_iterator_value_unwrap_shared_fun(*info);
+ }
+
+ // Promises:
+ {
+ Handle<SharedFunctionInfo> info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseCapabilityDefaultResolve, 1,
+ FunctionKind::kConciseMethod);
+ info->set_native(true);
+ info->set_function_map_index(
+ Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ set_promise_capability_default_resolve_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(isolate_,
+ Builtins::kPromiseCapabilityDefaultReject,
+ 1, FunctionKind::kConciseMethod);
+ info->set_native(true);
+ info->set_function_map_index(
+ Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ set_promise_capability_default_reject_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseGetCapabilitiesExecutor, 2);
+ set_promise_get_capabilities_executor_shared_fun(*info);
+ }
+
+ // Promises / finally:
+ {
+ Handle<SharedFunctionInfo> info =
+ CreateSharedFunctionInfo(isolate(), Builtins::kPromiseThenFinally, 1);
+ info->set_native(true);
+ set_promise_then_finally_shared_fun(*info);
+
+ info =
+ CreateSharedFunctionInfo(isolate(), Builtins::kPromiseCatchFinally, 1);
+ info->set_native(true);
+ set_promise_catch_finally_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(isolate(),
+ Builtins::kPromiseValueThunkFinally, 0);
+ set_promise_value_thunk_finally_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(isolate(), Builtins::kPromiseThrowerFinally,
+ 0);
+ set_promise_thrower_finally_shared_fun(*info);
+ }
+
+ // Promise combinators:
+ {
+ Handle<SharedFunctionInfo> info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseAllResolveElementClosure, 1);
+ set_promise_all_resolve_element_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseAllSettledResolveElementClosure, 1);
+ set_promise_all_settled_resolve_element_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseAllSettledRejectElementClosure, 1);
+ set_promise_all_settled_reject_element_shared_fun(*info);
+
+ info = CreateSharedFunctionInfo(
+ isolate_, Builtins::kPromiseAnyRejectElementClosure, 1);
+ set_promise_any_reject_element_shared_fun(*info);
+ }
+
+ // ProxyRevoke:
+ {
+ Handle<SharedFunctionInfo> info =
+ CreateSharedFunctionInfo(isolate_, Builtins::kProxyRevoke, 0);
+ set_proxy_revoke_shared_fun(*info);
+ }
}
void Heap::CreateInternalAccessorInfoObjects() {
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 3275d6278e..ca7eef9b7a 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -190,6 +190,12 @@ class SlotSet {
(kTaggedSizeLog2 + kBitsPerBucketLog2);
}
+ // Converts the slot offset into bucket index.
+ static size_t BucketForSlot(size_t slot_offset) {
+ DCHECK(IsAligned(slot_offset, kTaggedSize));
+ return slot_offset >> (kTaggedSizeLog2 + kBitsPerBucketLog2);
+ }
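+
+ // Illustrative arithmetic, with assumed example values only (kTaggedSizeLog2
+ // == 3 and kBitsPerBucketLog2 == 10, i.e. a shift of 13): a slot_offset of
+ // 0x4000 (16384) maps to bucket 16384 >> 13 == 2.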
+
// The slot offset specifies a slot at address page_start_ + slot_offset.
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
@@ -335,9 +341,9 @@ class SlotSet {
//
// Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
template <typename Callback>
- size_t Iterate(Address chunk_start, size_t buckets, Callback callback,
- EmptyBucketMode mode) {
- return Iterate(chunk_start, buckets, callback,
+ size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
+ Callback callback, EmptyBucketMode mode) {
+ return Iterate(chunk_start, start_bucket, end_bucket, callback,
[this, mode](size_t bucket_index) {
if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
ReleaseBucket(bucket_index);
@@ -351,11 +357,11 @@ class SlotSet {
// CheckPossiblyEmptyBuckets.
template <typename Callback>
size_t IterateAndTrackEmptyBuckets(
- Address chunk_start, size_t buckets, Callback callback,
- PossiblyEmptyBuckets* possibly_empty_buckets) {
- return Iterate(chunk_start, buckets, callback,
- [possibly_empty_buckets, buckets](size_t bucket_index) {
- possibly_empty_buckets->Insert(bucket_index, buckets);
+ Address chunk_start, size_t start_bucket, size_t end_bucket,
+ Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
+ return Iterate(chunk_start, start_bucket, end_bucket, callback,
+ [possibly_empty_buckets, end_bucket](size_t bucket_index) {
+ possibly_empty_buckets->Insert(bucket_index, end_bucket);
});
}
@@ -461,10 +467,11 @@ class SlotSet {
private:
template <typename Callback, typename EmptyBucketCallback>
- size_t Iterate(Address chunk_start, size_t buckets, Callback callback,
- EmptyBucketCallback empty_bucket_callback) {
+ size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
+ Callback callback, EmptyBucketCallback empty_bucket_callback) {
size_t new_count = 0;
- for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
+ for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
+ bucket_index++) {
Bucket* bucket = LoadBucket(bucket_index);
if (bucket != nullptr) {
size_t in_bucket_count = 0;
@@ -597,7 +604,8 @@ STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
COMPRESSED_EMBEDDED_OBJECT_SLOT,
- OBJECT_SLOT,
+ FULL_OBJECT_SLOT,
+ COMPRESSED_OBJECT_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
CLEARED_SLOT
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index a73ea6fe0c..cb8b0a54d7 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,14 +5,14 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
-#include "src/common/globals.h"
-#include "src/heap/spaces.h"
-
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
+#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
#include "src/sanitizer/msan.h"
@@ -207,39 +207,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
return false;
}
-void MemoryChunk::IncrementExternalBackingStoreBytes(
- ExternalBackingStoreType type, size_t amount) {
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
- owner()->IncrementExternalBackingStoreBytes(type, amount);
-#endif
-}
-
-void MemoryChunk::DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType type, size_t amount) {
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- base::CheckedDecrement(&external_backing_store_bytes_[type], amount);
- owner()->DecrementExternalBackingStoreBytes(type, amount);
-#endif
-}
-
-void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
- MemoryChunk* from,
- MemoryChunk* to,
- size_t amount) {
- DCHECK_NOT_NULL(from->owner());
- DCHECK_NOT_NULL(to->owner());
- base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
- base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
- Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
- amount);
-}
-
-AllocationSpace MemoryChunk::owner_identity() const {
- if (InReadOnlySpace()) return RO_SPACE;
- return owner()->identity();
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -265,10 +232,6 @@ void Page::ClearEvacuationCandidate() {
InitializeFreeListCategories();
}
-HeapObject LargePage::GetObject() {
- return HeapObject::FromAddress(area_start());
-}
-
OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
: heap_(heap),
state_(kOldSpaceState),
@@ -372,8 +335,9 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
allocation_info_.set_top(new_top);
if (filler_size > 0) {
- return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
- filler_size);
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap_),
+ HeapObject::FromAddress(current_top),
+ filler_size);
}
return AllocationResult(HeapObject::FromAddress(current_top));
@@ -406,8 +370,9 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
- return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
- filler_size);
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
+ HeapObject::FromAddress(current_top),
+ filler_size);
}
return HeapObject::FromAddress(current_top);
@@ -521,7 +486,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
- obj = heap()->PrecedeWithFiller(obj, filler_size);
+ obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index a7aec2ff1f..5e8874fafd 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -9,8 +9,8 @@
#include <utility>
#include "src/base/bits.h"
-#include "src/base/lsan.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
@@ -20,16 +20,19 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
@@ -45,7 +48,6 @@ namespace internal {
// in order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
-STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator
@@ -413,7 +415,6 @@ bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
return true;
}
@@ -423,7 +424,6 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
PageAllocator::kNoAccess)) {
return false;
}
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -468,149 +468,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
-void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
- base::AddressRegion memory_area =
- MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
- if (memory_area.size() != 0) {
- MemoryAllocator* memory_allocator = heap_->memory_allocator();
- v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(executable());
- CHECK(page_allocator->DiscardSystemPages(
- reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
- }
-}
-
-size_t MemoryChunkLayout::CodePageGuardStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
-}
-
-size_t MemoryChunkLayout::CodePageGuardSize() {
- return MemoryAllocator::GetCommitPageSize();
-}
-
-intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
- // We are guarding code pages: the last OS page will be protected as
- // non-writable.
- return Page::kPageSize -
- static_cast<int>(MemoryAllocator::GetCommitPageSize());
-}
-
-size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
- size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
- DCHECK_LE(kMaxRegularHeapObjectSize, memory);
- return memory;
-}
-
-intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
- return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
-}
-
-size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
- AllocationSpace space) {
- if (space == CODE_SPACE) {
- return ObjectStartOffsetInCodePage();
- }
- return ObjectStartOffsetInDataPage();
-}
-
-size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
- size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
- DCHECK_LE(kMaxRegularHeapObjectSize, memory);
- return memory;
-}
-
-size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- AllocationSpace space) {
- if (space == CODE_SPACE) {
- return AllocatableMemoryInCodePage();
- }
- return AllocatableMemoryInDataPage();
-}
-
-#ifdef THREAD_SANITIZER
-void MemoryChunk::SynchronizedHeapLoad() {
- CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
- reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
- InReadOnlySpace());
-}
-#endif
-
-void MemoryChunk::InitializationMemoryFence() {
- base::SeqCst_MemoryFence();
-#ifdef THREAD_SANITIZER
- // Since TSAN does not process memory fences, we use the following annotation
- // to tell TSAN that there is no data race when emitting a
- // InitializationMemoryFence. Note that the other thread still needs to
- // perform MemoryChunk::synchronized_heap().
- base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
- reinterpret_cast<base::AtomicWord>(heap_));
-#endif
-}
-
-void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
- PageAllocator::Permission permission) {
- DCHECK(permission == PageAllocator::kRead ||
- permission == PageAllocator::kReadExecute);
- DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
- // Decrementing the write_unprotect_counter_ and changing the page
- // protection mode has to be atomic.
- base::MutexGuard guard(page_protection_change_mutex_);
- if (write_unprotect_counter_ == 0) {
- // This is a corner case that may happen when we have a
- // CodeSpaceMemoryModificationScope open and this page was newly
- // added.
- return;
- }
- write_unprotect_counter_--;
- DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
- if (write_unprotect_counter_ == 0) {
- Address protect_start =
- address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(protect_start, page_size));
- size_t protect_size = RoundUp(area_size(), page_size);
- CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
- }
-}
-
-void MemoryChunk::SetReadable() {
- DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
-}
-
-void MemoryChunk::SetReadAndExecutable() {
- DCHECK(!FLAG_jitless);
- DecrementWriteUnprotectCounterAndMaybeSetPermissions(
- PageAllocator::kReadExecute);
-}
-
-void MemoryChunk::SetReadAndWritable() {
- DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
- // Incrementing the write_unprotect_counter_ and changing the page
- // protection mode has to be atomic.
- base::MutexGuard guard(page_protection_change_mutex_);
- write_unprotect_counter_++;
- DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
- if (write_unprotect_counter_ == 1) {
- Address unprotect_start =
- address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(unprotect_start, page_size));
- size_t unprotect_size = RoundUp(area_size(), page_size);
- CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
- PageAllocator::kReadWrite));
- }
-}
-
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
auto result = code_object_registry_newly_allocated_.insert(code);
USE(result);
@@ -791,39 +648,28 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
return page;
}
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable) {
- if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
- STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
- FATAL("Code page is too large.");
- }
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
-
- LargePage* page = static_cast<LargePage*>(chunk);
- page->SetFlag(MemoryChunk::LARGE_PAGE);
- page->list_node().Initialize();
- return page;
-}
-
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
- categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
- for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ categories_ =
+ new FreeListCategory*[owner()->free_list()->number_of_categories()]();
+ for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
+ i++) {
DCHECK_NULL(categories_[i]);
categories_[i] = new FreeListCategory();
}
}
void Page::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
+ i++) {
categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
}
}
void Page::ReleaseFreeListCategories() {
if (categories_ != nullptr) {
- for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
+ i++) {
if (categories_[i] != nullptr) {
delete categories_[i];
categories_[i] = nullptr;
@@ -978,11 +824,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
area_end = area_start + commit_area_size;
}
- // Use chunk_size for statistics and callbacks because we assume that they
- // treat reserved but not-yet committed memory regions of chunks as allocated.
- isolate_->counters()->memory_allocated()->Increment(
- static_cast<int>(chunk_size));
-
+ // Use chunk_size for statistics because we assume that reserved but
+ // not-yet committed memory regions of chunks are treated as allocated.
LOG(isolate_,
NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
@@ -1163,8 +1006,6 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
const size_t released_bytes = reservation->Release(start_free);
DCHECK_GE(size_, released_bytes);
size_ -= released_bytes;
- isolate_->counters()->memory_allocated()->Decrement(
- static_cast<int>(released_bytes));
}
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
@@ -1174,7 +1015,6 @@ void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
if (chunk->executable() == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
size_executable_ -= size;
@@ -1408,14 +1248,14 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
-}
-void MemoryChunk::ReleaseAllAllocatedMemory() {
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
+}
+void MemoryChunk::ReleaseAllAllocatedMemory() {
ReleaseAllocatedMemoryNeededForWritableChunk();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
@@ -1762,18 +1602,12 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
- if (merging_from_off_thread) {
- // TODO(leszeks): Allocation groups are currently not handled properly by
- // the sampling allocation profiler. We'll have to come up with a better
- // solution for allocation stepping before shipping.
- AllocationStepAfterMerge(
- p->area_start(),
- static_cast<int>(p->HighWaterMark() - p->area_start()));
- }
- }
-
- if (merging_from_off_thread) {
- heap()->NotifyOffThreadSpaceMerged();
+ // TODO(leszeks): Here we should do an allocation step, but:
+ // 1. Allocation groups are currently not handled properly by the sampling
+ // allocation profiler, and
+ // 2. Observers might try to take the space lock, which isn't reentrant.
+ // We'll have to come up with a better solution for allocation stepping
+ // before shipping, which will likely be using LocalHeap.
}
DCHECK_EQ(0u, other->Size());
@@ -1902,7 +1736,6 @@ bool PagedSpace::Expand() {
return true;
}
-
int PagedSpace::CountTotalPages() {
int count = 0;
for (Page* page : *this) {
@@ -2132,6 +1965,99 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
return true;
}
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK(!is_local_space() && identity() == OLD_SPACE);
+ DCHECK_EQ(origin, AllocationOrigin::kRuntime);
+ base::MutexGuard lock(&allocation_mutex_);
+
+ auto result = TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ // Sweeping is still in progress.
+ if (collector->sweeping_in_progress()) {
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ RefillFreeList();
+
+ // Retry the free list allocation.
+ auto result = TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ const int kMaxPagesToSweep = 1;
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
+ RefillFreeList();
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
+ return TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ }
+
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
+ Expand()) {
+ DCHECK((CountTotalPages() > 1) ||
+ (min_size_in_bytes <= free_list_->Available()));
+ return TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ }
+
+ // TODO(dinfuehr): Complete sweeping here and try allocation again.
+
+ return {};
+}
+
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
+ DCHECK_EQ(identity(), OLD_SPACE);
+
+ size_t new_node_size = 0;
+ FreeSpace new_node =
+ free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
+ if (new_node.is_null()) return {};
+ DCHECK_GE(new_node_size, min_size_in_bytes);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ Page* page = Page::FromHeapObject(new_node);
+ IncreaseAllocatedBytes(new_node_size, page);
+
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
+ size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
+
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
+ Address limit = new_node.address() + used_size_in_bytes;
+ DCHECK_LE(limit, end);
+ DCHECK_LE(min_size_in_bytes, limit - start);
+ if (limit != end) {
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ }
+
+ return std::make_pair(start, used_size_in_bytes);
+}
+
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
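The two functions added in the hunk above implement lock-protected background allocation from the old-space free list: take the space's allocation mutex, try to pull a free-list node of at least min_size_in_bytes, hand out at most max_size_in_bytes, and return the unused tail to the free list (refilling from the sweeper or expanding the space before retrying). A minimal standalone sketch of that pattern follows; the names (ToyFreeListSpace, free_nodes_, AllocateBackground) are illustrative assumptions and not part of V8.

// Standalone sketch (not V8 code) of the background free-list allocation
// pattern above: take the space's allocation mutex, pull a node of at least
// |min| bytes, hand out at most |max| bytes, and return the unused tail.
#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>
#include <optional>
#include <utility>

using Address = uintptr_t;

class ToyFreeListSpace {
 public:
  void Free(Address start, size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    free_nodes_[start] = size;
  }

  // Mirrors the shape of TryAllocationFromFreeListBackground: returns
  // {start, used_size} with min <= used_size <= max, or nothing if no
  // free node is large enough.
  std::optional<std::pair<Address, size_t>> AllocateBackground(size_t min,
                                                               size_t max) {
    std::lock_guard<std::mutex> guard(mutex_);
    for (auto it = free_nodes_.begin(); it != free_nodes_.end(); ++it) {
      if (it->second < min) continue;
      const Address start = it->first;
      const size_t node_size = it->second;
      free_nodes_.erase(it);
      const size_t used = node_size < max ? node_size : max;
      if (used != node_size) {
        // Give the unused tail back, like the Free(limit, end - limit) call
        // in the hunk above.
        free_nodes_[start + used] = node_size - used;
      }
      return std::make_pair(start, used);
    }
    // A real caller would now refill from the sweeper or expand, then retry.
    return std::nullopt;
  }

 private:
  std::mutex mutex_;
  std::map<Address, size_t> free_nodes_;
};

int main() {
  ToyFreeListSpace space;
  space.Free(0x10000, 4096);
  auto result = space.AllocateBackground(/*min=*/256, /*max=*/1024);
  return result && result->second == 1024 ? 0 : 1;
}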
@@ -2430,12 +2356,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
return true;
}
-LinearAllocationArea LocalAllocationBuffer::Close() {
+LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
- heap_->CreateFillerObjectAt(
- allocation_info_.top(),
- static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
- ClearRecordedSlots::kNo);
+ MakeIterable();
const LinearAllocationArea old_info = allocation_info_;
allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
return old_info;
@@ -2443,6 +2366,15 @@ LinearAllocationArea LocalAllocationBuffer::Close() {
return LinearAllocationArea(kNullAddress, kNullAddress);
}
+void LocalAllocationBuffer::MakeIterable() {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
+ ClearRecordedSlots::kNo);
+ }
+}
+
LocalAllocationBuffer::LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
: heap_(heap),
@@ -2455,22 +2387,17 @@ LocalAllocationBuffer::LocalAllocationBuffer(
}
}
-LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
+LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
V8_NOEXCEPT {
- *this = other;
+ *this = std::move(other);
}
LocalAllocationBuffer& LocalAllocationBuffer::operator=(
- const LocalAllocationBuffer& other) V8_NOEXCEPT {
- Close();
+ LocalAllocationBuffer&& other) V8_NOEXCEPT {
heap_ = other.heap_;
allocation_info_ = other.allocation_info_;
- // This is needed since we (a) cannot yet use move-semantics, and (b) want
- // to make the use of the class easy by it as value and (c) implicitly call
- // {Close} upon copy.
- const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
- kNullAddress, kNullAddress);
+ other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
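The hunk above replaces LocalAllocationBuffer's copying constructor and assignment, which implicitly closed the source buffer, with move semantics that simply reset the moved-from allocation area. A minimal sketch of that move-only ownership pattern follows, assuming illustrative names (Buffer, LinearArea) rather than the real V8 classes.

// Sketch of the move-only pattern adopted above: the moved-from object is
// left empty, so exactly one owner is ever responsible for the area.
#include <cstdint>
#include <utility>

struct LinearArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;
  void Reset() { top = limit = 0; }
  bool IsValid() const { return top != 0; }
};

class Buffer {
 public:
  Buffer() = default;
  explicit Buffer(LinearArea area) : area_(area) {}

  // Move-only: copying is deleted, moving transfers the area and resets
  // the source, mirroring other.allocation_info_.Reset(...) above.
  Buffer(Buffer&& other) noexcept { *this = std::move(other); }
  Buffer& operator=(Buffer&& other) noexcept {
    area_ = other.area_;
    other.area_.Reset();
    return *this;
  }
  Buffer(const Buffer&) = delete;
  Buffer& operator=(const Buffer&) = delete;

  bool IsValid() const { return area_.IsValid(); }

 private:
  LinearArea area_;
};

int main() {
  Buffer a(LinearArea{0x1000, 0x2000});
  Buffer b = std::move(a);  // ownership transfers; a becomes empty
  return (a.IsValid() || !b.IsValid()) ? 1 : 0;
}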
@@ -2577,12 +2504,6 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return true;
}
-size_t LargeObjectSpace::Available() {
- // We return zero here since we cannot take advantage of already allocated
- // large object memory.
- return 0;
-}
-
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -3689,7 +3610,8 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
int MemoryChunk::FreeListsLength() {
int length = 0;
- for (int cat = kFirstCategory; cat <= free_list()->last_category(); cat++) {
+ for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
+ cat++) {
if (categories_[cat] != nullptr) {
length += categories_[cat]->FreeListLength();
}
@@ -3787,6 +3709,13 @@ bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ base::Optional<base::MutexGuard> optional_mutex;
+
+ if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
+ identity() == OLD_SPACE) {
+ optional_mutex.emplace(&allocation_mutex_);
+ }
+
return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
@@ -3935,602 +3864,5 @@ void MapSpace::SortFreeList() {
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
-// -----------------------------------------------------------------------------
-// ReadOnlySpace implementation
-
-ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
- is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
-}
-
-void ReadOnlyPage::MakeHeaderRelocatable() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
- // Detached read-only space needs to have a valid marking bitmap and free list
- // categories. Instruct Lsan to ignore them if required.
- LSAN_IGNORE_OBJECT(categories_);
- for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
- LSAN_IGNORE_OBJECT(categories_[i]);
- }
- LSAN_IGNORE_OBJECT(marking_bitmap_);
- heap_ = nullptr;
- owner_ = nullptr;
-}
-
-void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
- PageAllocator::Permission access) {
- for (Page* p : *this) {
- // Read only pages don't have valid reservation object so we get proper
- // page allocator manually.
- v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(p->executable());
- CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
- }
-}
-
-// After we have booted, we have created a map which represents free space
-// on the heap. If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
-// fix them.
-void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_->RepairLists(heap());
- // Each page may have a small free space that is not tracked by a free list.
- // Those free spaces still contain null as their map pointer.
- // Overwrite them with new fillers.
- for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
- if (size == 0) {
- // If there is no wasted memory then all free space is in the free list.
- continue;
- }
- Address start = page->HighWaterMark();
- Address end = page->area_end();
- if (start < end - size) {
- // A region at the high watermark is already in free list.
- HeapObject filler = HeapObject::FromAddress(start);
- CHECK(filler.IsFreeSpaceOrFiller());
- start += filler.Size();
- }
- CHECK_EQ(size, static_cast<int>(end - start));
- heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
- }
-}
-
-void ReadOnlySpace::ClearStringPaddingIfNeeded() {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- // TODO(ulan): Revisit this once third-party heap supports iteration.
- return;
- }
- if (is_string_padding_cleared_) return;
-
- ReadOnlyHeapObjectIterator iterator(this);
- for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
- if (o.IsSeqOneByteString()) {
- SeqOneByteString::cast(o).clear_padding();
- } else if (o.IsSeqTwoByteString()) {
- SeqTwoByteString::cast(o).clear_padding();
- }
- }
- is_string_padding_cleared_ = true;
-}
-
-void ReadOnlySpace::Seal(SealMode ro_mode) {
- DCHECK(!is_marked_read_only_);
-
- FreeLinearAllocationArea();
- is_marked_read_only_ = true;
- auto* memory_allocator = heap()->memory_allocator();
-
- if (ro_mode == SealMode::kDetachFromHeapAndForget) {
- DetachFromHeap();
- for (Page* p : *this) {
- memory_allocator->UnregisterMemory(p);
- static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
- }
- }
-
- SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
-}
-
-void ReadOnlySpace::Unseal() {
- DCHECK(is_marked_read_only_);
- SetPermissionsForPages(heap()->memory_allocator(), PageAllocator::kReadWrite);
- is_marked_read_only_ = false;
-}
-
-Address LargePage::GetAddressToShrink(Address object_address,
- size_t object_size) {
- if (executable() == EXECUTABLE) {
- return 0;
- }
- size_t used_size = ::RoundUp((object_address - address()) + object_size,
- MemoryAllocator::GetCommitPageSize());
- if (used_size < CommittedPhysicalMemory()) {
- return address() + used_size;
- }
- return 0;
-}
-
-void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
- DCHECK_NULL(this->sweeping_slot_set());
- RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
- RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
-}
-
-// -----------------------------------------------------------------------------
-// LargeObjectSpaceObjectIterator
-
-LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
- LargeObjectSpace* space) {
- current_ = space->first_page();
-}
-
-HeapObject LargeObjectSpaceObjectIterator::Next() {
- if (current_ == nullptr) return HeapObject();
-
- HeapObject object = current_->GetObject();
- current_ = current_->next_page();
- return object;
-}
-
-// -----------------------------------------------------------------------------
-// OldLargeObjectSpace
-
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id, new NoFreeList()),
- size_(0),
- page_count_(0),
- objects_size_(0) {}
-
-void LargeObjectSpace::TearDown() {
- while (!memory_chunk_list_.Empty()) {
- LargePage* page = first_page();
- LOG(heap()->isolate(),
- DeleteEvent("LargeObjectChunk",
- reinterpret_cast<void*>(page->address())));
- memory_chunk_list_.Remove(page);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
- }
-}
-
-AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
- return AllocateRaw(object_size, NOT_EXECUTABLE);
-}
-
-AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
- Executability executable) {
- // Check if we want to force a GC before growing the old space further.
- // If so, fail the allocation.
- if (!heap()->CanExpandOldGeneration(object_size) ||
- !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
- return AllocationResult::Retry(identity());
- }
-
- LargePage* page = AllocateLargePage(object_size, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
- page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- HeapObject object = page->GetObject();
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
- }
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
- page->InitializationMemoryFence();
- heap()->NotifyOldGenerationExpansion();
- AllocationStep(object_size, object.address(), object_size);
- return object;
-}
-
-LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
- Executability executable) {
- LargePage* page = heap()->memory_allocator()->AllocateLargePage(
- object_size, this, executable);
- if (page == nullptr) return nullptr;
- DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
-
- AddPage(page, object_size);
-
- HeapObject object = page->GetObject();
-
- heap()->CreateFillerObjectAt(object.address(), object_size,
- ClearRecordedSlots::kNo);
- return page;
-}
-
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
- // On a platform that provides lazy committing of memory, we over-account
- // the actually committed memory. There is no easy way right now to support
- // precise accounting of committed memory in large object space.
- return CommittedMemory();
-}
-
-LargePage* CodeLargeObjectSpace::FindPage(Address a) {
- const Address key = MemoryChunk::FromAddress(a)->address();
- auto it = chunk_map_.find(key);
- if (it != chunk_map_.end()) {
- LargePage* page = it->second;
- CHECK(page->Contains(a));
- return page;
- }
- return nullptr;
-}
-
-void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- LargeObjectSpaceObjectIterator it(this);
- for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- if (marking_state->IsBlackOrGrey(obj)) {
- Marking::MarkWhite(marking_state->MarkBitFrom(obj));
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- chunk->ResetProgressBar();
- marking_state->SetLiveBytes(chunk, 0);
- }
- DCHECK(marking_state->IsWhite(obj));
- }
-}
-
-void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
- for (Address current = reinterpret_cast<Address>(page);
- current < reinterpret_cast<Address>(page) + page->size();
- current += MemoryChunk::kPageSize) {
- chunk_map_[current] = page;
- }
-}
-
-void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
- for (Address current = page->address();
- current < reinterpret_cast<Address>(page) + page->size();
- current += MemoryChunk::kPageSize) {
- chunk_map_.erase(current);
- }
-}
-
-void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
- DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
- DCHECK(page->IsLargePage());
- DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
- DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
- size_t object_size = static_cast<size_t>(page->GetObject().Size());
- static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
- page->ClearFlag(MemoryChunk::FROM_PAGE);
- page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- AddPage(page, object_size);
-}
-
-void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
- size_ += static_cast<int>(page->size());
- AccountCommitted(page->size());
- objects_size_ += object_size;
- page_count_++;
- memory_chunk_list_.PushBack(page);
- page->set_owner(this);
-}
-
-void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
- size_ -= static_cast<int>(page->size());
- AccountUncommitted(page->size());
- objects_size_ -= object_size;
- page_count_--;
- memory_chunk_list_.Remove(page);
- page->set_owner(nullptr);
-}
-
-void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* current = first_page();
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- // Right-trimming does not update the objects_size_ counter. We are lazily
- // updating it after every GC.
- size_t surviving_object_size = 0;
- while (current) {
- LargePage* next_current = current->next_page();
- HeapObject object = current->GetObject();
- DCHECK(!marking_state->IsGrey(object));
- size_t size = static_cast<size_t>(object.Size());
- if (marking_state->IsBlack(object)) {
- Address free_start;
- surviving_object_size += size;
- if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
- 0) {
- DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
- current->ClearOutOfLiveRangeSlots(free_start);
- const size_t bytes_to_free =
- current->size() - (free_start - current->address());
- heap()->memory_allocator()->PartialFreeMemory(
- current, free_start, bytes_to_free,
- current->area_start() + object.Size());
- size_ -= bytes_to_free;
- AccountUncommitted(bytes_to_free);
- }
- } else {
- RemovePage(current, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
- current);
- }
- current = next_current;
- }
- objects_size_ = surviving_object_size;
-}
-
-bool LargeObjectSpace::Contains(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
-
- bool owned = (chunk->owner() == this);
-
- SLOW_DCHECK(!owned || ContainsSlow(object.address()));
-
- return owned;
-}
-
-bool LargeObjectSpace::ContainsSlow(Address addr) {
- for (LargePage* page : *this) {
- if (page->Contains(addr)) return true;
- }
- return false;
-}
-
-std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
- Heap* heap) {
- return std::unique_ptr<ObjectIterator>(
- new LargeObjectSpaceObjectIterator(this));
-}
-
-#ifdef VERIFY_HEAP
-// We do not assume that the large object iterator works, because it depends
-// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify(Isolate* isolate) {
- size_t external_backing_store_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (LargePage* chunk = first_page(); chunk != nullptr;
- chunk = chunk->next_page()) {
- // Each chunk contains an object that starts at the large object page's
- // object area start.
- HeapObject object = chunk->GetObject();
- Page* page = Page::FromHeapObject(object);
- CHECK(object.address() == page->area_start());
-
- // The first word should be a map, and we expect all map pointers to be
- // in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
-
- // We have only the following types in the large object space:
- if (!(object.IsAbstractCode() || object.IsSeqString() ||
- object.IsExternalString() || object.IsThinString() ||
- object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsWeakFixedArray() || object.IsWeakArrayList() ||
- object.IsPropertyArray() || object.IsByteArray() ||
- object.IsFeedbackVector() || object.IsBigInt() ||
- object.IsFreeSpace() || object.IsFeedbackMetadata() ||
- object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
- object.IsPreparseData()) &&
- !FLAG_young_generation_large_objects) {
- FATAL("Found invalid Object (instance_type=%i) in large object space.",
- object.map().instance_type());
- }
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (!FLAG_verify_heap_skip_remembered_set) {
- heap()->VerifyRememberedSetFor(object);
- }
-
- // Byte arrays and strings don't have interior pointers.
- if (object.IsAbstractCode()) {
- VerifyPointersVisitor code_visitor(heap());
- object.IterateBody(map, object.Size(), &code_visitor);
- } else if (object.IsFixedArray()) {
- FixedArray array = FixedArray::cast(object);
- for (int j = 0; j < array.length(); j++) {
- Object element = array.get(j);
- if (element.IsHeapObject()) {
- HeapObject element_object = HeapObject::cast(element);
- CHECK(IsValidHeapObject(heap(), element_object));
- CHECK(element_object.map().IsMap());
- }
- }
- } else if (object.IsPropertyArray()) {
- PropertyArray array = PropertyArray::cast(object);
- for (int j = 0; j < array.length(); j++) {
- Object property = array.get(j);
- if (property.IsHeapObject()) {
- HeapObject property_object = HeapObject::cast(property);
- CHECK(heap()->Contains(property_object));
- CHECK(property_object.map().IsMap());
- }
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
- }
-}
-#endif
-
-#ifdef DEBUG
-void LargeObjectSpace::Print() {
- StdoutStream os;
- LargeObjectSpaceObjectIterator it(this);
- for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- obj.Print(os);
- }
-}
-#endif // DEBUG
-
-OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
- : LargeObjectSpace(heap, LO_SPACE) {}
-
-OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
- : LargeObjectSpace(heap, id) {}
-
-void OldLargeObjectSpace::MergeOffThreadSpace(
- OffThreadLargeObjectSpace* other) {
- DCHECK(identity() == other->identity());
-
- while (!other->memory_chunk_list().Empty()) {
- LargePage* page = other->first_page();
- HeapObject object = page->GetObject();
- int size = object.Size();
- other->RemovePage(page, size);
- AddPage(page, size);
-
- AllocationStepAfterMerge(object.address(), size);
- if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
- }
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
- }
-
- heap()->NotifyOffThreadSpaceMerged();
-}
-
-NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
- : LargeObjectSpace(heap, NEW_LO_SPACE),
- pending_object_(0),
- capacity_(capacity) {}
-
-AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
- // Do not allocate more objects if promoting the existing object would exceed
- // the old generation capacity.
- if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
- return AllocationResult::Retry(identity());
- }
-
- // Allocation for the first object must succeed independent from the capacity.
- if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
- return AllocationResult::Retry(identity());
- }
-
- LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
- if (page == nullptr) return AllocationResult::Retry(identity());
-
- // The size of the first object may exceed the capacity.
- capacity_ = Max(capacity_, SizeOfObjects());
-
- HeapObject result = page->GetObject();
- page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- page->SetFlag(MemoryChunk::TO_PAGE);
- pending_object_.store(result.address(), std::memory_order_relaxed);
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
- }
-#endif // ENABLE_MINOR_MC
- page->InitializationMemoryFence();
- DCHECK(page->IsLargePage());
- DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
- AllocationStep(object_size, result.address(), object_size);
- return result;
-}
-
-size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
-
-void NewLargeObjectSpace::Flip() {
- for (LargePage* chunk = first_page(); chunk != nullptr;
- chunk = chunk->next_page()) {
- chunk->SetFlag(MemoryChunk::FROM_PAGE);
- chunk->ClearFlag(MemoryChunk::TO_PAGE);
- }
-}
-
-void NewLargeObjectSpace::FreeDeadObjects(
- const std::function<bool(HeapObject)>& is_dead) {
- bool is_marking = heap()->incremental_marking()->IsMarking();
- size_t surviving_object_size = 0;
- bool freed_pages = false;
- for (auto it = begin(); it != end();) {
- LargePage* page = *it;
- it++;
- HeapObject object = page->GetObject();
- size_t size = static_cast<size_t>(object.Size());
- if (is_dead(object)) {
- freed_pages = true;
- RemovePage(page, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
- if (FLAG_concurrent_marking && is_marking) {
- heap()->concurrent_marking()->ClearMemoryChunkData(page);
- }
- } else {
- surviving_object_size += size;
- }
- }
- // Right-trimming does not update the objects_size_ counter. We are lazily
- // updating it after every GC.
- objects_size_ = surviving_object_size;
- if (freed_pages) {
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- }
-}
-
-void NewLargeObjectSpace::SetCapacity(size_t capacity) {
- capacity_ = Max(capacity, SizeOfObjects());
-}
-
-CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
- : OldLargeObjectSpace(heap, CODE_LO_SPACE),
- chunk_map_(kInitialChunkMapCapacity) {}
-
-AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
- return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
-}
-
-void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
- OldLargeObjectSpace::AddPage(page, object_size);
- InsertChunkMapEntries(page);
- heap()->isolate()->AddCodeMemoryChunk(page);
-}
-
-void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
- RemoveChunkMapEntries(page);
- heap()->isolate()->RemoveCodeMemoryChunk(page);
- OldLargeObjectSpace::RemovePage(page, object_size);
-}
-
-OffThreadLargeObjectSpace::OffThreadLargeObjectSpace(Heap* heap)
- : LargeObjectSpace(heap, LO_SPACE) {
-#ifdef V8_ENABLE_THIRD_PARTY_HEAP
- // OffThreadLargeObjectSpace doesn't work with third-party heap.
- UNREACHABLE();
-#endif
-}
-
-AllocationResult OffThreadLargeObjectSpace::AllocateRaw(int object_size) {
- LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
- if (page == nullptr) return AllocationResult::Retry(identity());
-
- return page->GetObject();
-}
-
-void OffThreadLargeObjectSpace::FreeUnmarkedObjects() {
- // We should never try to free objects in this space.
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 8d587443e3..72ae96cadd 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -17,15 +17,17 @@
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/iterator.h"
-#include "src/base/list.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
+#include "src/heap/list.h"
#include "src/heap/marking.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/objects/free-space.h"
#include "src/objects/heap-object.h"
@@ -50,6 +52,7 @@ class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
+class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class LocalSpace;
@@ -496,7 +499,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
MemoryChunk* first_page() { return memory_chunk_list_.front(); }
MemoryChunk* last_page() { return memory_chunk_list_.back(); }
- base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+ heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
FreeList* free_list() { return free_list_.get(); }
@@ -515,7 +518,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
- base::List<MemoryChunk> memory_chunk_list_;
+ heap::List<MemoryChunk> memory_chunk_list_;
// Tracks off-heap memory used by this space.
std::atomic<size_t>* external_backing_store_bytes_;
@@ -525,7 +528,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
AllocationSpace id_;
// Keeps track of committed memory in a space.
- size_t committed_;
+ std::atomic<size_t> committed_;
size_t max_committed_;
std::unique_ptr<FreeList> free_list_;
@@ -551,451 +554,6 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
std::set<Address> code_object_registry_newly_allocated_;
};
-class V8_EXPORT_PRIVATE MemoryChunkLayout {
- public:
- static size_t CodePageGuardStartOffset();
- static size_t CodePageGuardSize();
- static intptr_t ObjectStartOffsetInCodePage();
- static intptr_t ObjectEndOffsetInCodePage();
- static size_t AllocatableMemoryInCodePage();
- static intptr_t ObjectStartOffsetInDataPage();
- static size_t AllocatableMemoryInDataPage();
- static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
- static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
-};
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accommodate
-// any heap object.
-class MemoryChunk : public BasicMemoryChunk {
- public:
- // Use with std data structures.
- struct Hasher {
- size_t operator()(MemoryChunk* const chunk) const {
- return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
- }
- };
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
-
- // |kDone|: The page state when sweeping is complete or sweeping must not be
- // performed on that page. Sweeper threads that are done with their work
- // will set this value and not touch the page anymore.
- // |kPending|: This page is ready for parallel sweeping.
- // |kInProgress|: This page is currently swept by a sweeper thread.
- enum class ConcurrentSweepingState : intptr_t {
- kDone,
- kPending,
- kInProgress,
- };
-
- static const size_t kHeaderSize =
- BasicMemoryChunk::kHeaderSize // Parent size.
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
- + kSystemPointerSize // SlotSet* sweeping_slot_set_
- + kSystemPointerSize *
- NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
- + kSystemPointerSize *
- NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
- + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
- + kSystemPointerSize // base::Mutex* mutex_
- + kSystemPointerSize // std::atomic<ConcurrentSweepingState>
- // concurrent_sweeping_
- + kSystemPointerSize // base::Mutex* page_protection_change_mutex_
- + kSystemPointerSize // unitptr_t write_unprotect_counter_
- + kSizetSize * ExternalBackingStoreType::kNumTypes
- // std::atomic<size_t> external_backing_store_bytes_
- + kSizetSize // size_t allocated_bytes_
- + kSizetSize // size_t wasted_memory_
- + kSystemPointerSize * 2 // base::ListNode
- + kSystemPointerSize // FreeListCategory** categories__
- + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
- + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
- + kSystemPointerSize // Bitmap* young_generation_bitmap_
- + kSystemPointerSize // CodeObjectRegistry* code_object_registry_
- + kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
-
- // Page size in bytes. This must be a multiple of the OS page size.
- static const int kPageSize = 1 << kPageSizeBits;
-
- // Maximum number of nested code memory modification scopes.
- static const int kMaxWriteUnprotectCounter = 3;
-
- // Only works if the pointer is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromAddress(Address a) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
- }
- // Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(HeapObject o) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
- }
-
- void SetOldGenerationPageFlags(bool is_marking);
- void SetYoungGenerationPageFlags(bool is_marking);
-
- static inline void UpdateHighWaterMark(Address mark) {
- if (mark == kNullAddress) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationAreaAddress.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
- intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
- while ((new_mark > old_mark) &&
- !chunk->high_water_mark_.compare_exchange_weak(
- old_mark, new_mark, std::memory_order_acq_rel)) {
- }
- }
-
- static inline void MoveExternalBackingStoreBytes(
- ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
- size_t amount);
-
- void DiscardUnusedMemory(Address addr, size_t size);
-
- base::Mutex* mutex() { return mutex_; }
-
- void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
- concurrent_sweeping_ = state;
- }
-
- ConcurrentSweepingState concurrent_sweeping_state() {
- return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
- }
-
- bool SweepingDone() {
- return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
- }
-
- inline Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
-#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race in
- // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
- // release store.
- void SynchronizedHeapLoad();
-#endif
-
- template <RememberedSetType type>
- bool ContainsSlots() {
- return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
- invalidated_slots<type>() != nullptr;
- }
-
- template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
- SlotSet* slot_set() {
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
- return slot_set_[type];
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- SlotSet* sweeping_slot_set() {
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
- return sweeping_slot_set_;
- }
-
- template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
- TypedSlotSet* typed_slot_set() {
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
- return typed_slot_set_[type];
- }
-
- template <RememberedSetType type>
- V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
- SlotSet* AllocateSweepingSlotSet();
- SlotSet* AllocateSlotSet(SlotSet** slot_set);
-
- // Not safe to be called concurrently.
- template <RememberedSetType type>
- void ReleaseSlotSet();
- void ReleaseSlotSet(SlotSet** slot_set);
- void ReleaseSweepingSlotSet();
- template <RememberedSetType type>
- TypedSlotSet* AllocateTypedSlotSet();
- // Not safe to be called concurrently.
- template <RememberedSetType type>
- void ReleaseTypedSlotSet();
-
- template <RememberedSetType type>
- InvalidatedSlots* AllocateInvalidatedSlots();
- template <RememberedSetType type>
- void ReleaseInvalidatedSlots();
- template <RememberedSetType type>
- V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
- void InvalidateRecordedSlots(HeapObject object);
- template <RememberedSetType type>
- bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
- template <RememberedSetType type>
- InvalidatedSlots* invalidated_slots() {
- return invalidated_slots_[type];
- }
-
- void ReleaseLocalTracker();
-
- void AllocateYoungGenerationBitmap();
- void ReleaseYoungGenerationBitmap();
-
- int FreeListsLength();
-
- // Approximate amount of physical memory committed for this chunk.
- V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
-
- Address HighWaterMark() { return address() + high_water_mark_; }
-
- size_t ProgressBar() {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.load(std::memory_order_acquire);
- }
-
- bool TrySetProgressBar(size_t old_value, size_t new_value) {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.compare_exchange_strong(old_value, new_value,
- std::memory_order_acq_rel);
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- progress_bar_.store(0, std::memory_order_release);
- }
- }
-
- inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
- return external_backing_store_bytes_[type];
- }
-
- // Some callers rely on the fact that this can operate on both
- // tagged and aligned object addresses.
- inline uint32_t AddressToMarkbitIndex(Address addr) const {
- return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) const {
- return this->address() + (index << kTaggedSizeLog2);
- }
-
- bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
-
- void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
-
- bool CanAllocate() {
- return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
- return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
- ((flags & COMPACTION_WAS_ABORTED) == 0);
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
- bool IsToPage() const { return IsFlagSet(TO_PAGE); }
- bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
- bool InYoungGeneration() const {
- return (GetFlags() & kIsInYoungGenerationMask) != 0;
- }
- bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
- bool InNewLargeObjectSpace() const {
- return InYoungGeneration() && IsLargePage();
- }
- bool InOldSpace() const;
- V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
-
- // Gets the chunk's owner or null if the space has been detached.
- Space* owner() const { return owner_; }
-
- void set_owner(Space* space) { owner_ = space; }
-
- bool IsWritable() const {
- // If this is a read-only space chunk but heap_ is non-null, it has not yet
- // been sealed and can be written to.
- return !InReadOnlySpace() || heap_ != nullptr;
- }
-
- // Gets the chunk's allocation space, potentially dealing with a null owner_
- // (like read-only chunks have).
- inline AllocationSpace owner_identity() const;
-
- // Emits a memory barrier. For TSAN builds the other thread needs to perform
- // MemoryChunk::synchronized_heap() to simulate the barrier.
- void InitializationMemoryFence();
-
- V8_EXPORT_PRIVATE void SetReadable();
- V8_EXPORT_PRIVATE void SetReadAndExecutable();
- V8_EXPORT_PRIVATE void SetReadAndWritable();
-
- void SetDefaultCodePermissions() {
- if (FLAG_jitless) {
- SetReadable();
- } else {
- SetReadAndExecutable();
- }
- }
-
- base::ListNode<MemoryChunk>& list_node() { return list_node_; }
-
- CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
-
- FreeList* free_list() { return owner()->free_list(); }
-
- PossiblyEmptyBuckets* possibly_empty_buckets() {
- return &possibly_empty_buckets_;
- }
-
- protected:
- static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation);
-
- // Release all memory allocated by the chunk. Should be called when memory
- // chunk is about to be freed.
- void ReleaseAllAllocatedMemory();
- // Release memory allocated by the chunk, except that which is needed by
- // read-only space chunks.
- void ReleaseAllocatedMemoryNeededForWritableChunk();
-
- // Sets the requested page permissions only if the write unprotect counter
- // has reached 0.
- void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
- PageAllocator::Permission permission);
-
- VirtualMemory* reserved_memory() { return &reservation_; }
-
- template <AccessMode mode>
- ConcurrentBitmap<mode>* marking_bitmap() const {
- return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
- }
-
- template <AccessMode mode>
- ConcurrentBitmap<mode>* young_generation_bitmap() const {
- return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
- }
-
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
-
- // The space owning this memory chunk.
- std::atomic<Space*> owner_;
-
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- std::atomic<size_t> progress_bar_;
-
- // Count of bytes marked black on page.
- intptr_t live_byte_count_;
-
- // A single slot set for small pages (of size kPageSize) or an array of slot
- // set for large pages. In the latter case the number of entries in the array
- // is ceil(size() / kPageSize).
- SlotSet* sweeping_slot_set_;
- TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
-
- // Assuming the initial allocation on a page is sequential,
- // count highest number of bytes ever allocated on the page.
- std::atomic<intptr_t> high_water_mark_;
-
- base::Mutex* mutex_;
-
- std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
-
- base::Mutex* page_protection_change_mutex_;
-
- // This field is only relevant for code pages. It depicts the number of
- // times a component requested this page to be read+writeable. The
- // counter is decremented when a component resets to read+executable.
- // If Value() == 0 => The memory is read and executable.
- // If Value() >= 1 => The Memory is read and writable (and maybe executable).
- // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
- // excessive nesting of scopes.
- // All executable MemoryChunks are allocated rw based on the assumption that
- // they will be used immediatelly for an allocation. They are initialized
- // with the number of open CodeSpaceMemoryModificationScopes. The caller
- // that triggers the page allocation is responsible for decrementing the
- // counter.
- uintptr_t write_unprotect_counter_;
-
- // Byte allocated on the page, which includes all objects on the page
- // and the linear allocation area.
- size_t allocated_bytes_;
-
- // Tracks off-heap memory used by this memory chunk.
- std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
-
- // Freed memory that was not added to the free list.
- size_t wasted_memory_;
-
- base::ListNode<MemoryChunk> list_node_;
-
- FreeListCategory** categories_;
-
- LocalArrayBufferTracker* local_tracker_;
-
- std::atomic<intptr_t> young_generation_live_byte_count_;
- Bitmap* young_generation_bitmap_;
-
- CodeObjectRegistry* code_object_registry_;
-
- PossiblyEmptyBuckets possibly_empty_buckets_;
-
- private:
- void InitializeReservedMemory() { reservation_.Reset(); }
-
- friend class ConcurrentMarkingState;
- friend class MajorMarkingState;
- friend class MajorAtomicMarkingState;
- friend class MajorNonAtomicMarkingState;
- friend class MemoryAllocator;
- friend class MinorMarkingState;
- friend class MinorNonAtomicMarkingState;
- friend class PagedSpace;
-};
-
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
@@ -1053,7 +611,8 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
+ for (int i = kFirstCategory;
+ i < owner()->free_list()->number_of_categories(); i++) {
callback(categories_[i]);
}
}
@@ -1115,51 +674,9 @@ class Page : public MemoryChunk {
friend class MemoryAllocator;
};
-class ReadOnlyPage : public Page {
- public:
- // Clears any pointers in the header that point out of the page that would
- // otherwise make the header non-relocatable.
- void MakeHeaderRelocatable();
-
- private:
- friend class ReadOnlySpace;
-};
-
-class LargePage : public MemoryChunk {
- public:
- // A limit to guarantee that we do not overflow the typed slot offset in
- // the old-to-old remembered set.
- // Note that this limit is higher than what the assembler already imposes on
- // x64 and ia32 architectures.
- static const int kMaxCodePageSize = 512 * MB;
-
- static LargePage* FromHeapObject(HeapObject o) {
- return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
- }
-
- inline HeapObject GetObject();
-
- inline LargePage* next_page() {
- return static_cast<LargePage*>(list_node_.next());
- }
-
- // Uncommit memory that is not in use anymore by the object. If the object
- // cannot be shrunk, 0 is returned.
- Address GetAddressToShrink(Address object_address, size_t object_size);
-
- void ClearOutOfLiveRangeSlots(Address free_start);
-
- private:
- static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable);
-
- friend class MemoryAllocator;
-};
-
// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
-STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
// The process-wide singleton that keeps track of code range regions with the
@@ -1704,6 +1221,16 @@ class AllocationStats {
public:
AllocationStats() { Clear(); }
+ AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
+ capacity_ = stats.capacity_.load();
+ max_capacity_ = stats.max_capacity_;
+ size_.store(stats.size_);
+#ifdef DEBUG
+ allocated_on_page_ = stats.allocated_on_page_;
+#endif
+ return *this;
+ }
+
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
@@ -1727,8 +1254,11 @@ class AllocationStats {
#endif
void IncreaseAllocatedBytes(size_t bytes, Page* page) {
- DCHECK_GE(size_ + bytes, size_);
- size_ += bytes;
+#ifdef DEBUG
+ size_t size = size_;
+ DCHECK_GE(size + bytes, size);
+#endif
+ size_.fetch_add(bytes);
#ifdef DEBUG
allocated_on_page_[page] += bytes;
#endif
@@ -1736,7 +1266,7 @@ class AllocationStats {
void DecreaseAllocatedBytes(size_t bytes, Page* page) {
DCHECK_GE(size_, bytes);
- size_ -= bytes;
+ size_.fetch_sub(bytes);
#ifdef DEBUG
DCHECK_GE(allocated_on_page_[page], bytes);
allocated_on_page_[page] -= bytes;
@@ -1768,7 +1298,7 @@ class AllocationStats {
size_t max_capacity_;
// |size_|: The number of allocated bytes.
- size_t size_;
+ std::atomic<size_t> size_;
#ifdef DEBUG
std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
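
// Illustrative sketch (not part of the patch): the AllocationStats hunks above
// replace a plain size_t byte counter with std::atomic<size_t> and switch the
// updates to fetch_add/fetch_sub, presumably so the counter can be updated from
// the background allocation path introduced elsewhere in this patch. A minimal
// standalone version of that pattern, with hypothetical names, looks like this:
#include <atomic>
#include <cassert>
#include <cstddef>

class ByteCounter {
 public:
  void Increase(size_t bytes) {
    // fetch_add is atomic, so concurrent increments cannot lose updates.
    size_.fetch_add(bytes, std::memory_order_relaxed);
  }
  void Decrease(size_t bytes) {
    assert(size_.load(std::memory_order_relaxed) >= bytes);
    size_.fetch_sub(bytes, std::memory_order_relaxed);
  }
  size_t Value() const { return size_.load(std::memory_order_relaxed); }

 private:
  std::atomic<size_t> size_{0};
};
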
@@ -1810,7 +1340,7 @@ class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
inline Page* GetPageForSize(size_t size_in_bytes) override;
FreeListLegacy();
- ~FreeListLegacy();
+ ~FreeListLegacy() override;
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
@@ -1894,7 +1424,7 @@ class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
inline Page* GetPageForSize(size_t size_in_bytes) override;
FreeListFastAlloc();
- ~FreeListFastAlloc();
+ ~FreeListFastAlloc() override;
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
@@ -1940,7 +1470,7 @@ class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
Page* GetPageForSize(size_t size_in_bytes) override;
FreeListMany();
- ~FreeListMany();
+ ~FreeListMany() override;
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
@@ -2136,7 +1666,7 @@ class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
Page* GetPageForSize(size_t size_in_bytes) override;
FreeListMap();
- ~FreeListMap();
+ ~FreeListMap() override;
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
size_t* node_size,
@@ -2185,13 +1715,16 @@ class LocalAllocationBuffer {
AllocationResult result,
intptr_t size);
- ~LocalAllocationBuffer() { Close(); }
+ ~LocalAllocationBuffer() { CloseAndMakeIterable(); }
- // Convert to C++11 move-semantics once allowed by the style guide.
- LocalAllocationBuffer(const LocalAllocationBuffer& other) V8_NOEXCEPT;
- LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other)
+ LocalAllocationBuffer(const LocalAllocationBuffer& other) = delete;
+ V8_EXPORT_PRIVATE LocalAllocationBuffer(LocalAllocationBuffer&& other)
V8_NOEXCEPT;
+ LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other) = delete;
+ V8_EXPORT_PRIVATE LocalAllocationBuffer& operator=(
+ LocalAllocationBuffer&& other) V8_NOEXCEPT;
+
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
@@ -2204,7 +1737,8 @@ class LocalAllocationBuffer {
inline bool TryFreeLast(HeapObject object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
- V8_EXPORT_PRIVATE LinearAllocationArea Close();
+ V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
+ void MakeIterable();
private:
V8_EXPORT_PRIVATE LocalAllocationBuffer(
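
// Illustrative sketch (not part of the patch): the LocalAllocationBuffer hunks
// above delete the copy operations, add move operations, and have the destructor
// call CloseAndMakeIterable(), i.e. the LAB becomes a move-only handle that is
// torn down exactly once. A minimal standalone move-only wrapper (hypothetical
// names) follows the same shape:
#include <utility>

class MoveOnlyBuffer {
 public:
  explicit MoveOnlyBuffer(void* start) : start_(start) {}
  ~MoveOnlyBuffer() { Close(); }

  MoveOnlyBuffer(const MoveOnlyBuffer&) = delete;
  MoveOnlyBuffer& operator=(const MoveOnlyBuffer&) = delete;

  MoveOnlyBuffer(MoveOnlyBuffer&& other) noexcept
      : start_(std::exchange(other.start_, nullptr)) {}
  MoveOnlyBuffer& operator=(MoveOnlyBuffer&& other) noexcept {
    if (this != &other) {
      Close();  // Release the current buffer before taking over the other one.
      start_ = std::exchange(other.start_, nullptr);
    }
    return *this;
  }

 private:
  void Close() { start_ = nullptr; }  // Stand-in for CloseAndMakeIterable().
  void* start_;
};
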
@@ -2355,6 +1889,15 @@ class V8_EXPORT_PRIVATE PagedSpace
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
+ // Allocate the requested number of bytes in the space from a background
+ // thread.
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
@@ -2576,6 +2119,12 @@ class V8_EXPORT_PRIVATE PagedSpace
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
int size_in_bytes, AllocationOrigin origin);
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
Executability executable_;
LocalSpaceKind local_space_kind_;
@@ -2588,6 +2137,9 @@ class V8_EXPORT_PRIVATE PagedSpace
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
+ // Mutex guarding concurrent allocation.
+ base::Mutex allocation_mutex_;
+
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -3204,228 +2756,6 @@ class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
void RefillFreeList() override;
};
-// -----------------------------------------------------------------------------
-// Read Only space for all Immortal Immovable and Immutable objects
-
-class ReadOnlySpace : public PagedSpace {
- public:
- explicit ReadOnlySpace(Heap* heap);
-
- // TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
- // memory_chunk_list_.
- ~ReadOnlySpace() override { Unseal(); }
-
- bool writable() const { return !is_marked_read_only_; }
-
- bool Contains(Address a) = delete;
- bool Contains(Object o) = delete;
-
- V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
-
- enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
-
- // Seal the space by marking it read-only, optionally detaching it
- // from the heap and forgetting it for memory bookkeeping purposes (e.g.
- // prevent the space's memory from registering as leaked).
- void Seal(SealMode ro_mode);
-
- // During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- void RepairFreeListsAfterDeserialization();
-
- size_t Available() override { return 0; }
-
- private:
- // Unseal the space after it has been sealed, by making it writable.
- // TODO(v8:7464): Only possible if the space hasn't been detached.
- void Unseal();
- void SetPermissionsForPages(MemoryAllocator* memory_allocator,
- PageAllocator::Permission access);
-
- bool is_marked_read_only_ = false;
-
- //
- // String padding must be cleared just before serialization and therefore the
- // string padding in the space will already have been cleared if the space was
- // deserialized.
- bool is_string_padding_cleared_;
-};
-
-// -----------------------------------------------------------------------------
-// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
-// managed by the large object space.
-// Large objects do not move during garbage collections.
-
-class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
- public:
- using iterator = LargePageIterator;
-
- ~LargeObjectSpace() override { TearDown(); }
-
- // Releases internal resources, frees objects in this space.
- void TearDown();
-
- // Available bytes for objects in this space.
- size_t Available() override;
-
- size_t Size() override { return size_; }
- size_t SizeOfObjects() override { return objects_size_; }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
-
- int PageCount() { return page_count_; }
-
- // Frees unmarked objects.
- virtual void FreeUnmarkedObjects();
-
- // Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject obj);
- // Checks whether an address is in the object area in this space. Iterates
- // all objects in the space. May be slow.
- bool ContainsSlow(Address addr);
-
- // Checks whether the space is empty.
- bool IsEmpty() { return first_page() == nullptr; }
-
- virtual void AddPage(LargePage* page, size_t object_size);
- virtual void RemovePage(LargePage* page, size_t object_size);
-
- LargePage* first_page() {
- return reinterpret_cast<LargePage*>(Space::first_page());
- }
-
- iterator begin() { return iterator(first_page()); }
- iterator end() { return iterator(nullptr); }
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
- virtual bool is_off_thread() const { return false; }
-
-#ifdef VERIFY_HEAP
- virtual void Verify(Isolate* isolate);
-#endif
-
-#ifdef DEBUG
- void Print() override;
-#endif
-
- protected:
- LargeObjectSpace(Heap* heap, AllocationSpace id);
-
- LargePage* AllocateLargePage(int object_size, Executability executable);
-
- size_t size_; // allocated bytes
- int page_count_; // number of chunks
- size_t objects_size_; // size of objects
-
- private:
- friend class LargeObjectSpaceObjectIterator;
-};
-
-class OffThreadLargeObjectSpace;
-
-class OldLargeObjectSpace : public LargeObjectSpace {
- public:
- explicit OldLargeObjectSpace(Heap* heap);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRaw(int object_size);
-
- // Clears the marking state of live objects.
- void ClearMarkingStateOfLiveObjects();
-
- void PromoteNewLargeObject(LargePage* page);
-
- V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);
-
- protected:
- explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
- Executability executable);
-};
-
-class NewLargeObjectSpace : public LargeObjectSpace {
- public:
- NewLargeObjectSpace(Heap* heap, size_t capacity);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRaw(int object_size);
-
- // Available bytes for objects in this space.
- size_t Available() override;
-
- void Flip();
-
- void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
-
- void SetCapacity(size_t capacity);
-
- // The last allocated object that is not guaranteed to be initialized when
- // the concurrent marker visits it.
- Address pending_object() {
- return pending_object_.load(std::memory_order_relaxed);
- }
-
- void ResetPendingObject() { pending_object_.store(0); }
-
- private:
- std::atomic<Address> pending_object_;
- size_t capacity_;
-};
-
-class CodeLargeObjectSpace : public OldLargeObjectSpace {
- public:
- explicit CodeLargeObjectSpace(Heap* heap);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRaw(int object_size);
-
- // Finds a large object page containing the given address, returns nullptr
- // if such a page doesn't exist.
- LargePage* FindPage(Address a);
-
- protected:
- void AddPage(LargePage* page, size_t object_size) override;
- void RemovePage(LargePage* page, size_t object_size) override;
-
- private:
- static const size_t kInitialChunkMapCapacity = 1024;
- void InsertChunkMapEntries(LargePage* page);
- void RemoveChunkMapEntries(LargePage* page);
-
- // Page-aligned addresses to their corresponding LargePage.
- std::unordered_map<Address, LargePage*> chunk_map_;
-};
-
-class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
- public:
- explicit OffThreadLargeObjectSpace(Heap* heap);
-
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
-
- void FreeUnmarkedObjects() override;
-
- bool is_off_thread() const override { return true; }
-
- protected:
- // OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
- friend class OldLargeObjectSpace;
-
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
- Executability executable);
-};
-
-class LargeObjectSpaceObjectIterator : public ObjectIterator {
- public:
- explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
-
- HeapObject Next() override;
-
- private:
- LargePage* current_;
-};
-
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 67062503d3..155b970ef6 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -9,7 +9,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -247,6 +247,78 @@ void Sweeper::EnsureCompleted() {
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
+V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
+ Address free_start, Address free_end, Page* page, Space* space,
+ bool non_empty_typed_slots, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+ CHECK_GT(free_end, free_start);
+ size_t freed_bytes = 0;
+ size_t size = static_cast<size_t>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ ZapCode(free_start, size);
+ }
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
+
+ } else {
+ Heap::CreateFillerObjectAt(ReadOnlyRoots(page->heap()), free_start,
+ static_cast<int>(size),
+ ClearFreedMemoryMode::kClearFreedMemory);
+ }
+ if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);
+
+ return freed_bytes;
+}
+
+V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
+ Address free_start, Address free_end, Page* page,
+ bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
+ InvalidatedSlotsCleanup* old_to_new_cleanup) {
+ DCHECK_LE(free_start, free_end);
+ RememberedSetSweeping::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ if (non_empty_typed_slots) {
+ free_ranges_map->insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - page->address()),
+ static_cast<uint32_t>(free_end - page->address())));
+ }
+
+ old_to_new_cleanup->Free(free_start, free_end);
+}
+
+void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
+ Page* page, const FreeRangesMap& free_ranges_map) {
+ if (!free_ranges_map.empty()) {
+ TypedSlotSet* old_to_new = page->typed_slot_set<OLD_TO_NEW>();
+ if (old_to_new != nullptr) {
+ old_to_new->ClearInvalidSlots(free_ranges_map);
+ }
+ TypedSlotSet* old_to_old = page->typed_slot_set<OLD_TO_OLD>();
+ if (old_to_old != nullptr) {
+ old_to_old->ClearInvalidSlots(free_ranges_map);
+ }
+ }
+}
+
+void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
+ Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode) {
+ marking_state_->bitmap(page)->Clear();
+ if (free_list_mode == IGNORE_FREE_LIST) {
+ marking_state_->SetLiveBytes(page, 0);
+ // We did not free memory, so have to adjust allocated bytes here.
+ intptr_t freed_bytes = page->area_size() - live_bytes;
+ page->DecreaseAllocatedBytes(freed_bytes);
+ } else {
+ // Keep the old live bytes counter of the page until RefillFreeList, where
+ // the space size is refined.
+ // The allocated_bytes() counter is precisely the total size of objects.
+ DCHECK_EQ(live_bytes, page->allocated_bytes());
+ }
+}
+
int Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
@@ -258,7 +330,26 @@ int Sweeper::RawSweep(
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ // Phase 1: Prepare the page for sweeping.
+
+ // Before we sweep objects on the page, we free dead array buffers which
+ // requires valid mark bits.
+ ArrayBufferTracker::FreeDead(p, marking_state_);
+
+ // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
+ // counter. The free operations below will decrease allocated_bytes_ to actual
+ // live bytes and keep track of wasted_memory_.
+ p->ResetAllocationStatistics();
+
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
+ if (code_object_registry) code_object_registry->Clear();
+
+ // Phase 2: Free the non-live memory and clean up the regular remembered set
+ // entries.
+
+ // Liveness and freeing statistics.
+ size_t live_bytes = 0;
+ size_t max_freed_bytes = 0;
// TODO(ulan): we don't have to clear typed old-to-old slots in code space
// because the concurrent marker doesn't mark code objects. This requires
@@ -266,35 +357,21 @@ int Sweeper::RawSweep(
bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
p->typed_slot_set<OLD_TO_OLD>() != nullptr;
- // The free ranges map is used for filtering typed slots.
- std::map<uint32_t, uint32_t> free_ranges;
-
- // Before we sweep objects on the page, we free dead array buffers which
- // requires valid mark bits.
- ArrayBufferTracker::FreeDead(p, marking_state_);
-
- Address free_start = p->area_start();
- InvalidatedSlotsCleanup old_to_new_cleanup =
- InvalidatedSlotsCleanup::NoCleanup(p);
-
// Clean invalidated slots during the final atomic pause. After resuming
// execution this isn't necessary, invalid old-to-new refs were already
// removed by mark compact's update pointers phase.
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::NoCleanup(p);
if (invalidated_slots_in_free_space ==
FreeSpaceMayContainInvalidatedSlots::kYes)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
- intptr_t live_bytes = 0;
- intptr_t freed_bytes = 0;
- intptr_t max_freed_bytes = 0;
-
- // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
- // counter. The free operations below will decrease allocated_bytes_ to actual
- // live bytes and keep track of wasted_memory_.
- p->ResetAllocationStatistics();
-
- if (code_object_registry) code_object_registry->Clear();
+ // The free ranges map is used for filtering typed slots.
+ FreeRangesMap free_ranges_map;
+ // Iterate over the live objects on the page and free the memory before
+ // each live object.
+ Address free_start = p->area_start();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject const object = object_and_size.first;
@@ -303,32 +380,14 @@ int Sweeper::RawSweep(
DCHECK(marking_state_->IsBlack(object));
Address free_end = object.address();
if (free_end != free_start) {
- CHECK_GT(free_end, free_start);
- size_t size = static_cast<size_t>(free_end - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- ZapCode(free_start, size);
- }
- if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
- free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- } else {
- p->heap()->CreateFillerObjectAt(
- free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
- ClearFreedMemoryMode::kClearFreedMemory);
- }
- if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSetSweeping::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
- free_ranges.insert(std::pair<uint32_t, uint32_t>(
- static_cast<uint32_t>(free_start - p->address()),
- static_cast<uint32_t>(free_end - p->address())));
- }
-
- old_to_new_cleanup.Free(free_start, free_end);
+ max_freed_bytes =
+ Max(max_freed_bytes,
+ FreeAndProcessFreedMemory(free_start, free_end, p, space,
+ non_empty_typed_slots, free_list_mode,
+ free_space_mode));
+ CleanupRememberedSetEntriesForFreedMemory(
+ free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
+ &old_to_new_cleanup);
}
Map map = object.synchronized_map();
int size = object.SizeFromMap(map);
@@ -336,65 +395,29 @@ int Sweeper::RawSweep(
free_start = free_end + size;
}
- if (free_start != p->area_end()) {
- CHECK_GT(p->area_end(), free_start);
- size_t size = static_cast<size_t>(p->area_end() - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- ZapCode(free_start, size);
- }
- if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
- free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- } else {
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo,
- ClearFreedMemoryMode::kClearFreedMemory);
- }
- if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
- RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
- SlotSet::KEEP_EMPTY_BUCKETS);
- if (non_empty_typed_slots) {
- free_ranges.insert(std::pair<uint32_t, uint32_t>(
- static_cast<uint32_t>(free_start - p->address()),
- static_cast<uint32_t>(p->area_end() - p->address())));
- }
-
- old_to_new_cleanup.Free(free_start, p->area_end());
+ // If there is free memory after the last live object, also free that.
+ Address free_end = p->area_end();
+ if (free_end != free_start) {
+ max_freed_bytes =
+ Max(max_freed_bytes,
+ FreeAndProcessFreedMemory(free_start, free_end, p, space,
+ non_empty_typed_slots, free_list_mode,
+ free_space_mode));
+ CleanupRememberedSetEntriesForFreedMemory(
+ free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
+ &old_to_new_cleanup);
}
- // Clear invalid typed slots after collecting all free ranges.
- if (!free_ranges.empty()) {
- TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
- if (old_to_new != nullptr) {
- old_to_new->ClearInvalidSlots(free_ranges);
- }
- TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
- if (old_to_old != nullptr) {
- old_to_old->ClearInvalidSlots(free_ranges);
- }
- }
+ // Phase 3: Post process the page.
+ CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
+ ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
- marking_state_->bitmap(p)->Clear();
- if (free_list_mode == IGNORE_FREE_LIST) {
- marking_state_->SetLiveBytes(p, 0);
- // We did not free memory, so have to adjust allocated bytes here.
- intptr_t freed_bytes = p->area_size() - live_bytes;
- p->DecreaseAllocatedBytes(freed_bytes);
- } else {
- // Keep the old live bytes counter of the page until RefillFreeList, where
- // the space size is refined.
- // The allocated_bytes() counter is precisely the total size of objects.
- DCHECK_EQ(live_bytes, p->allocated_bytes());
- }
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(
- p->free_list()->GuaranteedAllocatable(max_freed_bytes));
+ p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 7e6757351f..3bc199a92d 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SWEEPER_H_
#include <deque>
+#include <map>
#include <vector>
#include "src/base/platform/semaphore.h"
@@ -15,9 +16,11 @@
namespace v8 {
namespace internal {
+class InvalidatedSlotsCleanup;
class MajorNonAtomicMarkingState;
class Page;
class PagedSpace;
+class Space;
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
@@ -26,6 +29,7 @@ class Sweeper {
using IterabilityList = std::vector<Page*>;
using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
+ using FreeRangesMap = std::map<uint32_t, uint32_t>;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope final {
@@ -127,6 +131,33 @@ class Sweeper {
callback(MAP_SPACE);
}
+ // Helper function for RawSweep. Depending on the FreeListRebuildingMode and
+ // FreeSpaceTreatmentMode this function may add the free memory to a free
+ // list, make the memory iterable, clear it, and return the free memory to
+ // the operating system.
+ size_t FreeAndProcessFreedMemory(Address free_start, Address free_end,
+ Page* page, Space* space,
+ bool non_empty_typed_slots,
+ FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+
+ // Helper function for RawSweep. Handles remembered set entries in the freed
+ // memory which require clearing.
+ void CleanupRememberedSetEntriesForFreedMemory(
+ Address free_start, Address free_end, Page* page,
+ bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
+ InvalidatedSlotsCleanup* old_to_new_cleanup);
+
+ // Helper function for RawSweep. Clears invalid typed slots in the given free
+ // ranges.
+ void CleanupInvalidTypedSlotsOfFreeRanges(
+ Page* page, const FreeRangesMap& free_ranges_map);
+
+ // Helper function for RawSweep. Clears the mark bits and ensures consistency
+ // of live bytes.
+ void ClearMarkBitsAndHandleLivenessStatistics(
+ Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode);
+
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
bool is_done = true;
@@ -173,7 +204,9 @@ class Sweeper {
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
bool incremental_sweeper_pending_;
- bool sweeping_in_progress_;
+ // The main thread can finalize sweeping, while the allocation slow path on
+ // background threads checks this flag to see whether it could support
+ // concurrent sweeping.
+ std::atomic<bool> sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
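
// Illustrative sketch (not part of the patch): FreeRangesMap above is a
// std::map<uint32_t, uint32_t> mapping the start offset of each freed range on
// a page to its end offset (see the insert in
// CleanupRememberedSetEntriesForFreedMemory). Testing whether a slot offset
// falls inside any freed range -- the essence of filtering invalid typed slots --
// then takes a single upper_bound lookup. Hypothetical, standalone version:
#include <cstdint>
#include <map>

using FreeRangesMap = std::map<uint32_t, uint32_t>;  // start offset -> end offset

bool OffsetInFreedRange(const FreeRangesMap& free_ranges, uint32_t offset) {
  // Find the first range starting after `offset`, then step back to the range
  // that starts at or before it (ranges are disjoint, so at most one can match).
  auto it = free_ranges.upper_bound(offset);
  if (it == free_ranges.begin()) return false;
  --it;
  return offset >= it->first && offset < it->second;
}
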
diff --git a/deps/v8/src/heap/third-party/heap-api-stub.cc b/deps/v8/src/heap/third-party/heap-api-stub.cc
new file mode 100644
index 0000000000..6d31479bec
--- /dev/null
+++ b/deps/v8/src/heap/third-party/heap-api-stub.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate-utils-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/third-party/heap-api.h"
+
+namespace v8 {
+namespace internal {
+
+Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
+ return GetHeapFromWritableObject(object)->isolate();
+}
+
+} // namespace internal
+} // namespace v8
+
+namespace v8 {
+namespace internal {
+namespace third_party_heap {
+
+// static
+std::unique_ptr<Heap> Heap::New(v8::internal::Isolate*) { return nullptr; }
+
+// static
+v8::internal::Isolate* Heap::GetIsolate(Address) { return nullptr; }
+
+AllocationResult Heap::Allocate(size_t, AllocationType, AllocationAlignment) {
+ return AllocationResult();
+}
+
+Address Heap::GetObjectFromInnerPointer(Address) { return 0; }
+
+const base::AddressRegion& Heap::GetCodeRange() {
+ static const base::AddressRegion no_region(0, 0);
+ return no_region;
+}
+
+// static
+bool Heap::InCodeSpace(Address) { return false; }
+
+// static
+bool Heap::InReadOnlySpace(Address) { return false; }
+
+// static
+bool Heap::IsValidHeapObject(HeapObject) { return false; }
+
+bool Heap::CollectGarbage() { return false; }
+
+} // namespace third_party_heap
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index d3731a7465..fb82a23c32 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -208,7 +208,7 @@ void AccessorAssembler::HandleLoadAccessor(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
TNode<RawPtrT> callback =
- LoadObjectField<RawPtrT>(foreign, Foreign::kForeignAddressOffset);
+ DecodeExternalPointer(LoadForeignForeignAddress(foreign));
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -1655,7 +1655,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
TNode<RawPtrT> callback =
- LoadObjectField<RawPtrT>(foreign, Foreign::kForeignAddressOffset);
+ DecodeExternalPointer(LoadForeignForeignAddress(foreign));
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -3654,6 +3654,9 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
+ CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ MegamorphicSymbolConstant()));
+
TryProbeStubCache(isolate()->load_stub_cache(), receiver, CAST(name),
&if_handler, &var_handler, &miss);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index b508a20f80..452275d13f 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -4,7 +4,6 @@
#include "src/ic/ic.h"
-#include "include/v8config.h"
#include "src/api/api-arguments-inl.h"
#include "src/api/api.h"
#include "src/ast/ast.h"
@@ -695,10 +694,6 @@ void IC::SetCache(Handle<Name> name, const MaybeObjectHandle& handler) {
}
}
-#if defined(__clang__) && defined(V8_OS_WIN)
-// Force function alignment to work around CPU bug: https://crbug.com/968683
-__attribute__((__aligned__(32)))
-#endif
void LoadIC::UpdateCaches(LookupIterator* lookup) {
Handle<Object> code;
if (lookup->state() == LookupIterator::ACCESS_CHECK) {
@@ -1428,6 +1423,28 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
}
+ // If we are in StoreGlobal then check if we should throw on non-existent
+ // properties.
+ if (IsStoreGlobalIC() &&
+ (GetShouldThrow(it->isolate(), Nothing<ShouldThrow>()) ==
+ ShouldThrow::kThrowOnError)) {
+ // ICs typically do the store in two steps: prepare the receiver for the
+ // transition followed by the actual store. For global objects we create a
+ // property cell when preparing for transition and install this cell in the
+ // handler. In strict mode, we throw and never initialize this property
+ // cell. The IC handler assumes that the property cell it is holding is for
+ // an existing property. This case violates that assumption. If we
+ // happen to invalidate this property cell later, it leads to incorrect
+ // behaviour. For now just use a slow stub and don't install the property
+ // cell for these cases. Hopefully these cases are not frequent enough to
+ // impact performance.
+ //
+ // TODO(mythria): If we find this to be happening often, we could install a
+ // new kind of handler for non-existent properties. These handlers can then
+ // miss to runtime if the value is not the hole (i.e. the cell got invalidated) and
+ // handle these stores correctly.
+ return false;
+ }
receiver = it->GetStoreTarget<JSObject>();
if (it->ExtendingNonExtensible(receiver)) return false;
it->PrepareTransitionToDataProperty(receiver, value, NONE, store_origin);
@@ -1918,8 +1935,12 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode,
MaybeHandle<Object> prev_validity_cell) {
+ // The only case when we could keep using a non-slow element store handler
+ // for a fast array with potentially read-only elements is when it's an
+ // initializing store to an array literal.
DCHECK_IMPLIES(
- receiver_map->DictionaryElementsInPrototypeChainOnly(isolate()),
+ !receiver_map->has_dictionary_elements() &&
+ receiver_map->MayHaveReadOnlyElementsInPrototypeChain(isolate()),
IsStoreInArrayLiteralICKind(kind()));
if (receiver_map->IsJSProxyMap()) {
@@ -1984,7 +2005,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
Handle<Map> transition;
if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE ||
- receiver_map->DictionaryElementsInPrototypeChainOnly(isolate())) {
+ receiver_map->MayHaveReadOnlyElementsInPrototypeChain(isolate())) {
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
@@ -2147,17 +2168,18 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
} else if (object->IsJSArray() && IsGrowStoreMode(store_mode) &&
JSArray::HasReadOnlyLength(Handle<JSArray>::cast(object))) {
set_slow_stub_reason("array has read only length");
- } else if (object->IsJSArray() && MayHaveTypedArrayInPrototypeChain(
- Handle<JSObject>::cast(object))) {
+ } else if (object->IsJSObject() && MayHaveTypedArrayInPrototypeChain(
+ Handle<JSObject>::cast(object))) {
// Make sure we don't handle this in IC if there's any JSTypedArray in
// the {receiver}'s prototype chain, since that prototype is going to
// swallow all stores that are out-of-bounds for said prototype, and we
// just let the runtime deal with the complexity of this.
- set_slow_stub_reason("typed array in the prototype chain of an Array");
+ set_slow_stub_reason("typed array in the prototype chain");
} else if (key_is_valid_index) {
if (old_receiver_map->is_abandoned_prototype_map()) {
set_slow_stub_reason("receiver with prototype map");
- } else if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly(
+ } else if (old_receiver_map->has_dictionary_elements() ||
+ !old_receiver_map->MayHaveReadOnlyElementsInPrototypeChain(
isolate())) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
@@ -2167,7 +2189,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
UpdateStoreElement(old_receiver_map, store_mode,
handle(receiver->map(), isolate()));
} else {
- set_slow_stub_reason("dictionary or proxy prototype");
+ set_slow_stub_reason("prototype with potentially read-only elements");
}
} else {
set_slow_stub_reason("non-smi-like key");
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 88a66e1760..bc752e0e93 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -80,9 +80,9 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Nothing<LanguageMode>());
}
- void BranchIfPrototypesHaveNonFastElements(TNode<Map> receiver_map,
- Label* non_fast_elements,
- Label* only_fast_elements);
+ void BranchIfPrototypesMayHaveReadOnlyElements(
+ TNode<Map> receiver_map, Label* maybe_read_only_elements,
+ Label* only_fast_writable_elements);
void TryRewriteElements(TNode<JSObject> receiver, TNode<Map> receiver_map,
TNode<FixedArrayBase> elements,
@@ -176,9 +176,9 @@ void KeyedStoreGenericGenerator::SetPropertyInLiteral(
assembler.SetProperty(context, receiver, key, value, LanguageMode::kStrict);
}
-void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
- TNode<Map> receiver_map, Label* non_fast_elements,
- Label* only_fast_elements) {
+void KeyedStoreGenericAssembler::BranchIfPrototypesMayHaveReadOnlyElements(
+ TNode<Map> receiver_map, Label* maybe_read_only_elements,
+ Label* only_fast_writable_elements) {
TVARIABLE(Map, var_map);
var_map = receiver_map;
Label loop_body(this, &var_map);
@@ -188,16 +188,17 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
{
TNode<Map> map = var_map.value();
TNode<HeapObject> prototype = LoadMapPrototype(map);
- GotoIf(IsNull(prototype), only_fast_elements);
+ GotoIf(IsNull(prototype), only_fast_writable_elements);
TNode<Map> prototype_map = LoadMap(prototype);
var_map = prototype_map;
TNode<Uint16T> instance_type = LoadMapInstanceType(prototype_map);
GotoIf(IsCustomElementsReceiverInstanceType(instance_type),
- non_fast_elements);
+ maybe_read_only_elements);
TNode<Int32T> elements_kind = LoadMapElementsKind(prototype_map);
- GotoIf(IsFastElementsKind(elements_kind), &loop_body);
+ GotoIf(IsFastOrNonExtensibleOrSealedElementsKind(elements_kind),
+ &loop_body);
GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
- Goto(non_fast_elements);
+ Goto(maybe_read_only_elements);
}
}
@@ -350,8 +351,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
CAST(Load(MachineType::AnyTagged(), elements, offset));
GotoIf(IsNotTheHole(element), &hole_check_passed);
}
- BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
- &hole_check_passed);
+ BranchIfPrototypesMayHaveReadOnlyElements(receiver_map, slow,
+ &hole_check_passed);
BIND(&hole_check_passed);
}
}
@@ -443,7 +444,6 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, kHeaderSize);
if (!IsStoreInLiteral()) {
// Check if we're about to overwrite the hole. We can safely do that
- // Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
{
Label hole_check_passed(this);
@@ -456,8 +456,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
Goto(&hole_check_passed);
BIND(&found_hole);
}
- BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
- &hole_check_passed);
+ BranchIfPrototypesMayHaveReadOnlyElements(receiver_map, slow,
+ &hole_check_passed);
BIND(&hole_check_passed);
}
}
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 22d33eaf91..f4049e328e 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -25,6 +25,7 @@
#endif // ENABLE_VTUNE_TRACEMARK
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/numbers/math-random.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
@@ -33,6 +34,7 @@
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -188,7 +190,7 @@ class Genesis {
// Creates the global objects using the global proxy and the template passed
// in through the API. We call this regardless of whether we are building a
- // context from scratch or using a deserialized one from the partial snapshot
+ // context from scratch or using a deserialized one from the context snapshot
// but in the latter case we don't use the objects it produces directly, as
// we have to use the deserialized ones that are linked together with the
// rest of the context snapshot. At the end we link the global proxy and the
@@ -361,32 +363,11 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
isolate_->AddDetachedContext(env);
}
- env->native_context().set_microtask_queue(nullptr);
+ env->native_context().set_microtask_queue(isolate_, nullptr);
}
namespace {
-V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
- Isolate* isolate, Builtins::Name builtin_id, Handle<String> name, int len,
- FunctionKind kind = FunctionKind::kNormalFunction) {
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForBuiltin(name, builtin_id,
- kind);
- shared->set_internal_formal_parameter_count(len);
- shared->set_length(len);
- return shared;
-}
-
-V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateBuiltinSharedFunctionInfo(
- Isolate* isolate, Builtins::Name builtin_id, Handle<String> name, int len) {
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfoForBuiltin(name, builtin_id,
- kNormalFunction);
- shared->set_internal_formal_parameter_count(len);
- shared->set_length(len);
- return shared;
-}
-
V8_NOINLINE Handle<JSFunction> CreateFunction(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
int inobject_properties, Handle<HeapObject> prototype,
@@ -1334,21 +1315,24 @@ static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
isolate->native_context()->set(context_index, *function);
}
-static void InstallError(Isolate* isolate, Handle<JSObject> global,
- Handle<String> name, int context_index) {
+static void InstallError(
+ Isolate* isolate, Handle<JSObject> global, Handle<String> name,
+ int context_index,
+ Builtins::Name error_constructor = Builtins::kErrorConstructor,
+ InstanceType error_type = JS_ERROR_TYPE, int error_function_length = 1,
+ int header_size = JSObject::kHeaderSize) {
Factory* factory = isolate->factory();
// Most Error objects consist of a message and a stack trace.
// Reserve two in-object properties for these.
const int kInObjectPropertiesCount = 2;
const int kErrorObjectSize =
- JSObject::kHeaderSize + kInObjectPropertiesCount * kTaggedSize;
- Handle<JSFunction> error_fun =
- InstallFunction(isolate, global, name, JS_ERROR_TYPE, kErrorObjectSize,
- kInObjectPropertiesCount, factory->the_hole_value(),
- Builtins::kErrorConstructor);
+ header_size + kInObjectPropertiesCount * kTaggedSize;
+ Handle<JSFunction> error_fun = InstallFunction(
+ isolate, global, name, error_type, kErrorObjectSize,
+ kInObjectPropertiesCount, factory->the_hole_value(), error_constructor);
error_fun->shared().DontAdaptArguments();
- error_fun->shared().set_length(1);
+ error_fun->shared().set_length(error_function_length);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
SimpleInstallFunction(isolate, error_fun, "captureStackTrace",
@@ -1396,20 +1380,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
}
}
-namespace {
-
-void InstallMakeError(Isolate* isolate, int builtin_id, int context_index) {
- NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- isolate->factory()->empty_string(), isolate->factory()->the_hole_value(),
- JS_OBJECT_TYPE, JSObject::kHeaderSize, 0, builtin_id, MUTABLE);
-
- Handle<JSFunction> function = isolate->factory()->NewFunction(args);
- function->shared().DontAdaptArguments();
- isolate->native_context()->set(context_index, *function);
-}
-
-} // namespace
-
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
@@ -1622,47 +1592,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
}
- { // --- A s y n c F r o m S y n c I t e r a t o r
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncIteratorValueUnwrap, factory->empty_string(),
- 1);
- native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
- }
-
- { // --- A s y n c G e n e r a t o r ---
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_reject_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorYieldResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_yield_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorReturnResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_resolve_shared_fun(
- *info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_reject_shared_fun(
- *info);
- }
-
Handle<JSFunction> array_prototype_to_string_fun;
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
@@ -2345,13 +2274,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
- {
- Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
- isolate_, Builtins::kPromiseGetCapabilitiesExecutor,
- factory->empty_string(), 2);
- native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
- }
-
{ // -- P r o m i s e
Handle<JSFunction> promise_fun = InstallFunction(
isolate_, global, "Promise", JS_PROMISE_TYPE,
@@ -2398,89 +2320,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallFunctionWithBuiltinId(isolate_, prototype, "finally",
Builtins::kPromisePrototypeFinally, 1, true);
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseThenFinally,
- isolate_->factory()->empty_string(), 1);
- info->set_native(true);
- native_context()->set_promise_then_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseCatchFinally,
- isolate_->factory()->empty_string(), 1);
- info->set_native(true);
- native_context()->set_promise_catch_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseValueThunkFinally,
- isolate_->factory()->empty_string(), 0);
- native_context()->set_promise_value_thunk_finally_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate(), Builtins::kPromiseThrowerFinally,
- isolate_->factory()->empty_string(), 0);
- native_context()->set_promise_thrower_finally_shared_fun(*info);
- }
-
- // Force the Promise constructor to fast properties, so that we can use the
- // fast paths for various things like
- //
- // x instanceof Promise
- //
- // etc. We should probably come up with a more principled approach once
- // the JavaScript builtins are gone.
- JSObject::MigrateSlowToFast(Handle<JSObject>::cast(promise_fun), 0,
- "Bootstrapping");
+ DCHECK(promise_fun->HasFastProperties());
Handle<Map> prototype_map(prototype->map(), isolate());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
- { // Internal: IsPromise
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate_, factory->empty_string(), Builtins::kIsPromise, 1, false);
- native_context()->set_is_promise(*function);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kPromiseCapabilityDefaultResolve,
- factory->empty_string(), 1, FunctionKind::kConciseMethod);
- info->set_native(true);
- info->set_function_map_index(
- Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- native_context()->set_promise_capability_default_resolve_shared_fun(
- *info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kPromiseCapabilityDefaultReject,
- factory->empty_string(), 1, FunctionKind::kConciseMethod);
- info->set_native(true);
- info->set_function_map_index(
- Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- native_context()->set_promise_capability_default_reject_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kPromiseAllResolveElementClosure,
- factory->empty_string(), 1);
- native_context()->set_promise_all_resolve_element_shared_fun(*info);
- }
-
- // Force the Promise constructor to fast properties, so that we can use the
- // fast paths for various things like
- //
- // x instanceof Promise
- //
- // etc. We should probably come up with a more principled approach once
- // the JavaScript builtins are gone.
- JSObject::MigrateSlowToFast(promise_fun, 0, "Bootstrapping");
+ DCHECK(promise_fun->HasFastProperties());
}
{ // -- R e g E x p
@@ -2674,14 +2519,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_regexp_species_protector(*cell);
}
- // Force the RegExp constructor to fast properties, so that we can use the
- // fast paths for various things like
- //
- // x instanceof RegExp
- //
- // etc. We should probably come up with a more principled approach once
- // the JavaScript builtins are gone.
- JSObject::MigrateSlowToFast(regexp_fun, 0, "Bootstrapping");
+ DCHECK(regexp_fun->HasFastProperties());
}
{ // --- R e g E x p S t r i n g I t e r a t o r ---
@@ -2709,49 +2547,33 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
regexp_string_iterator_function->initial_map());
}
- { // -- E r r o r
- InstallError(isolate_, global, factory->Error_string(),
- Context::ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate_, Builtins::kMakeError, Context::MAKE_ERROR_INDEX);
- }
+ // -- E r r o r
+ InstallError(isolate_, global, factory->Error_string(),
+ Context::ERROR_FUNCTION_INDEX);
- { // -- E v a l E r r o r
- InstallError(isolate_, global, factory->EvalError_string(),
- Context::EVAL_ERROR_FUNCTION_INDEX);
- }
+ // -- E v a l E r r o r
+ InstallError(isolate_, global, factory->EvalError_string(),
+ Context::EVAL_ERROR_FUNCTION_INDEX);
- { // -- R a n g e E r r o r
- InstallError(isolate_, global, factory->RangeError_string(),
- Context::RANGE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate_, Builtins::kMakeRangeError,
- Context::MAKE_RANGE_ERROR_INDEX);
- }
+ // -- R a n g e E r r o r
+ InstallError(isolate_, global, factory->RangeError_string(),
+ Context::RANGE_ERROR_FUNCTION_INDEX);
- { // -- R e f e r e n c e E r r o r
- InstallError(isolate_, global, factory->ReferenceError_string(),
- Context::REFERENCE_ERROR_FUNCTION_INDEX);
- }
+ // -- R e f e r e n c e E r r o r
+ InstallError(isolate_, global, factory->ReferenceError_string(),
+ Context::REFERENCE_ERROR_FUNCTION_INDEX);
- { // -- S y n t a x E r r o r
- InstallError(isolate_, global, factory->SyntaxError_string(),
- Context::SYNTAX_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate_, Builtins::kMakeSyntaxError,
- Context::MAKE_SYNTAX_ERROR_INDEX);
- }
+ // -- S y n t a x E r r o r
+ InstallError(isolate_, global, factory->SyntaxError_string(),
+ Context::SYNTAX_ERROR_FUNCTION_INDEX);
- { // -- T y p e E r r o r
- InstallError(isolate_, global, factory->TypeError_string(),
- Context::TYPE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate_, Builtins::kMakeTypeError,
- Context::MAKE_TYPE_ERROR_INDEX);
- }
+ // -- T y p e E r r o r
+ InstallError(isolate_, global, factory->TypeError_string(),
+ Context::TYPE_ERROR_FUNCTION_INDEX);
- { // -- U R I E r r o r
- InstallError(isolate_, global, factory->URIError_string(),
- Context::URI_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate_, Builtins::kMakeURIError,
- Context::MAKE_URI_ERROR_INDEX);
- }
+ // -- U R I E r r o r
+ InstallError(isolate_, global, factory->URIError_string(),
+ Context::URI_ERROR_FUNCTION_INDEX);
{ // -- C o m p i l e E r r o r
Handle<JSObject> dummy = factory->NewJSObject(isolate_->object_function());
@@ -3203,6 +3025,37 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->numberingSystem_string(),
Builtins::kLocalePrototypeNumberingSystem, true);
}
+
+ { // -- D i s p l a y N a m e s
+ Handle<JSFunction> display_names_fun = InstallFunction(
+ isolate(), intl, "DisplayNames", JS_DISPLAY_NAMES_TYPE,
+ JSDisplayNames::kHeaderSize, 0, factory->the_hole_value(),
+ Builtins::kDisplayNamesConstructor);
+ display_names_fun->shared().set_length(0);
+ display_names_fun->shared().DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate(), display_names_fun,
+ Context::INTL_DISPLAY_NAMES_FUNCTION_INDEX);
+
+ SimpleInstallFunction(isolate(), display_names_fun, "supportedLocalesOf",
+ Builtins::kDisplayNamesSupportedLocalesOf, 1,
+ false);
+
+ {
+ // Setup %DisplayNamesPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(display_names_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate(), prototype, "Intl.DisplayNames");
+
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kDisplayNamesPrototypeResolvedOptions,
+ 0, false);
+
+ SimpleInstallFunction(isolate(), prototype, "of",
+ Builtins::kDisplayNamesPrototypeOf, 1, false);
+ }
+ }
}
#endif // V8_INTL_SUPPORT
@@ -3494,6 +3347,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_initial_map_prototype_map(prototype->map());
InstallSpeciesGetter(isolate_, js_map_fun);
+
+ DCHECK(js_map_fun->HasFastProperties());
+
+ native_context()->set_js_map_map(js_map_fun->initial_map());
}
{ // -- B i g I n t
@@ -3586,6 +3443,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_initial_set_prototype(*prototype);
InstallSpeciesGetter(isolate_, js_set_fun);
+
+ DCHECK(js_set_fun->HasFastProperties());
+
+ native_context()->set_js_set_map(js_set_fun->initial_map());
}
{ // -- J S M o d u l e N a m e s p a c e
@@ -3734,12 +3595,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, proxy_function, "revocable",
Builtins::kProxyRevocable, 2, true);
-
- { // Internal: ProxyRevoke
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kProxyRevoke, factory->empty_string(), 0);
- native_context()->set_proxy_revoke_shared_fun(*info);
- }
}
{ // -- R e f l e c t
@@ -3748,11 +3603,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
JSObject::AddProperty(isolate_, global, reflect_string, reflect, DONT_ENUM);
- SimpleInstallFunction(isolate_, reflect, "defineProperty",
- Builtins::kReflectDefineProperty, 3, true);
+ SimpleInstallFunction(isolate_, reflect, "defineProperty",
+ Builtins::kReflectDefineProperty, 3, true);
- SimpleInstallFunction(isolate_, reflect, "deleteProperty",
- Builtins::kReflectDeleteProperty, 2, true);
+ SimpleInstallFunction(isolate_, reflect, "deleteProperty",
+ Builtins::kReflectDeleteProperty, 2, true);
Handle<JSFunction> apply = SimpleInstallFunction(
isolate_, reflect, "apply", Builtins::kReflectApply, 3, false);
@@ -3975,9 +3830,12 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
void Genesis::InitializeExperimentalGlobal() {
#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
- HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
- HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
+ // Initialize features from more mature to less mature, because less mature
+ // features may depend on more mature features having been initialized
+ // already.
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
+ HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
+ HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -4191,20 +4049,6 @@ void Genesis::InitializeIteratorFunctions() {
Handle<Map> async_function_object_map = factory->NewMap(
JS_ASYNC_FUNCTION_OBJECT_TYPE, JSAsyncFunctionObject::kHeaderSize);
native_context->set_async_function_object_map(*async_function_object_map);
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_reject_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_resolve_shared_fun(*info);
- }
}
}
@@ -4273,17 +4117,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_chaining)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_nullish)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_logical_assignment)
#ifdef V8_INTL_SUPPORT
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_date_types)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(
harmony_intl_dateformat_fractional_second_digits)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_other_calendars)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -4308,17 +4149,11 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
{
- // Create %FinalizationRegistryPrototype%
- Handle<String> finalization_registry_name =
- factory->NewStringFromStaticChars("FinalizationRegistry");
- Handle<JSObject> finalization_registry_prototype = factory->NewJSObject(
- isolate()->object_function(), AllocationType::kOld);
-
// Create %FinalizationRegistry%
- Handle<JSFunction> finalization_registry_fun = CreateFunction(
- isolate(), finalization_registry_name, JS_FINALIZATION_REGISTRY_TYPE,
- JSFinalizationRegistry::kHeaderSize, 0, finalization_registry_prototype,
- Builtins::kFinalizationRegistryConstructor);
+ Handle<JSFunction> finalization_registry_fun = InstallFunction(
+ isolate(), global, factory->FinalizationRegistry_string(),
+ JS_FINALIZATION_REGISTRY_TYPE, JSFinalizationRegistry::kHeaderSize, 0,
+ factory->the_hole_value(), Builtins::kFinalizationRegistryConstructor);
InstallWithIntrinsicDefaultProto(
isolate(), finalization_registry_fun,
Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
@@ -4326,16 +4161,12 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
finalization_registry_fun->shared().DontAdaptArguments();
finalization_registry_fun->shared().set_length(1);
- // Install the "constructor" property on the prototype.
- JSObject::AddProperty(isolate(), finalization_registry_prototype,
- factory->constructor_string(),
- finalization_registry_fun, DONT_ENUM);
+ Handle<JSObject> finalization_registry_prototype(
+ JSObject::cast(finalization_registry_fun->instance_prototype()),
+ isolate());
InstallToStringTag(isolate(), finalization_registry_prototype,
- finalization_registry_name);
-
- JSObject::AddProperty(isolate(), global, finalization_registry_name,
- finalization_registry_fun, DONT_ENUM);
+ factory->FinalizationRegistry_string());
SimpleInstallFunction(isolate(), finalization_registry_prototype,
"register", Builtins::kFinalizationRegistryRegister,
@@ -4345,89 +4176,100 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
"unregister",
Builtins::kFinalizationRegistryUnregister, 1, false);
- SimpleInstallFunction(isolate(), finalization_registry_prototype,
- "cleanupSome",
- Builtins::kFinalizationRegistryCleanupSome, 0, false);
+ // The cleanupSome function is created but not exposed, as it is used
+ // internally by InvokeFinalizationRegistryCleanupFromTask.
+ //
+ // It is exposed by FLAG_harmony_weak_refs_with_cleanup_some.
+ Handle<JSFunction> cleanup_some_fun = SimpleCreateFunction(
+ isolate(), factory->InternalizeUtf8String("cleanupSome"),
+ Builtins::kFinalizationRegistryPrototypeCleanupSome, 0, false);
+ native_context()->set_finalization_registry_cleanup_some(*cleanup_some_fun);
}
{
- // Create %WeakRefPrototype%
- Handle<Map> weak_ref_map =
- factory->NewMap(JS_WEAK_REF_TYPE, JSWeakRef::kHeaderSize);
- DCHECK(weak_ref_map->IsJSObjectMap());
+ // Create %WeakRef%
+ Handle<JSFunction> weak_ref_fun = InstallFunction(
+ isolate(), global, factory->WeakRef_string(), JS_WEAK_REF_TYPE,
+ JSWeakRef::kHeaderSize, 0, factory->the_hole_value(),
+ Builtins::kWeakRefConstructor);
+ InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
+ Context::JS_WEAK_REF_FUNCTION_INDEX);
- Handle<JSObject> weak_ref_prototype = factory->NewJSObject(
- isolate()->object_function(), AllocationType::kOld);
- Map::SetPrototype(isolate(), weak_ref_map, weak_ref_prototype);
+ weak_ref_fun->shared().DontAdaptArguments();
+ weak_ref_fun->shared().set_length(1);
+
+ Handle<JSObject> weak_ref_prototype(
+ JSObject::cast(weak_ref_fun->instance_prototype()), isolate());
InstallToStringTag(isolate(), weak_ref_prototype,
factory->WeakRef_string());
SimpleInstallFunction(isolate(), weak_ref_prototype, "deref",
Builtins::kWeakRefDeref, 0, false);
+ }
+}
- // Create %WeakRef%
- Handle<String> weak_ref_name = factory->InternalizeUtf8String("WeakRef");
- Handle<JSFunction> weak_ref_fun = CreateFunction(
- isolate(), weak_ref_name, JS_WEAK_REF_TYPE, JSWeakRef::kHeaderSize, 0,
- weak_ref_prototype, Builtins::kWeakRefConstructor);
- InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
- Context::JS_WEAK_REF_FUNCTION_INDEX);
+void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
+ if (!FLAG_harmony_weak_refs_with_cleanup_some) return;
+ DCHECK(FLAG_harmony_weak_refs);
- weak_ref_fun->shared().DontAdaptArguments();
- weak_ref_fun->shared().set_length(1);
+ Handle<JSFunction> finalization_registry_fun =
+ isolate()->js_finalization_registry_fun();
+ Handle<JSObject> finalization_registry_prototype(
+ JSObject::cast(finalization_registry_fun->instance_prototype()),
+ isolate());
- // Install the "constructor" property on the prototype.
- JSObject::AddProperty(isolate(), weak_ref_prototype,
- factory->constructor_string(), weak_ref_fun,
- DONT_ENUM);
+ JSObject::AddProperty(isolate(), finalization_registry_prototype,
+ factory()->InternalizeUtf8String("cleanupSome"),
+ isolate()->finalization_registry_cleanup_some(),
+ DONT_ENUM);
+}
- JSObject::AddProperty(isolate(), global, weak_ref_name, weak_ref_fun,
- DONT_ENUM);
+void Genesis::InitializeGlobal_harmony_promise_any() {
+ if (!FLAG_harmony_promise_any) {
+ return;
}
- {
- // Create cleanup iterator for JSFinalizationRegistry.
- Handle<JSObject> iterator_prototype(
- native_context()->initial_iterator_prototype(), isolate());
+ Factory* factory = isolate()->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Handle<JSObject> cleanup_iterator_prototype = factory->NewJSObject(
- isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
+ InstallError(isolate_, global, factory->AggregateError_string(),
+ Context::AGGREGATE_ERROR_FUNCTION_INDEX,
+ Builtins::kAggregateErrorConstructor, JS_AGGREGATE_ERROR_TYPE, 2,
+ JSAggregateError::kHeaderSize);
- InstallToStringTag(isolate(), cleanup_iterator_prototype,
- "FinalizationRegistry Cleanup Iterator");
+ // Setup %AggregateErrorPrototype%.
+ Handle<JSFunction> aggregate_error_function(
+ native_context()->aggregate_error_function(), isolate());
+ Handle<JSObject> prototype(
+ JSObject::cast(aggregate_error_function->instance_prototype()),
+ isolate());
- SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
- Builtins::kFinalizationRegistryCleanupIteratorNext, 0,
- true);
- Handle<Map> cleanup_iterator_map =
- factory->NewMap(JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE,
- JSFinalizationRegistryCleanupIterator::kHeaderSize);
- Map::SetPrototype(isolate(), cleanup_iterator_map,
- cleanup_iterator_prototype);
- native_context()->set_js_finalization_registry_cleanup_iterator_map(
- *cleanup_iterator_map);
- }
+ Handle<String> getter_name =
+ Name::ToFunctionName(isolate_, factory->errors_string(),
+ isolate_->factory()->get_string())
+ .ToHandleChecked();
+
+ Handle<JSFunction> getter = SimpleCreateFunction(
+ isolate(), getter_name, Builtins::kAggregateErrorPrototypeErrorsGetter, 0,
+ true);
+
+ JSObject::DefineAccessor(prototype, factory->errors_string(), getter,
+ factory->undefined_value(), DONT_ENUM);
+
+ Handle<JSFunction> promise_fun(
+ JSFunction::cast(
+ isolate()->native_context()->get(Context::PROMISE_FUNCTION_INDEX)),
+ isolate());
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "any",
+ Builtins::kPromiseAny, 1, true);
+
+ DCHECK(promise_fun->HasFastProperties());
}
void Genesis::InitializeGlobal_harmony_promise_all_settled() {
if (!FLAG_harmony_promise_all_settled) return;
SimpleInstallFunction(isolate(), isolate()->promise_function(), "allSettled",
Builtins::kPromiseAllSettled, 1, true);
- Factory* factory = isolate()->factory();
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kPromiseAllSettledResolveElementClosure,
- factory->empty_string(), 1);
- native_context()->set_promise_all_settled_resolve_element_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate_, Builtins::kPromiseAllSettledRejectElementClosure,
- factory->empty_string(), 1);
- native_context()->set_promise_all_settled_reject_element_shared_fun(*info);
- }
}
void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
@@ -4535,43 +4377,6 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
}
}
-void Genesis::InitializeGlobal_harmony_intl_displaynames() {
- if (!FLAG_harmony_intl_displaynames) return;
- Handle<JSObject> intl = Handle<JSObject>::cast(
- JSReceiver::GetProperty(
- isolate(),
- Handle<JSReceiver>(native_context()->global_object(), isolate()),
- factory()->InternalizeUtf8String("Intl"))
- .ToHandleChecked());
-
- Handle<JSFunction> display_names_fun = InstallFunction(
- isolate(), intl, "DisplayNames", JS_DISPLAY_NAMES_TYPE,
- JSDisplayNames::kHeaderSize, 0, factory()->the_hole_value(),
- Builtins::kDisplayNamesConstructor);
- display_names_fun->shared().set_length(0);
- display_names_fun->shared().DontAdaptArguments();
- InstallWithIntrinsicDefaultProto(isolate_, display_names_fun,
- Context::INTL_DISPLAY_NAMES_FUNCTION_INDEX);
-
- SimpleInstallFunction(isolate(), display_names_fun, "supportedLocalesOf",
- Builtins::kDisplayNamesSupportedLocalesOf, 1, false);
-
- {
- // Setup %DisplayNamesPrototype%.
- Handle<JSObject> prototype(
- JSObject::cast(display_names_fun->instance_prototype()), isolate());
-
- InstallToStringTag(isolate(), prototype, "Intl.DisplayNames");
-
- SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
- Builtins::kDisplayNamesPrototypeResolvedOptions, 0,
- false);
-
- SimpleInstallFunction(isolate(), prototype, "of",
- Builtins::kDisplayNamesPrototypeOf, 1, false);
- }
-}
-
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
@@ -5152,22 +4957,6 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_array_buffer_map(
native_context()->array_buffer_fun().initial_map());
- Handle<JSFunction> js_map_fun(native_context()->js_map_fun(), isolate());
- Handle<JSFunction> js_set_fun(native_context()->js_set_fun(), isolate());
- // Force the Map/Set constructor to fast properties, so that we can use the
- // fast paths for various things like
- //
- // x instanceof Map
- // x instanceof Set
- //
- // etc. We should probably come up with a more principled approach once
- // the JavaScript builtins are gone.
- JSObject::MigrateSlowToFast(js_map_fun, 0, "Bootstrapping");
- JSObject::MigrateSlowToFast(js_set_fun, 0, "Bootstrapping");
-
- native_context()->set_js_map_map(js_map_fun->initial_map());
- native_context()->set_js_set_map(js_set_fun->initial_map());
-
return true;
}
@@ -5451,8 +5240,8 @@ Genesis::Genesis(
}
native_context()->set_microtask_queue(
- microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
- : isolate->default_microtask_queue());
+ isolate, microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
+ : isolate->default_microtask_queue());
// Install experimental natives. Do not include them into the
// snapshot as we should be able to turn them off at runtime. Re-installing
diff --git a/deps/v8/src/init/bootstrapper.h b/deps/v8/src/init/bootstrapper.h
index aaa1997270..e51ef0cd10 100644
--- a/deps/v8/src/init/bootstrapper.h
+++ b/deps/v8/src/init/bootstrapper.h
@@ -55,6 +55,20 @@ class Bootstrapper final {
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
v8::MicrotaskQueue* microtask_queue);
+ // Used for testing context deserialization. No code runs in the generated
+ // context. It only needs to pass heap verification.
+ Handle<Context> CreateEnvironmentForTesting() {
+ MaybeHandle<JSGlobalProxy> no_global_proxy;
+ v8::Local<v8::ObjectTemplate> no_global_object_template;
+ ExtensionConfiguration no_extensions;
+ static constexpr int kDefaultContextIndex = 0;
+ v8::DeserializeEmbedderFieldsCallback no_callback;
+ v8::MicrotaskQueue* no_microtask_queue = nullptr;
+ return CreateEnvironment(no_global_proxy, no_global_object_template,
+ &no_extensions, kDefaultContextIndex, no_callback,
+ no_microtask_queue);
+ }
+
Handle<JSGlobalProxy> NewRemoteContext(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template);
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index d56cc4b0cd..3ac1420f75 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -118,6 +118,7 @@
#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
V(_, add_string, "add") \
+ V(_, AggregateError_string, "AggregateError") \
V(_, always_string, "always") \
V(_, anonymous_function_string, "(anonymous function)") \
V(_, anonymous_string, "anonymous") \
@@ -179,11 +180,13 @@
V(_, enumerable_string, "enumerable") \
V(_, element_string, "element") \
V(_, Error_string, "Error") \
+ V(_, errors_string, "errors") \
V(_, error_to_string, "[object Error]") \
V(_, eval_string, "eval") \
V(_, EvalError_string, "EvalError") \
V(_, exec_string, "exec") \
V(_, false_string, "false") \
+ V(_, FinalizationRegistry_string, "FinalizationRegistry") \
V(_, flags_string, "flags") \
V(_, Float32Array_string, "Float32Array") \
V(_, Float64Array_string, "Float64Array") \
@@ -492,6 +495,7 @@
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS) \
F(SCAVENGER_SCAVENGE_PARALLEL) \
F(SCAVENGER_SCAVENGE_ROOTS) \
+ F(SCAVENGER_SCAVENGE_STACK_ROOTS) \
F(SCAVENGER_SCAVENGE_UPDATE_REFS) \
F(SCAVENGER_SCAVENGE_WEAK) \
F(SCAVENGER_SCAVENGE_FINALIZE) \
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index edcc399f95..5267644737 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -106,6 +106,9 @@ void V8::InitializeOncePerProcessImpl() {
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
+#if defined(V8_USE_PERFETTO)
+ TrackEvent::Register();
+#endif
Isolate::InitializeOncePerProcess();
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index a326ab0127..acb1e961a8 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -31,13 +31,9 @@ _protocol_generated = [
action("protocol_compatibility") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "$_inspector_protocol/check_protocol_compatibility.py"
- inputs = [
- v8_inspector_js_protocol,
- ]
+ inputs = [ v8_inspector_js_protocol ]
_stamp = "$target_gen_dir/js_protocol.stamp"
- outputs = [
- _stamp,
- ]
+ outputs = [ _stamp ]
args = [
"--stamp",
rebase_path(_stamp, root_build_dir),
@@ -47,9 +43,7 @@ action("protocol_compatibility") {
inspector_protocol_generate("protocol_generated_sources") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- deps = [
- ":protocol_compatibility",
- ]
+ deps = [ ":protocol_compatibility" ]
inspector_protocol_dir = _inspector_protocol
out_dir = target_gen_dir
@@ -71,13 +65,9 @@ config("inspector_config") {
v8_header_set("inspector_test_headers") {
configs = [ ":inspector_config" ]
- public_deps = [
- "../..:v8_headers",
- ]
+ public_deps = [ "../..:v8_headers" ]
- sources = [
- "test-interface.h",
- ]
+ sources = [ "test-interface.h" ]
}
v8_source_set("inspector_string_conversions") {
@@ -86,14 +76,13 @@ v8_source_set("inspector_string_conversions") {
"v8-string-conversions.h",
]
configs = [ "../..:internal_config_base" ]
- deps = [
- "../..:v8_libbase",
- ]
+ deps = [ "../..:v8_libbase" ]
}
v8_source_set("inspector") {
deps = [
":inspector_string_conversions",
+ "../..:v8_tracing",
"../..:v8_version",
"../../third_party/inspector_protocol:crdtp",
]
@@ -166,7 +155,5 @@ v8_source_set("inspector") {
group("v8_generated_cc_files") {
testonly = true
- deps = [
- ":protocol_generated_sources",
- ]
+ deps = [ ":protocol_generated_sources" ]
}
diff --git a/deps/v8/src/inspector/search-util.cc b/deps/v8/src/inspector/search-util.cc
index d2550ad9e4..ec800007dd 100644
--- a/deps/v8/src/inspector/search-util.cc
+++ b/deps/v8/src/inspector/search-util.cc
@@ -36,7 +36,8 @@ String16 findMagicComment(const String16& content, const String16& name,
if (content[pos + 2] != '#' && content[pos + 2] != '@') continue;
if (content[pos + 3] != ' ' && content[pos + 3] != '\t') continue;
equalSignPos = pos + 4 + nameLength;
- if (equalSignPos < length && content[equalSignPos] != '=') continue;
+ if (equalSignPos >= length) continue;
+ if (content[equalSignPos] != '=') continue;
if (multiline) {
closingCommentPos = content.find("*/", equalSignPos + 1);
if (closingCommentPos == String16::kNotFound) return String16();
@@ -46,6 +47,7 @@ String16 findMagicComment(const String16& content, const String16& name,
}
DCHECK(equalSignPos);
+ DCHECK_LT(equalSignPos, length);
DCHECK(!multiline || closingCommentPos);
size_t urlPos = equalSignPos + 1;
String16 match = multiline
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index b4cbd016a6..76c68498b2 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -28,7 +28,7 @@ class String16 {
String16(String16&&) V8_NOEXCEPT = default;
String16(const UChar* characters, size_t size);
V8_EXPORT String16(const UChar* characters); // NOLINT(runtime/explicit)
- String16(const char* characters); // NOLINT(runtime/explicit)
+ V8_EXPORT String16(const char* characters); // NOLINT(runtime/explicit)
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
explicit String16(std::basic_string<UChar>&& impl);
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 3f779e86e1..b1c0d6411a 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
#include <cmath>
+#include <cstddef>
#include "src/base/platform/platform.h"
#include "src/inspector/protocol/Protocol.h"
@@ -13,6 +14,91 @@
namespace v8_inspector {
+namespace protocol {
+namespace {
+std::pair<uint8_t, uint8_t> SplitByte(uint8_t byte, uint8_t split) {
+ return {byte >> split, (byte & ((1 << split) - 1)) << (6 - split)};
+}
+
+v8::Maybe<uint8_t> DecodeByte(char byte) {
+ if ('A' <= byte && byte <= 'Z') return v8::Just<uint8_t>(byte - 'A');
+ if ('a' <= byte && byte <= 'z') return v8::Just<uint8_t>(byte - 'a' + 26);
+ if ('0' <= byte && byte <= '9')
+ return v8::Just<uint8_t>(byte - '0' + 26 + 26);
+ if (byte == '+') return v8::Just<uint8_t>(62);
+ if (byte == '/') return v8::Just<uint8_t>(63);
+ return v8::Nothing<uint8_t>();
+}
+} // namespace
+
+String Binary::toBase64() const {
+ const char* table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ if (size() == 0) return {};
+ std::basic_string<UChar> result;
+ result.reserve(4 * ((size() + 2) / 3));
+ uint8_t last = 0;
+ for (size_t n = 0; n < size();) {
+ auto split = SplitByte((*bytes_)[n], 2 + 2 * (n % 3));
+ result.push_back(table[split.first | last]);
+
+ ++n;
+ if (n < size() && n % 3 == 0) {
+ result.push_back(table[split.second]);
+ last = 0;
+ } else {
+ last = split.second;
+ }
+ }
+ result.push_back(table[last]);
+ while (result.size() % 4 > 0) result.push_back('=');
+ return String16(std::move(result));
+}
+
+/* static */
+Binary Binary::fromBase64(const String& base64, bool* success) {
+ if (base64.isEmpty()) {
+ *success = true;
+ return Binary::fromSpan(nullptr, 0);
+ }
+
+ *success = false;
+ // Fail if the length is invalid or decoding would overflow.
+ if (base64.length() % 4 != 0 || base64.length() + 4 < base64.length()) {
+ return Binary::fromSpan(nullptr, 0);
+ }
+
+ std::vector<uint8_t> result;
+ result.reserve(3 * base64.length() / 4);
+ char pad = '=';
+ // Iterate groups of four
+ for (size_t i = 0; i < base64.length(); i += 4) {
+ uint8_t a = 0, b = 0, c = 0, d = 0;
+ if (!DecodeByte(base64[i + 0]).To(&a)) return Binary::fromSpan(nullptr, 0);
+ if (!DecodeByte(base64[i + 1]).To(&b)) return Binary::fromSpan(nullptr, 0);
+ if (!DecodeByte(base64[i + 2]).To(&c)) {
+ // Padding is allowed only in the group on the last two positions
+ if (i + 4 < base64.length() || base64[i + 2] != pad ||
+ base64[i + 3] != pad) {
+ return Binary::fromSpan(nullptr, 0);
+ }
+ }
+ if (!DecodeByte(base64[i + 3]).To(&d)) {
+ // Padding is allowed only in the group on the last two positions
+ if (i + 4 < base64.length() || base64[i + 3] != pad) {
+ return Binary::fromSpan(nullptr, 0);
+ }
+ }
+
+ result.push_back((a << 2) | (b >> 4));
+ if (base64[i + 2] != '=') result.push_back((0xFF & (b << 4)) | (c >> 2));
+ if (base64[i + 3] != '=') result.push_back((0xFF & (c << 6)) | d);
+ }
+ *success = true;
+ return Binary(std::make_shared<std::vector<uint8_t>>(std::move(result)));
+}
+} // namespace protocol
+
v8::Local<v8::String> toV8String(v8::Isolate* isolate, const String16& string) {
if (string.isEmpty()) return v8::String::Empty(isolate);
DCHECK_GT(v8::String::kMaxLength, string.length());
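The Binary::toBase64/fromBase64 implementations added above pack three bytes into four 6-bit values and back. Below is a minimal standalone sketch of that bit layout, independent of the patch and of the inspector's String16/Binary types; DecodeSextet is a hypothetical stand-in for the patch's DecodeByte helper.

#include <cstdint>
#include <cstdio>

namespace {
// Hypothetical stand-in for DecodeByte(): maps one base64 character to its
// 6-bit value, or -1 for padding/invalid input.
int DecodeSextet(char ch) {
  if ('A' <= ch && ch <= 'Z') return ch - 'A';
  if ('a' <= ch && ch <= 'z') return ch - 'a' + 26;
  if ('0' <= ch && ch <= '9') return ch - '0' + 52;
  if (ch == '+') return 62;
  if (ch == '/') return 63;
  return -1;
}
}  // namespace

int main() {
  // One full base64 group: "TWFu" decodes to the three bytes of "Man".
  const char* group = "TWFu";
  int a = DecodeSextet(group[0]), b = DecodeSextet(group[1]);
  int c = DecodeSextet(group[2]), d = DecodeSextet(group[3]);
  std::uint8_t bytes[3] = {
      static_cast<std::uint8_t>((a << 2) | (b >> 4)),           // 'M'
      static_cast<std::uint8_t>(0xFF & ((b << 4) | (c >> 2))),  // 'a'
      static_cast<std::uint8_t>(0xFF & ((c << 6) | d)),         // 'n'
  };
  std::printf("%c%c%c\n", bytes[0], bytes[1], bytes[2]);  // Prints "Man".
  return 0;
}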
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 50d3614e54..1b5f9e34ab 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -41,19 +41,14 @@ class StringUtil {
};
// A read-only sequence of uninterpreted bytes with reference-counted storage.
-// Though the templates for generating the protocol bindings reference
-// this type, js_protocol.pdl doesn't have a field of type 'binary', so
-// therefore it's unnecessary to provide an implementation here.
-class Binary {
+class V8_EXPORT Binary {
public:
Binary() = default;
const uint8_t* data() const { return bytes_->data(); }
size_t size() const { return bytes_->size(); }
- String toBase64() const { UNIMPLEMENTED(); }
- static Binary fromBase64(const String& base64, bool* success) {
- UNIMPLEMENTED();
- }
+ String toBase64() const;
+ static Binary fromBase64(const String& base64, bool* success);
static Binary fromSpan(const uint8_t* data, size_t size) {
return Binary(std::make_shared<std::vector<uint8_t>>(data, data + size));
}
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index ec7709f8c6..4fd33e346a 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -783,15 +783,11 @@ static bool isCommandLineAPIGetter(const String16& name) {
void V8Console::CommandLineAPIScope::accessorGetterCallback(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
- info.Data().As<v8::External>()->Value());
- DCHECK(scope);
-
+ CommandLineAPIScope* scope = *static_cast<CommandLineAPIScope**>(
+ info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data());
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
- if (scope->m_cleanup) {
- bool removed = info.Holder()->Delete(context, name).FromMaybe(false);
- DCHECK(removed);
- USE(removed);
+ if (scope == nullptr) {
+ USE(info.Holder()->Delete(context, name).FromMaybe(false));
return;
}
v8::Local<v8::Object> commandLineAPI = scope->m_commandLineAPI;
@@ -815,16 +811,14 @@ void V8Console::CommandLineAPIScope::accessorGetterCallback(
void V8Console::CommandLineAPIScope::accessorSetterCallback(
v8::Local<v8::Name> name, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- CommandLineAPIScope* scope = static_cast<CommandLineAPIScope*>(
- info.Data().As<v8::External>()->Value());
+ CommandLineAPIScope* scope = *static_cast<CommandLineAPIScope**>(
+ info.Data().As<v8::ArrayBuffer>()->GetBackingStore()->Data());
+ if (scope == nullptr) return;
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
if (!info.Holder()->Delete(context, name).FromMaybe(false)) return;
if (!info.Holder()->CreateDataProperty(context, name, value).FromMaybe(false))
return;
- bool removed =
- scope->m_installedMethods->Delete(context, name).FromMaybe(false);
- DCHECK(removed);
- USE(removed);
+ USE(scope->m_installedMethods->Delete(context, name).FromMaybe(false));
}
V8Console::CommandLineAPIScope::CommandLineAPIScope(
@@ -833,14 +827,15 @@ V8Console::CommandLineAPIScope::CommandLineAPIScope(
: m_context(context),
m_commandLineAPI(commandLineAPI),
m_global(global),
- m_installedMethods(v8::Set::New(context->GetIsolate())),
- m_cleanup(false) {
+ m_installedMethods(v8::Set::New(context->GetIsolate())) {
v8::MicrotasksScope microtasksScope(context->GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Array> names;
if (!m_commandLineAPI->GetOwnPropertyNames(context).ToLocal(&names)) return;
- v8::Local<v8::External> externalThis =
- v8::External::New(context->GetIsolate(), this);
+ m_thisReference =
+ v8::ArrayBuffer::New(context->GetIsolate(), sizeof(CommandLineAPIScope*));
+ *static_cast<CommandLineAPIScope**>(
+ m_thisReference->GetBackingStore()->Data()) = this;
for (uint32_t i = 0; i < names->Length(); ++i) {
v8::Local<v8::Value> name;
if (!names->Get(context, i).ToLocal(&name) || !name->IsName()) continue;
@@ -851,7 +846,7 @@ V8Console::CommandLineAPIScope::CommandLineAPIScope(
->SetAccessor(context, v8::Local<v8::Name>::Cast(name),
CommandLineAPIScope::accessorGetterCallback,
CommandLineAPIScope::accessorSetterCallback,
- externalThis, v8::DEFAULT, v8::DontEnum,
+ m_thisReference, v8::DEFAULT, v8::DontEnum,
v8::SideEffectType::kHasNoSideEffect)
.FromMaybe(false)) {
bool removed = m_installedMethods->Delete(context, name).FromMaybe(false);
@@ -865,7 +860,8 @@ V8Console::CommandLineAPIScope::CommandLineAPIScope(
V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
- m_cleanup = true;
+ *static_cast<CommandLineAPIScope**>(
+ m_thisReference->GetBackingStore()->Data()) = nullptr;
v8::Local<v8::Array> names = m_installedMethods->AsArray();
for (uint32_t i = 0; i < names->Length(); ++i) {
v8::Local<v8::Value> name;
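The CommandLineAPIScope change above drops the raw `this` passed via v8::External plus the m_cleanup flag; the scope instead publishes its address through a one-pointer ArrayBuffer backing store and nulls that slot in its destructor, so accessor callbacks that fire after the scope is gone observe nullptr. The following is a standalone sketch of the same ownership pattern, using std::shared_ptr as a stand-in for the ArrayBuffer backing store, since a runnable V8 embedding example would need full isolate setup.

#include <cstdio>
#include <functional>
#include <memory>

class Scope {
 public:
  Scope() : slot_(std::make_shared<Scope*>(this)) {}
  // Clearing the slot is the analogue of writing nullptr into the backing
  // store in ~CommandLineAPIScope().
  ~Scope() { *slot_ = nullptr; }
  std::shared_ptr<Scope*> slot() const { return slot_; }
  void Use() { std::puts("scope is alive"); }

 private:
  std::shared_ptr<Scope*> slot_;
};

int main() {
  std::function<void()> callback;
  {
    Scope scope;
    // The callback captures the slot, never the scope itself.
    std::shared_ptr<Scope*> slot = scope.slot();
    callback = [slot] {
      if (Scope* s = *slot) {
        s->Use();
      } else {
        std::puts("scope already destroyed");
      }
    };
    callback();  // "scope is alive"
  }
  callback();  // "scope already destroyed"
  return 0;
}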
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 4d38c51a2a..5875164595 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -42,7 +42,7 @@ class V8Console : public v8::debug::ConsoleDelegate {
v8::Local<v8::Object> m_commandLineAPI;
v8::Local<v8::Object> m_global;
v8::Local<v8::Set> m_installedMethods;
- bool m_cleanup;
+ v8::Local<v8::ArrayBuffer> m_thisReference;
DISALLOW_COPY_AND_ASSIGN(CommandLineAPIScope);
};
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 2e5c8cd417..afefd4e14c 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-inspector.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
@@ -25,8 +26,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
using protocol::Array;
@@ -1150,6 +1149,55 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
result, exceptionDetails);
}
+Response V8DebuggerAgentImpl::executeWasmEvaluator(
+ const String16& callFrameId, const protocol::Binary& evaluator,
+ Maybe<double> timeout,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
+ if (!v8::debug::StackTraceIterator::SupportsWasmDebugEvaluate()) {
+ return Response::ServerError(
+ "--wasm-expose-debug-eval is required to execte evaluator modules");
+ }
+ if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
+ InjectedScript::CallFrameScope scope(m_session, callFrameId);
+ Response response = scope.initialize();
+ if (!response.IsSuccess()) return response;
+
+ int frameOrdinal = static_cast<int>(scope.frameOrdinal());
+ std::unique_ptr<v8::debug::StackTraceIterator> it =
+ v8::debug::StackTraceIterator::Create(m_isolate, frameOrdinal);
+ if (it->Done()) {
+ return Response::ServerError("Could not find call frame with given id");
+ }
+ if (!it->GetScript()->IsWasm()) {
+ return Response::ServerError(
+ "executeWasmEvaluator can only be called on WebAssembly frames");
+ }
+
+ v8::MaybeLocal<v8::Value> maybeResultValue;
+ {
+ V8InspectorImpl::EvaluateScope evaluateScope(scope);
+ if (timeout.isJust()) {
+ response = evaluateScope.setTimeout(timeout.fromJust() / 1000.0);
+ if (!response.IsSuccess()) return response;
+ }
+ v8::MaybeLocal<v8::String> eval_result =
+ it->EvaluateWasm({evaluator.data(), evaluator.size()}, frameOrdinal);
+ if (!eval_result.IsEmpty()) maybeResultValue = eval_result.ToLocalChecked();
+ }
+
+ // Re-initialize after running client's code, as it could have destroyed
+ // context or session.
+ response = scope.initialize();
+ if (!response.IsSuccess()) return response;
+
+ String16 object_group = "";
+ InjectedScript* injected_script = scope.injectedScript();
+ return injected_script->wrapEvaluateResult(maybeResultValue, scope.tryCatch(),
+ object_group, WrapMode::kNoPreview,
+ result, exceptionDetails);
+}
+
Response V8DebuggerAgentImpl::setVariableValue(
int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
@@ -1423,6 +1471,39 @@ static String16 getScriptLanguage(const V8DebuggerScript& script) {
}
}
+static const char* getDebugSymbolTypeName(
+ v8::debug::WasmScript::DebugSymbolsType type) {
+ switch (type) {
+ case v8::debug::WasmScript::DebugSymbolsType::None:
+ return v8_inspector::protocol::Debugger::DebugSymbols::TypeEnum::None;
+ case v8::debug::WasmScript::DebugSymbolsType::SourceMap:
+ return v8_inspector::protocol::Debugger::DebugSymbols::TypeEnum::
+ SourceMap;
+ case v8::debug::WasmScript::DebugSymbolsType::EmbeddedDWARF:
+ return v8_inspector::protocol::Debugger::DebugSymbols::TypeEnum::
+ EmbeddedDWARF;
+ case v8::debug::WasmScript::DebugSymbolsType::ExternalDWARF:
+ return v8_inspector::protocol::Debugger::DebugSymbols::TypeEnum::
+ ExternalDWARF;
+ }
+}
+
+static std::unique_ptr<protocol::Debugger::DebugSymbols> getDebugSymbols(
+ const V8DebuggerScript& script) {
+ v8::debug::WasmScript::DebugSymbolsType type;
+ if (!script.getDebugSymbolsType().To(&type)) return {};
+
+ std::unique_ptr<protocol::Debugger::DebugSymbols> debugSymbols =
+ v8_inspector::protocol::Debugger::DebugSymbols::create()
+ .setType(getDebugSymbolTypeName(type))
+ .build();
+ String16 externalUrl;
+ if (script.getExternalDebugSymbolsURL().To(&externalUrl)) {
+ debugSymbols->setExternalURL(externalUrl);
+ }
+ return debugSymbols;
+}
+
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
@@ -1458,6 +1539,8 @@ void V8DebuggerAgentImpl::didParseSource(
script->getLanguage() == V8DebuggerScript::Language::JavaScript
? Maybe<int>()
: script->codeOffset();
+ std::unique_ptr<protocol::Debugger::DebugSymbols> debugSymbols =
+ getDebugSymbols(*script);
m_scripts[scriptId] = std::move(script);
// Release the strong reference to get notified when debugger is the only
@@ -1505,8 +1588,8 @@ void V8DebuggerAgentImpl::didParseSource(
scriptId, scriptURL, 0, 0, 0, 0, contextId, scriptRef->hash(),
std::move(executionContextAuxDataParam), isLiveEditParam,
std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam, 0,
- std::move(stackTrace), std::move(codeOffset),
- std::move(scriptLanguage));
+ std::move(stackTrace), std::move(codeOffset), std::move(scriptLanguage),
+ std::move(debugSymbols));
} else {
m_frontend.scriptParsed(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
@@ -1514,7 +1597,8 @@ void V8DebuggerAgentImpl::didParseSource(
scriptRef->hash(), std::move(executionContextAuxDataParam),
isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
isModuleParam, scriptRef->length(), std::move(stackTrace),
- std::move(codeOffset), std::move(scriptLanguage));
+ std::move(codeOffset), std::move(scriptLanguage),
+ std::move(debugSymbols));
}
std::vector<protocol::DictionaryValue*> potentialBreakpoints;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index df719a4fa3..13ca1624b7 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -113,6 +113,11 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Maybe<double> timeout,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>*) override;
+ Response executeWasmEvaluator(
+ const String16& callFrameId, const protocol::Binary& evaluator,
+ Maybe<double> timeout,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result,
+ Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) override;
Response setVariableValue(
int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValue,
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 551beb242b..7905341481 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -122,6 +122,21 @@ class ActualScript : public V8DebuggerScript {
return v8::Just(v8::debug::WasmScript::Cast(*script)->Bytecode());
}
Language getLanguage() const override { return m_language; }
+ v8::Maybe<v8::debug::WasmScript::DebugSymbolsType> getDebugSymbolsType()
+ const override {
+ auto script = this->script();
+ if (!script->IsWasm())
+ return v8::Nothing<v8::debug::WasmScript::DebugSymbolsType>();
+ return v8::Just(v8::debug::WasmScript::Cast(*script)->GetDebugSymbolType());
+ }
+ v8::Maybe<String16> getExternalDebugSymbolsURL() const override {
+ auto script = this->script();
+ if (!script->IsWasm()) return v8::Nothing<String16>();
+ v8::MemorySpan<const char> external_url =
+ v8::debug::WasmScript::Cast(*script)->ExternalSymbolsURL();
+ if (external_url.size() == 0) return v8::Nothing<String16>();
+ return v8::Just(String16(external_url.data(), external_url.size()));
+ }
int startLine() const override { return m_startLine; }
int startColumn() const override { return m_startColumn; }
int endLine() const override { return m_endLine; }
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index d4ad784394..f47f81a049 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -61,6 +61,9 @@ class V8DebuggerScript {
virtual String16 source(size_t pos, size_t len = UINT_MAX) const = 0;
virtual v8::Maybe<v8::MemorySpan<const uint8_t>> wasmBytecode() const = 0;
virtual Language getLanguage() const = 0;
+ virtual v8::Maybe<String16> getExternalDebugSymbolsURL() const = 0;
+ virtual v8::Maybe<v8::debug::WasmScript::DebugSymbolsType>
+ getDebugSymbolsType() const = 0;
virtual const String16& hash() const = 0;
virtual int startLine() const = 0;
virtual int startColumn() const = 0;
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index f9457fd1ec..78e7417b8f 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -1602,6 +1602,10 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
if (value->IsSymbol()) {
return std::make_unique<SymbolMirror>(value.As<v8::Symbol>());
}
+ if (v8::debug::WasmValue::IsWasmValue(value)) {
+ // TODO(v8:10347) WasmValue is not created anywhere yet.
+ UNIMPLEMENTED();
+ }
auto clientSubtype = (value->IsUndefined() || value->IsObject())
? clientFor(context)->valueSubtype(value)
: nullptr;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 18a2fc9913..4a1c045927 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -1040,10 +1040,10 @@ static bool IsInEagerLiterals(
#endif // DEBUG
BytecodeGenerator::BytecodeGenerator(
- UnoptimizedCompilationInfo* info,
+ Zone* compile_zone, UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
std::vector<FunctionLiteral*>* eager_inner_literals)
- : zone_(info->zone()),
+ : zone_(compile_zone),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
info->SourcePositionRecordingMode()),
@@ -1332,7 +1332,7 @@ void BytecodeGenerator::GenerateBytecodeBody() {
if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
// Emit type profile call.
- if (info()->collect_type_profile()) {
+ if (info()->flags().collect_type_profile()) {
feedback_spec()->AddTypeProfileSlot();
int num_parameters = closure_scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
@@ -2134,7 +2134,7 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
DCHECK(expr->scope()->outer_scope() == current_scope());
uint8_t flags = CreateClosureFlags::Encode(
expr->pretenure(), closure_scope()->is_function_scope(),
- info()->might_always_opt());
+ info()->flags().might_always_opt());
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
builder()->CreateClosure(entry, GetCachedCreateClosureSlot(expr), flags);
function_literals_.push_back(std::make_pair(expr, entry));
@@ -3197,7 +3197,7 @@ void BytecodeGenerator::BuildReturn(int source_position) {
builder()->StoreAccumulatorInRegister(result).CallRuntime(
Runtime::kTraceExit, result);
}
- if (info()->collect_type_profile()) {
+ if (info()->flags().collect_type_profile()) {
builder()->CollectTypeProfile(info()->literal()->return_position());
}
builder()->SetReturnPosition(source_position, info()->literal());
@@ -3538,16 +3538,15 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
// In pseudo-code, this builds:
//
// if (!done) {
-// let method = iterator.return
-// if (method !== null && method !== undefined) {
-// try {
-// if (typeof(method) !== "function") throw TypeError
+// try {
+// let method = iterator.return
+// if (method !== null && method !== undefined) {
// let return_val = method.call(iterator)
// if (!%IsObject(return_val)) throw TypeError
-// } catch (e) {
-// if (iteration_continuation != RETHROW)
-// rethrow e
// }
+// } catch (e) {
+// if (iteration_continuation != RETHROW)
+// rethrow e
// }
// }
//
@@ -3562,44 +3561,24 @@ void BytecodeGenerator::BuildFinalizeIteration(
builder()->LoadAccumulatorWithRegister(done).JumpIfTrue(
ToBooleanMode::kConvertToBoolean, iterator_is_done.New());
- // method = iterator.return
- // if (method !== null && method !== undefined) {
- Register method = register_allocator()->NewRegister();
- builder()
- ->LoadNamedProperty(iterator.object(),
- ast_string_constants()->return_string(),
- feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(method)
- .JumpIfUndefinedOrNull(iterator_is_done.New());
-
{
RegisterAllocationScope register_scope(this);
BuildTryCatch(
// try {
- // if (typeof(method) !== "function") throw TypeError
- // let return_val = method.call(iterator)
- // if (!%IsObject(return_val)) throw TypeError
+ // let method = iterator.return
+ // if (method !== null && method !== undefined) {
+ // let return_val = method.call(iterator)
+ // if (!%IsObject(return_val)) throw TypeError
+ // }
// }
[&]() {
- BytecodeLabel if_callable;
+ Register method = register_allocator()->NewRegister();
builder()
- ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
- {
- // throw %NewTypeError(kReturnMethodNotCallable)
- RegisterAllocationScope register_scope(this);
- RegisterList new_type_error_args =
- register_allocator()->NewRegisterList(2);
- builder()
- ->LoadLiteral(
- Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
- .StoreAccumulatorInRegister(new_type_error_args[0])
- .LoadLiteral(ast_string_constants()->empty_string())
- .StoreAccumulatorInRegister(new_type_error_args[1])
- .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
- .Throw();
- }
- builder()->Bind(&if_callable);
+ ->LoadNamedProperty(
+ iterator.object(), ast_string_constants()->return_string(),
+ feedback_index(feedback_spec()->AddLoadICSlot()))
+ .JumpIfUndefinedOrNull(iterator_is_done.New())
+ .StoreAccumulatorInRegister(method);
RegisterList args(iterator.object());
builder()->CallProperty(
@@ -4142,9 +4121,23 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
}
}
- BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
+ BinaryOperation* binop = expr->binary_operation();
FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
- if (expr->value()->IsSmiLiteral()) {
+ BytecodeLabel short_circuit;
+ if (binop->op() == Token::NULLISH) {
+ BytecodeLabel nullish;
+ builder()
+ ->JumpIfUndefinedOrNull(&nullish)
+ .Jump(&short_circuit)
+ .Bind(&nullish);
+ VisitForAccumulatorValue(expr->value());
+ } else if (binop->op() == Token::OR) {
+ builder()->JumpIfTrue(ToBooleanMode::kConvertToBoolean, &short_circuit);
+ VisitForAccumulatorValue(expr->value());
+ } else if (binop->op() == Token::AND) {
+ builder()->JumpIfFalse(ToBooleanMode::kConvertToBoolean, &short_circuit);
+ VisitForAccumulatorValue(expr->value());
+ } else if (expr->value()->IsSmiLiteral()) {
builder()->BinaryOperationSmiLiteral(
binop->op(), expr->value()->AsLiteral()->AsSmiLiteral(),
feedback_index(slot));
@@ -4154,9 +4147,9 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
VisitForAccumulatorValue(expr->value());
builder()->BinaryOperation(binop->op(), old_value, feedback_index(slot));
}
-
builder()->SetExpressionPosition(expr);
BuildAssignment(lhs_data, expr->op(), expr->lookup_hoisting_mode());
+ builder()->Bind(&short_circuit);
}
// Suspends the generator to resume at the next suspend_id, with output stored
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index aa461d523c..2b11e721fa 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(
- UnoptimizedCompilationInfo* info,
+ Zone* zone, UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
std::vector<FunctionLiteral*>* eager_inner_literals);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index eaea1c91dd..49adee5bf7 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -832,149 +832,24 @@ TNode<Object> InterpreterAssembler::Construct(
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
TVARIABLE(Object, var_result);
TVARIABLE(AllocationSite, var_site);
- Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
- construct(this), construct_array(this, &var_site);
- GotoIf(IsUndefined(maybe_feedback_vector), &construct);
-
- TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);
-
- // Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
+ Label return_result(this), construct_generic(this),
+ construct_array(this, &var_site);
- // Check if we have monomorphic {new_target} feedback already.
- TNode<MaybeObject> feedback =
- LoadFeedbackVectorSlot(feedback_vector, slot_id);
- Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
- &extra_checks);
+ CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
+ slot_id, &construct_generic, &construct_array,
+ &var_site);
- BIND(&extra_checks);
+ BIND(&construct_generic);
{
- Label check_allocation_site(this), check_initialized(this),
- initialize(this), mark_megamorphic(this);
-
- // Check if it is a megamorphic {new_target}..
- Comment("check if megamorphic");
- TNode<BoolT> is_megamorphic = TaggedEqual(
- feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
- GotoIf(is_megamorphic, &construct);
-
- Comment("check if weak reference");
- GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site);
-
- // If the weak reference is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak reference is cleared");
- Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
-
- BIND(&check_allocation_site);
- {
- // Check if it is an AllocationSite.
- Comment("check if allocation site");
- TNode<HeapObject> strong_feedback = CAST(feedback);
- GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
-
- // Make sure that {target} and {new_target} are the Array constructor.
- TNode<Object> array_function = LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
- GotoIfNot(TaggedEqual(target, array_function), &mark_megamorphic);
- GotoIfNot(TaggedEqual(new_target, array_function), &mark_megamorphic);
- var_site = CAST(strong_feedback);
- Goto(&construct_array);
- }
-
- BIND(&check_initialized);
- {
- // Check if it is uninitialized.
- Comment("check if uninitialized");
- TNode<BoolT> is_uninitialized =
- TaggedEqual(feedback, UninitializedSymbolConstant());
- Branch(is_uninitialized, &initialize, &mark_megamorphic);
- }
-
- BIND(&initialize);
- {
- Comment("check if function in same native context");
- GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
- // Check if the {new_target} is a JSFunction or JSBoundFunction
- // in the current native context.
- TVARIABLE(HeapObject, var_current, CAST(new_target));
- Label loop(this, &var_current), done_loop(this);
- Goto(&loop);
- BIND(&loop);
- {
- Label if_boundfunction(this), if_function(this);
- TNode<HeapObject> current = var_current.value();
- TNode<Uint16T> current_instance_type = LoadInstanceType(current);
- GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
- &if_boundfunction);
- Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
- &if_function, &mark_megamorphic);
-
- BIND(&if_function);
- {
- // Check that the JSFunction {current} is in the current native
- // context.
- TNode<Context> current_context =
- CAST(LoadObjectField(current, JSFunction::kContextOffset));
- TNode<NativeContext> current_native_context =
- LoadNativeContext(current_context);
- Branch(
- TaggedEqual(LoadNativeContext(context), current_native_context),
- &done_loop, &mark_megamorphic);
- }
-
- BIND(&if_boundfunction);
- {
- // Continue with the [[BoundTargetFunction]] of {current}.
- var_current = LoadObjectField<HeapObject>(
- current, JSBoundFunction::kBoundTargetFunctionOffset);
- Goto(&loop);
- }
- }
- BIND(&done_loop);
-
- // Create an AllocationSite if {target} and {new_target} refer
- // to the current native context's Array constructor.
- Label create_allocation_site(this), store_weak_reference(this);
- GotoIfNot(TaggedEqual(target, new_target), &store_weak_reference);
- TNode<Object> array_function = LoadContextElement(
- LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
- Branch(TaggedEqual(target, array_function), &create_allocation_site,
- &store_weak_reference);
-
- BIND(&create_allocation_site);
- {
- var_site =
- CreateAllocationSiteInFeedbackVector(feedback_vector, slot_id);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Construct:CreateAllocationSite");
- Goto(&construct_array);
- }
-
- BIND(&store_weak_reference);
- {
- StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
- CAST(new_target));
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Construct:StoreWeakReference");
- Goto(&construct);
- }
- }
-
- BIND(&mark_megamorphic);
- {
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
- StoreFeedbackVectorSlot(
- feedback_vector, slot_id,
- HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
- ReportFeedbackUpdate(feedback_vector, slot_id,
- "Construct:TransitionMegamorphic");
- Goto(&construct);
- }
+ // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
+ Comment("call using Construct builtin");
+ Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
+ isolate(), InterpreterPushArgsMode::kOther);
+ TNode<Code> code_target = HeapConstant(callable.code());
+ var_result = CallStub(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(), target,
+ new_target, UndefinedConstant());
+ Goto(&return_result);
}
BIND(&construct_array);
@@ -991,19 +866,6 @@ TNode<Object> InterpreterAssembler::Construct(
Goto(&return_result);
}
- BIND(&construct);
- {
- // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
- Comment("call using Construct builtin");
- Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
- isolate(), InterpreterPushArgsMode::kOther);
- TNode<Code> code_target = HeapConstant(callable.code());
- var_result = CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, UndefinedConstant());
- Goto(&return_result);
- }
-
BIND(&return_result);
return var_result.value();
}
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 6b8b7135e0..9fef9ac0a0 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -1902,17 +1902,7 @@ IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- Label feedback_done(this);
- GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
-
- // Record feedback for the {callable} in the {feedback_vector}.
- CollectCallableFeedback(callable, context, CAST(maybe_feedback_vector),
- slot_id,
- CallableFeedbackMode::kDontCollectFeedbackCell);
- Goto(&feedback_done);
-
- BIND(&feedback_done);
- // Perform the actual instanceof operation.
+ CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot_id);
SetAccumulator(InstanceOf(object, callable, context));
Dispatch();
}
@@ -2811,7 +2801,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
BIND(&if_duplicate_parameters);
{
TNode<Object> result =
- CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+ CallRuntime(Runtime::kNewSloppyArguments, context, closure);
SetAccumulator(result);
Dispatch();
}
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 805b677357..42f0c56162 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -149,10 +149,10 @@ InterpreterCompilationJob::InterpreterCompilationJob(
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals)
: UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
- &compilation_info_),
+ &compilation_info_, CanOffThreadFinalize::kYes),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
- generator_(&compilation_info_, parse_info->ast_string_constants(),
+ generator_(&zone_, &compilation_info_, parse_info->ast_string_constants(),
eager_inner_literals) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
@@ -191,17 +191,18 @@ void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
std::cerr << "Bytecode mismatch";
#ifdef OBJECT_PRINT
std::cerr << " found for function: ";
- Handle<String> name = parse_info()->function_name()->string();
- if (name->length() == 0) {
- std::cerr << "anonymous";
- } else {
+ MaybeHandle<String> maybe_name = parse_info()->literal()->GetName(isolate);
+ Handle<String> name;
+ if (maybe_name.ToHandle(&name) && name->length() != 0) {
name->StringPrint(std::cerr);
+ } else {
+ std::cerr << "anonymous";
}
Object script_name = script->GetNameOrSourceURL();
if (script_name.IsString()) {
std::cerr << " ";
String::cast(script_name).StringPrint(std::cerr);
- std::cerr << ":" << parse_info()->start_position();
+ std::cerr << ":" << parse_info()->literal()->start_position();
}
#endif
std::cerr << "\nOriginal bytecode:\n";
@@ -286,7 +287,7 @@ Interpreter::NewSourcePositionCollectionJob(
auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
diff --git a/deps/v8/src/libplatform/default-job.cc b/deps/v8/src/libplatform/default-job.cc
new file mode 100644
index 0000000000..7f750f185f
--- /dev/null
+++ b/deps/v8/src/libplatform/default-job.cc
@@ -0,0 +1,150 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-job.h"
+
+namespace v8 {
+namespace platform {
+
+DefaultJobState::~DefaultJobState() { DCHECK_EQ(0U, active_workers_); }
+
+void DefaultJobState::NotifyConcurrencyIncrease() {
+ if (is_canceled_.load(std::memory_order_relaxed)) return;
+
+ size_t num_tasks_to_post = 0;
+ {
+ base::MutexGuard guard(&mutex_);
+ const size_t max_concurrency = CappedMaxConcurrency();
+ // Consider |pending_tasks_| to avoid posting too many tasks.
+ if (max_concurrency > (active_workers_ + pending_tasks_)) {
+ num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
+ pending_tasks_ += num_tasks_to_post;
+ }
+ }
+ // Post additional worker tasks to reach |max_concurrency|.
+ for (size_t i = 0; i < num_tasks_to_post; ++i) {
+ CallOnWorkerThread(std::make_unique<DefaultJobWorker>(shared_from_this(),
+ job_task_.get()));
+ }
+}
+
+void DefaultJobState::Join() {
+ bool can_run = false;
+ {
+ base::MutexGuard guard(&mutex_);
+ priority_ = TaskPriority::kUserBlocking;
+ // Reserve a worker for the joining thread. GetMaxConcurrency() is ignored
+ // here, but WaitForParticipationOpportunityLockRequired() waits for
+ // workers to return if necessary so we don't exceed GetMaxConcurrency().
+ num_worker_threads_ = platform_->NumberOfWorkerThreads() + 1;
+ ++active_workers_;
+ can_run = WaitForParticipationOpportunityLockRequired();
+ }
+ while (can_run) {
+ job_task_->Run(this);
+ base::MutexGuard guard(&mutex_);
+ can_run = WaitForParticipationOpportunityLockRequired();
+ }
+}
+
+void DefaultJobState::CancelAndWait() {
+ {
+ base::MutexGuard guard(&mutex_);
+ is_canceled_.store(true, std::memory_order_relaxed);
+ while (active_workers_ > 0) {
+ worker_released_condition_.Wait(&mutex_);
+ }
+ }
+}
+
+bool DefaultJobState::CanRunFirstTask() {
+ base::MutexGuard guard(&mutex_);
+ --pending_tasks_;
+ if (is_canceled_.load(std::memory_order_relaxed)) return false;
+ if (active_workers_ >=
+ std::min(job_task_->GetMaxConcurrency(), num_worker_threads_)) {
+ return false;
+ }
+ // Acquire current worker.
+ ++active_workers_;
+ return true;
+}
+
+bool DefaultJobState::DidRunTask() {
+ size_t num_tasks_to_post = 0;
+ {
+ base::MutexGuard guard(&mutex_);
+ const size_t max_concurrency = CappedMaxConcurrency();
+ if (is_canceled_.load(std::memory_order_relaxed) ||
+ active_workers_ > max_concurrency) {
+ // Release current worker and notify.
+ --active_workers_;
+ worker_released_condition_.NotifyOne();
+ return false;
+ }
+ // Consider |pending_tasks_| to avoid posting too many tasks.
+ if (max_concurrency > active_workers_ + pending_tasks_) {
+ num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
+ pending_tasks_ += num_tasks_to_post;
+ }
+ }
+ // Post additional worker tasks to reach |max_concurrency| in the case that
+ // max concurrency increased. This is not strictly necessary, since
+ // NotifyConcurrencyIncrease() should eventually be invoked. However, some
+ // users of PostJob() batch work and tend to call NotifyConcurrencyIncrease()
+ // late. Posting here allows us to spawn new workers sooner.
+ for (size_t i = 0; i < num_tasks_to_post; ++i) {
+ CallOnWorkerThread(std::make_unique<DefaultJobWorker>(shared_from_this(),
+ job_task_.get()));
+ }
+ return true;
+}
+
+bool DefaultJobState::WaitForParticipationOpportunityLockRequired() {
+ size_t max_concurrency = CappedMaxConcurrency();
+ while (active_workers_ > max_concurrency && active_workers_ > 1) {
+ worker_released_condition_.Wait(&mutex_);
+ max_concurrency = CappedMaxConcurrency();
+ }
+ if (active_workers_ <= max_concurrency) return true;
+ DCHECK_EQ(1U, active_workers_);
+ DCHECK_EQ(0U, max_concurrency);
+ active_workers_ = 0;
+ is_canceled_.store(true, std::memory_order_relaxed);
+ return false;
+}
+
+size_t DefaultJobState::CappedMaxConcurrency() const {
+ return std::min(job_task_->GetMaxConcurrency(), num_worker_threads_);
+}
+
+void DefaultJobState::CallOnWorkerThread(std::unique_ptr<Task> task) {
+ switch (priority_) {
+ case TaskPriority::kBestEffort:
+ return platform_->CallLowPriorityTaskOnWorkerThread(std::move(task));
+ case TaskPriority::kUserVisible:
+ return platform_->CallOnWorkerThread(std::move(task));
+ case TaskPriority::kUserBlocking:
+ return platform_->CallBlockingTaskOnWorkerThread(std::move(task));
+ }
+}
+
+DefaultJobHandle::DefaultJobHandle(std::shared_ptr<DefaultJobState> state)
+ : state_(std::move(state)) {
+ state_->NotifyConcurrencyIncrease();
+}
+
+DefaultJobHandle::~DefaultJobHandle() { DCHECK_EQ(nullptr, state_); }
+
+void DefaultJobHandle::Join() {
+ state_->Join();
+ state_ = nullptr;
+}
+void DefaultJobHandle::Cancel() {
+ state_->CancelAndWait();
+ state_ = nullptr;
+}
+
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/default-job.h b/deps/v8/src/libplatform/default-job.h
new file mode 100644
index 0000000000..2eb00cbd74
--- /dev/null
+++ b/deps/v8/src/libplatform/default-job.h
@@ -0,0 +1,126 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_DEFAULT_JOB_H_
+#define V8_LIBPLATFORM_DEFAULT_JOB_H_
+
+#include <atomic>
+#include <memory>
+
+#include "include/libplatform/libplatform-export.h"
+#include "include/v8-platform.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace platform {
+
+class V8_PLATFORM_EXPORT DefaultJobState
+ : NON_EXPORTED_BASE(public JobDelegate),
+ public std::enable_shared_from_this<DefaultJobState> {
+ public:
+ DefaultJobState(Platform* platform, std::unique_ptr<JobTask> job_task,
+ TaskPriority priority, size_t num_worker_threads)
+ : platform_(platform),
+ job_task_(std::move(job_task)),
+ priority_(priority),
+ num_worker_threads_(num_worker_threads) {}
+ virtual ~DefaultJobState();
+
+ void NotifyConcurrencyIncrease() override;
+ bool ShouldYield() override {
+ // Thread-safe but may return an outdated result.
+ return is_canceled_.load(std::memory_order_relaxed);
+ }
+
+ void Join();
+ void CancelAndWait();
+
+ // Must be called before running |job_task_| for the first time. Returns true
+ // if the worker thread must contribute and call DidRunTask(), or false if it
+ // should return.
+ bool CanRunFirstTask();
+ // Must be called after running |job_task_|. Returns true if the worker thread
+ // must contribute again, or false if it should return.
+ bool DidRunTask();
+
+ private:
+ // Called from the joining thread. Waits for the worker count to be below or
+ // equal to max concurrency (will happen when a worker calls
+ // DidRunTask()). Returns true if the joining thread should run a task, or
+ // false if joining was completed and all other workers returned because
+ // there's no work remaining.
+ bool WaitForParticipationOpportunityLockRequired();
+
+ // Returns GetMaxConcurrency() capped by the number of threads used by this
+ // job.
+ size_t CappedMaxConcurrency() const;
+
+ void CallOnWorkerThread(std::unique_ptr<Task> task);
+
+ Platform* const platform_;
+ std::unique_ptr<JobTask> job_task_;
+
+ // All members below are protected by |mutex_|.
+ base::Mutex mutex_;
+ TaskPriority priority_;
+ // Number of workers running this job.
+ size_t active_workers_ = 0;
+ // Number of posted tasks that aren't running this job yet.
+ size_t pending_tasks_ = 0;
+ // Indicates if the job is canceled.
+ std::atomic_bool is_canceled_{false};
+ // Number of worker threads available to schedule the worker task.
+ size_t num_worker_threads_;
+ // Signaled when a worker returns.
+ base::ConditionVariable worker_released_condition_;
+};
+
+class V8_PLATFORM_EXPORT DefaultJobHandle : public JobHandle {
+ public:
+ explicit DefaultJobHandle(std::shared_ptr<DefaultJobState> state);
+ ~DefaultJobHandle() override;
+
+ void NotifyConcurrencyIncrease() override {
+ state_->NotifyConcurrencyIncrease();
+ }
+
+ void Join() override;
+ void Cancel() override;
+ bool IsRunning() override { return state_ != nullptr; }
+
+ private:
+ std::shared_ptr<DefaultJobState> state_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultJobHandle);
+};
+
+class DefaultJobWorker : public Task {
+ public:
+ DefaultJobWorker(std::weak_ptr<DefaultJobState> state, JobTask* job_task)
+ : state_(std::move(state)), job_task_(job_task) {}
+ ~DefaultJobWorker() override = default;
+
+ void Run() override {
+ auto shared_state = state_.lock();
+ if (!shared_state) return;
+ if (!shared_state->CanRunFirstTask()) return;
+ do {
+ job_task_->Run(shared_state.get());
+ } while (shared_state->DidRunTask());
+ }
+
+ private:
+ friend class DefaultJob;
+
+ std::weak_ptr<DefaultJobState> state_;
+ JobTask* job_task_;
+
+ DISALLOW_COPY_AND_ASSIGN(DefaultJobWorker);
+};
+
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_DEFAULT_JOB_H_
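
The DefaultJobState/DefaultJobHandle pair above backs the Platform::PostJob() entry point added in this update. Below is a minimal sketch of how an embedder might drive it; ChunkedJob, its ProcessChunk() helper, and the atomic chunk counter are hypothetical, while v8::JobTask, v8::JobDelegate, v8::JobHandle, v8::TaskPriority, and Platform::PostJob() are the interfaces this commit implements.

// Illustrative only: ChunkedJob and ProcessChunk() are made up for this sketch.
#include <atomic>
#include <memory>
#include "v8-platform.h"

class ChunkedJob : public v8::JobTask {
 public:
  explicit ChunkedJob(size_t num_chunks) : remaining_(num_chunks) {}

  void Run(v8::JobDelegate* delegate) override {
    // Keep claiming chunks until the job is drained or the scheduler asks
    // this worker to yield.
    while (!delegate->ShouldYield()) {
      size_t left = remaining_.load(std::memory_order_relaxed);
      if (left == 0) return;
      if (remaining_.compare_exchange_weak(left, left - 1)) {
        ProcessChunk(left - 1);  // hypothetical per-chunk work
      }
    }
  }

  // Upper bound on useful parallelism: one worker per remaining chunk.
  // DefaultJobState additionally caps this by the platform's worker count.
  size_t GetMaxConcurrency() const override {
    return remaining_.load(std::memory_order_relaxed);
  }

 private:
  void ProcessChunk(size_t /*index*/) { /* hypothetical work item */ }
  std::atomic<size_t> remaining_;
};

void RunChunks(v8::Platform* platform) {
  std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
      v8::TaskPriority::kUserVisible, std::make_unique<ChunkedJob>(1024));
  handle->Join();  // the calling thread contributes until all chunks are done
}
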
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index ddbbd0d4be..e380161eb0 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -15,6 +15,7 @@
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
#include "src/libplatform/default-foreground-task-runner.h"
+#include "src/libplatform/default-job.h"
#include "src/libplatform/default-worker-threads-task-runner.h"
namespace v8 {
@@ -38,11 +39,10 @@ std::unique_ptr<v8::Platform> NewDefaultPlatform(
if (in_process_stack_dumping == InProcessStackDumping::kEnabled) {
v8::base::debug::EnableInProcessStackDumping();
}
- std::unique_ptr<DefaultPlatform> platform(
- new DefaultPlatform(idle_task_support, std::move(tracing_controller)));
- platform->SetThreadPoolSize(thread_pool_size);
+ auto platform = std::make_unique<DefaultPlatform>(
+ thread_pool_size, idle_task_support, std::move(tracing_controller));
platform->EnsureBackgroundTaskRunnerInitialized();
- return std::move(platform);
+ return platform;
}
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
@@ -64,19 +64,30 @@ void SetTracingController(
std::unique_ptr<v8::TracingController>(tracing_controller));
}
-const int DefaultPlatform::kMaxThreadPoolSize = 8;
+namespace {
+constexpr int kMaxThreadPoolSize = 16;
+
+int GetActualThreadPoolSize(int thread_pool_size) {
+ DCHECK_GE(thread_pool_size, 0);
+ if (thread_pool_size < 1) {
+ thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
+ }
+ return std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
+}
+} // namespace
DefaultPlatform::DefaultPlatform(
- IdleTaskSupport idle_task_support,
+ int thread_pool_size, IdleTaskSupport idle_task_support,
std::unique_ptr<v8::TracingController> tracing_controller)
- : thread_pool_size_(0),
+ : thread_pool_size_(GetActualThreadPoolSize(thread_pool_size)),
idle_task_support_(idle_task_support),
tracing_controller_(std::move(tracing_controller)),
- page_allocator_(new v8::base::PageAllocator()),
- time_function_for_testing_(nullptr) {
+ page_allocator_(std::make_unique<v8::base::PageAllocator>()) {
if (!tracing_controller_) {
tracing::TracingController* controller = new tracing::TracingController();
+#if !defined(V8_USE_PERFETTO)
controller->Initialize(nullptr);
+#endif
tracing_controller_.reset(controller);
}
}
@@ -84,21 +95,11 @@ DefaultPlatform::DefaultPlatform(
DefaultPlatform::~DefaultPlatform() {
base::MutexGuard guard(&lock_);
if (worker_threads_task_runner_) worker_threads_task_runner_->Terminate();
- for (auto it : foreground_task_runner_map_) {
+ for (const auto& it : foreground_task_runner_map_) {
it.second->Terminate();
}
}
-void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
- base::MutexGuard guard(&lock_);
- DCHECK_GE(thread_pool_size, 0);
- if (thread_pool_size < 1) {
- thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
- }
- thread_pool_size_ =
- std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
-}
-
namespace {
double DefaultTimeFunction() {
@@ -199,6 +200,24 @@ bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) {
return idle_task_support_ == IdleTaskSupport::kEnabled;
}
+std::unique_ptr<JobHandle> DefaultPlatform::PostJob(
+ TaskPriority priority, std::unique_ptr<JobTask> job_task) {
+ size_t num_worker_threads = 0;
+ switch (priority) {
+ case TaskPriority::kUserBlocking:
+ num_worker_threads = NumberOfWorkerThreads();
+ break;
+ case TaskPriority::kUserVisible:
+ num_worker_threads = NumberOfWorkerThreads() / 2;
+ break;
+ case TaskPriority::kBestEffort:
+ num_worker_threads = 1;
+ break;
+ }
+ return std::make_unique<DefaultJobHandle>(std::make_shared<DefaultJobState>(
+ this, std::move(job_task), priority, num_worker_threads));
+}
+
double DefaultPlatform::MonotonicallyIncreasingTime() {
if (time_function_for_testing_) return time_function_for_testing_();
return DefaultTimeFunction();
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index f49277954c..d459e32490 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -5,11 +5,8 @@
#ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
#define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
-#include <functional>
#include <map>
#include <memory>
-#include <queue>
-#include <vector>
#include "include/libplatform/libplatform-export.h"
#include "include/libplatform/libplatform.h"
@@ -32,13 +29,12 @@ class DefaultPageAllocator;
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
explicit DefaultPlatform(
+ int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
std::unique_ptr<v8::TracingController> tracing_controller = {});
~DefaultPlatform() override;
- void SetThreadPoolSize(int thread_pool_size);
-
void EnsureBackgroundTaskRunnerInitialized();
bool PumpMessageLoop(
@@ -62,6 +58,8 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
double delay_in_seconds) override;
bool IdleTasksEnabled(Isolate* isolate) override;
+ std::unique_ptr<JobHandle> PostJob(
+ TaskPriority priority, std::unique_ptr<JobTask> job_state) override;
double MonotonicallyIncreasingTime() override;
double CurrentClockTimeMillis() override;
v8::TracingController* GetTracingController() override;
@@ -69,10 +67,8 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
v8::PageAllocator* GetPageAllocator() override;
private:
- static const int kMaxThreadPoolSize;
-
base::Mutex lock_;
- int thread_pool_size_;
+ const int thread_pool_size_;
IdleTaskSupport idle_task_support_;
std::shared_ptr<DefaultWorkerThreadsTaskRunner> worker_threads_task_runner_;
std::map<v8::Isolate*, std::shared_ptr<DefaultForegroundTaskRunner>>
@@ -81,7 +77,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
std::unique_ptr<TracingController> tracing_controller_;
std::unique_ptr<PageAllocator> page_allocator_;
- TimeFunction time_function_for_testing_;
+ TimeFunction time_function_for_testing_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
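
The thread-pool sizing that used to live in SetThreadPoolSize() is now decided once, in the constructor, by GetActualThreadPoolSize(). The following standalone sketch mirrors that clamping rule; the constant 16 and the NumberOfProcessors() - 1 fallback are taken from the hunk above, while the ActualThreadPoolSize name and the sample core count are made up.

#include <algorithm>

// Mirror of the clamping performed by GetActualThreadPoolSize(); illustrative.
int ActualThreadPoolSize(int requested, int num_processors) {
  constexpr int kMaxThreadPoolSize = 16;  // raised from 8 in this commit
  if (requested < 1) requested = num_processors - 1;
  return std::max(std::min(requested, kMaxThreadPoolSize), 1);
}

// E.g. on an 8-core machine: ActualThreadPoolSize(0, 8) == 7,
// ActualThreadPoolSize(3, 8) == 3, ActualThreadPoolSize(64, 8) == 16.
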
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
deleted file mode 100644
index 60cc9a98a8..0000000000
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/json-trace-event-listener.h"
-
-#include <cmath>
-
-#include "base/trace_event/common/trace_event_common.h"
-#include "perfetto/tracing.h"
-#include "protos/perfetto/trace/chrome/chrome_trace_packet.pb.h"
-#include "protos/perfetto/trace/trace.pb.h"
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-JSONTraceEventListener::JSONTraceEventListener(std::ostream* stream)
- : stream_(stream) {
- *stream_ << "{\"traceEvents\":[";
-}
-
-JSONTraceEventListener::~JSONTraceEventListener() { *stream_ << "]}"; }
-
-// TODO(petermarshall): Clean up this code which was copied from trace-writer.cc
-// once we've removed that file.
-
-// Writes the given string, taking care to escape characters when necessary.
-void JSONTraceEventListener::AppendJSONString(const char* str) {
- size_t len = strlen(str);
- *stream_ << "\"";
- for (size_t i = 0; i < len; ++i) {
- // All of the permitted escape sequences in JSON strings, as per
- // https://mathiasbynens.be/notes/javascript-escapes
- switch (str[i]) {
- case '\b':
- *stream_ << "\\b";
- break;
- case '\f':
- *stream_ << "\\f";
- break;
- case '\n':
- *stream_ << "\\n";
- break;
- case '\r':
- *stream_ << "\\r";
- break;
- case '\t':
- *stream_ << "\\t";
- break;
- case '\"':
- *stream_ << "\\\"";
- break;
- case '\\':
- *stream_ << "\\\\";
- break;
- // Note that because we use double quotes for JSON strings,
- // we don't need to escape single quotes.
- default:
- *stream_ << str[i];
- break;
- }
- }
- *stream_ << "\"";
-}
-
-void JSONTraceEventListener::AppendArgValue(
- const ::perfetto::protos::ChromeTraceEvent_Arg& arg) {
- if (arg.has_bool_value()) {
- *stream_ << (arg.bool_value() ? "true" : "false");
- } else if (arg.has_uint_value()) {
- *stream_ << arg.uint_value();
- } else if (arg.has_int_value()) {
- *stream_ << arg.int_value();
- } else if (arg.has_double_value()) {
- std::string real;
- double val = arg.double_value();
- if (std::isfinite(val)) {
- std::ostringstream convert_stream;
- convert_stream << val;
- real = convert_stream.str();
- // Ensure that the number has a .0 if there's no decimal or 'e'. This
- // makes sure that when we read the JSON back, it's interpreted as a
- // real rather than an int.
- if (real.find('.') == std::string::npos &&
- real.find('e') == std::string::npos &&
- real.find('E') == std::string::npos) {
- real += ".0";
- }
- } else if (std::isnan(val)) {
- // The JSON spec doesn't allow NaN and Infinity (since these are
- // objects in EcmaScript). Use strings instead.
- real = "\"NaN\"";
- } else if (val < 0) {
- real = "\"-Infinity\"";
- } else {
- real = "\"Infinity\"";
- }
- *stream_ << real;
- } else if (arg.has_string_value()) {
- AppendJSONString(arg.string_value().c_str());
- } else if (arg.has_pointer_value()) {
- // JSON only supports double and int numbers.
- // So as not to lose bits from a 64-bit pointer, output as a hex string.
- *stream_ << "\"0x" << std::hex << arg.pointer_value() << std::dec << "\"";
- } else if (arg.has_json_value()) {
- *stream_ << arg.json_value();
- }
- // V8 does not emit proto arguments currently.
- CHECK(!arg.has_traced_value());
-}
-
-void JSONTraceEventListener::ProcessPacket(
- const ::perfetto::protos::TracePacket& packet) {
- for (const ::perfetto::protos::ChromeTraceEvent& event :
- packet.chrome_events().trace_events()) {
- if (append_comma_) *stream_ << ",";
- append_comma_ = true;
-
- // TODO(petermarshall): Handle int64 fields differently?
- // clang-format off
- *stream_ << "{\"pid\":" << event.process_id()
- << ",\"tid\":" << event.thread_id()
- << ",\"ts\":" << event.timestamp()
- << ",\"tts\":" << event.thread_timestamp()
- << ",\"ph\":\"" << static_cast<char>(event.phase())
- << "\",\"cat\":\"" << event.category_group_name()
- << "\",\"name\":\"" << event.name()
- << "\",\"dur\":" << event.duration()
- << ",\"tdur\":" << event.thread_duration();
- // clang-format on
-
- if (event.flags() &
- (TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT)) {
- *stream_ << ",\"bind_id\":\"0x" << std::hex << event.bind_id() << "\""
- << std::dec;
- if (event.flags() & TRACE_EVENT_FLAG_FLOW_IN) {
- *stream_ << ",\"flow_in\":true";
- }
- if (event.flags() & TRACE_EVENT_FLAG_FLOW_OUT) {
- *stream_ << ",\"flow_out\":true";
- }
- }
- if (event.flags() & TRACE_EVENT_FLAG_HAS_ID) {
- if (event.has_scope()) {
- *stream_ << ",\"scope\":\"" << event.scope() << "\"";
- }
- // So as not to lose bits from a 64-bit integer, output as a hex string.
- *stream_ << ",\"id\":\"0x" << std::hex << event.id() << "\"" << std::dec;
- }
-
- *stream_ << ",\"args\":{";
-
- int i = 0;
- for (const ::perfetto::protos::ChromeTraceEvent_Arg& arg : event.args()) {
- if (i++ > 0) *stream_ << ",";
- *stream_ << "\"" << arg.name() << "\":";
- AppendArgValue(arg);
- }
- *stream_ << "}}";
- }
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
deleted file mode 100644
index a0ab31c981..0000000000
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
-#define V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
-
-#include <ostream>
-
-#include "libplatform/libplatform-export.h"
-#include "src/libplatform/tracing/trace-event-listener.h"
-
-namespace perfetto {
-namespace protos {
-class ChromeTraceEvent_Arg;
-} // namespace protos
-} // namespace perfetto
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-// A listener that converts the proto trace data to JSON and writes it to a
-// file.
-class V8_PLATFORM_EXPORT JSONTraceEventListener final
- : public TraceEventListener {
- public:
- explicit JSONTraceEventListener(std::ostream* stream);
- ~JSONTraceEventListener() override;
-
- void ProcessPacket(const ::perfetto::protos::TracePacket& packet) override;
-
- private:
- // Internal implementation
- void AppendJSONString(const char* str);
- void AppendArgValue(const ::perfetto::protos::ChromeTraceEvent_Arg& arg);
-
- std::ostream* stream_;
- bool append_comma_ = false;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index fdbfdddb51..2019b3ef5b 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -13,30 +13,38 @@
#include "src/base/platform/time.h"
#ifdef V8_USE_PERFETTO
-#include "base/trace_event/common/trace_event_common.h"
-#include "perfetto/tracing.h"
-#include "protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h"
-#include "protos/perfetto/trace/trace_packet.pbzero.h"
+#include "perfetto/ext/trace_processor/export_json.h"
+#include "perfetto/trace_processor/trace_processor.h"
+#include "perfetto/tracing/tracing.h"
+#include "protos/perfetto/config/data_source_config.gen.h"
+#include "protos/perfetto/config/trace_config.gen.h"
+#include "protos/perfetto/config/track_event/track_event_config.gen.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
-#include "src/libplatform/tracing/json-trace-event-listener.h"
+#include "src/libplatform/tracing/trace-event-listener.h"
#endif // V8_USE_PERFETTO
#ifdef V8_USE_PERFETTO
-class V8DataSource : public perfetto::DataSource<V8DataSource> {
+class JsonOutputWriter : public perfetto::trace_processor::json::OutputWriter {
public:
- void OnSetup(const SetupArgs&) override {}
- void OnStart(const StartArgs&) override {}
- void OnStop(const StopArgs&) override {}
-};
+ explicit JsonOutputWriter(std::ostream* stream) : stream_(stream) {}
+
+ perfetto::trace_processor::util::Status AppendString(
+ const std::string& string) override {
+ *stream_ << string;
+ return perfetto::trace_processor::util::OkStatus();
+ }
-PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(V8DataSource);
+ private:
+ std::ostream* stream_;
+};
#endif // V8_USE_PERFETTO
namespace v8 {
namespace platform {
namespace tracing {
+#if !defined(V8_USE_PERFETTO)
static const size_t kMaxCategoryGroups = 200;
// Parallel arrays g_category_groups and g_category_group_enabled are separate
@@ -60,12 +68,14 @@ const int g_num_builtin_categories = 3;
// Skip default categories.
v8::base::AtomicWord g_category_index = g_num_builtin_categories;
+#endif // !defined(V8_USE_PERFETTO)
-TracingController::TracingController() = default;
+TracingController::TracingController() { mutex_.reset(new base::Mutex()); }
TracingController::~TracingController() {
StopTracing();
+#if !defined(V8_USE_PERFETTO)
{
// Free memory for category group names allocated via strdup.
base::MutexGuard lock(mutex_.get());
@@ -76,11 +86,7 @@ TracingController::~TracingController() {
}
g_category_index = g_num_builtin_categories;
}
-}
-
-void TracingController::Initialize(TraceBuffer* trace_buffer) {
- trace_buffer_.reset(trace_buffer);
- mutex_.reset(new base::Mutex());
+#endif // !defined(V8_USE_PERFETTO)
}
#ifdef V8_USE_PERFETTO
@@ -88,14 +94,16 @@ void TracingController::InitializeForPerfetto(std::ostream* output_stream) {
output_stream_ = output_stream;
DCHECK_NOT_NULL(output_stream);
DCHECK(output_stream->good());
- mutex_.reset(new base::Mutex());
}
void TracingController::SetTraceEventListenerForTesting(
TraceEventListener* listener) {
listener_for_testing_ = listener;
}
-#endif
+#else // !V8_USE_PERFETTO
+void TracingController::Initialize(TraceBuffer* trace_buffer) {
+ trace_buffer_.reset(trace_buffer);
+}
int64_t TracingController::CurrentTimestampMicroseconds() {
return base::TimeTicks::HighResolutionNow().ToInternalValue();
@@ -105,60 +113,6 @@ int64_t TracingController::CurrentCpuTimestampMicroseconds() {
return base::ThreadTicks::Now().ToInternalValue();
}
-namespace {
-
-#ifdef V8_USE_PERFETTO
-void AddArgsToTraceProto(
- ::perfetto::protos::pbzero::ChromeTraceEvent* event, int num_args,
- const char** arg_names, const uint8_t* arg_types,
- const uint64_t* arg_values,
- std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables) {
- for (int i = 0; i < num_args; i++) {
- ::perfetto::protos::pbzero::ChromeTraceEvent_Arg* arg = event->add_args();
- // TODO(petermarshall): Set name_index instead if need be.
- arg->set_name(arg_names[i]);
-
- TraceObject::ArgValue arg_value;
- arg_value.as_uint = arg_values[i];
- switch (arg_types[i]) {
- case TRACE_VALUE_TYPE_CONVERTABLE: {
- // TODO(petermarshall): Support AppendToProto for Convertables.
- std::string json_value;
- arg_convertables[i]->AppendAsTraceFormat(&json_value);
- arg->set_json_value(json_value.c_str());
- break;
- }
- case TRACE_VALUE_TYPE_BOOL:
- arg->set_bool_value(arg_value.as_uint);
- break;
- case TRACE_VALUE_TYPE_UINT:
- arg->set_uint_value(arg_value.as_uint);
- break;
- case TRACE_VALUE_TYPE_INT:
- arg->set_int_value(arg_value.as_int);
- break;
- case TRACE_VALUE_TYPE_DOUBLE:
- arg->set_double_value(arg_value.as_double);
- break;
- case TRACE_VALUE_TYPE_POINTER:
- arg->set_pointer_value(arg_value.as_uint);
- break;
- // There is no difference between copy strings and regular strings for
- // Perfetto; the set_string_value(const char*) API will copy the string
- // into the protobuf by default.
- case TRACE_VALUE_TYPE_COPY_STRING:
- case TRACE_VALUE_TYPE_STRING:
- arg->set_string_value(arg_value.as_string);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-#endif // V8_USE_PERFETTO
-
-} // namespace
-
uint64_t TracingController::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@@ -182,42 +136,6 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
unsigned int flags, int64_t timestamp) {
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
-#ifdef V8_USE_PERFETTO
- // Don't use COMPLETE events with perfetto - instead transform them into
- // BEGIN/END pairs. This avoids the need for a thread-local stack of pending
- // trace events as perfetto does not support handles into the trace buffer.
- if (phase == TRACE_EVENT_PHASE_COMPLETE) phase = TRACE_EVENT_PHASE_BEGIN;
-
- V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
- auto packet = ctx.NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_name(name);
- trace_event->set_timestamp(timestamp);
- trace_event->set_phase(phase);
- trace_event->set_thread_id(base::OS::GetCurrentThreadId());
- trace_event->set_duration(0);
- trace_event->set_thread_duration(0);
- if (scope) trace_event->set_scope(scope);
- trace_event->set_id(id);
- trace_event->set_flags(flags);
- if (category_enabled_flag) {
- const char* category_group_name =
- GetCategoryGroupName(category_enabled_flag);
- DCHECK_NOT_NULL(category_group_name);
- trace_event->set_category_group_name(category_group_name);
- }
- trace_event->set_process_id(base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(cpu_now_us);
- trace_event->set_bind_id(bind_id);
-
- AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types,
- arg_values, arg_convertables);
- });
- return 0;
-#else
-
uint64_t handle = 0;
if (recording_.load(std::memory_order_acquire)) {
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
@@ -232,7 +150,6 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
}
}
return handle;
-#endif // V8_USE_PERFETTO
}
void TracingController::UpdateTraceEventDuration(
@@ -240,24 +157,9 @@ void TracingController::UpdateTraceEventDuration(
int64_t now_us = CurrentTimestampMicroseconds();
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
-#ifdef V8_USE_PERFETTO
- V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
- auto packet = ctx.NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_phase(TRACE_EVENT_PHASE_END);
- trace_event->set_thread_id(base::OS::GetCurrentThreadId());
- trace_event->set_timestamp(now_us);
- trace_event->set_process_id(base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(cpu_now_us);
- });
-#else
-
TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
if (!trace_object) return;
trace_object->UpdateDuration(now_us, cpu_now_us);
-#endif // V8_USE_PERFETTO
}
const char* TracingController::GetCategoryGroupName(
@@ -275,23 +177,26 @@ const char* TracingController::GetCategoryGroupName(
(category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
return g_category_groups[category_index];
}
+#endif // !defined(V8_USE_PERFETTO)
void TracingController::StartTracing(TraceConfig* trace_config) {
#ifdef V8_USE_PERFETTO
DCHECK_NOT_NULL(output_stream_);
DCHECK(output_stream_->good());
- json_listener_ = std::make_unique<JSONTraceEventListener>(output_stream_);
+ perfetto::trace_processor::Config processor_config;
+ trace_processor_ =
+ perfetto::trace_processor::TraceProcessorStorage::CreateInstance(
+ processor_config);
- // TODO(petermarshall): Set other the params for the config.
::perfetto::TraceConfig perfetto_trace_config;
perfetto_trace_config.add_buffers()->set_size_kb(4096);
- auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
- ds_config->set_name("v8.trace_events");
-
- perfetto::DataSourceDescriptor dsd;
- dsd.set_name("v8.trace_events");
- bool registered = V8DataSource::Register(dsd);
- CHECK(registered);
+ auto ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
+ ds_config->set_name("track_event");
+ perfetto::protos::gen::TrackEventConfig te_config;
+ te_config.add_disabled_categories("*");
+ for (const auto& category : trace_config->GetEnabledCategories())
+ te_config.add_enabled_categories(category);
+ ds_config->set_track_event_config_raw(te_config.SerializeAsString());
tracing_session_ =
perfetto::Tracing::NewTrace(perfetto::BackendType::kUnspecifiedBackend);
@@ -305,7 +210,9 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
{
base::MutexGuard lock(mutex_.get());
recording_.store(true, std::memory_order_release);
+#ifndef V8_USE_PERFETTO
UpdateCategoryGroupEnabledFlags();
+#endif
observers_copy = observers_;
}
for (auto o : observers_copy) {
@@ -318,7 +225,9 @@ void TracingController::StopTracing() {
if (!recording_.compare_exchange_strong(expected, false)) {
return;
}
+#ifndef V8_USE_PERFETTO
UpdateCategoryGroupEnabledFlags();
+#endif
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
base::MutexGuard lock(mutex_.get());
@@ -329,23 +238,21 @@ void TracingController::StopTracing() {
}
#ifdef V8_USE_PERFETTO
- // Emit a fake trace event from the main thread. The final trace event is
- // sometimes skipped because perfetto can't guarantee that the caller is
- // totally finished writing to it without synchronization. To avoid the
- // situation where we lose the last trace event, add a fake one here that will
- // be sacrificed.
- // TODO(petermarshall): Use the Client API to flush here rather than this
- // workaround when that becomes available.
- V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
- auto packet = ctx.NewTracePacket();
- });
tracing_session_->StopBlocking();
std::vector<char> trace = tracing_session_->ReadTraceBlocking();
- json_listener_->ParseFromArray(trace);
+ std::unique_ptr<uint8_t[]> trace_bytes(new uint8_t[trace.size()]);
+ std::copy(&trace[0], &trace[0] + trace.size(), &trace_bytes[0]);
+ trace_processor_->Parse(std::move(trace_bytes), trace.size());
+ trace_processor_->NotifyEndOfFile();
+ JsonOutputWriter output_writer(output_stream_);
+ auto status = perfetto::trace_processor::json::ExportJson(
+ trace_processor_.get(), &output_writer, nullptr, nullptr, nullptr);
+ DCHECK(status.ok());
+
if (listener_for_testing_) listener_for_testing_->ParseFromArray(trace);
- json_listener_.reset();
+ trace_processor_.reset();
#else
{
@@ -356,6 +263,7 @@ void TracingController::StopTracing() {
#endif // V8_USE_PERFETTO
}
+#if !defined(V8_USE_PERFETTO)
void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
unsigned char enabled_flag = 0;
const char* category_group = g_category_groups[category_index];
@@ -433,6 +341,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabled(
}
return category_group_enabled;
}
+#endif // !defined(V8_USE_PERFETTO)
void TracingController::AddTraceStateObserver(
v8::TracingController::TraceStateObserver* observer) {
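
With this change the Perfetto path no longer registers a custom V8 data source and JSONTraceEventListener; it records through the track_event data source and converts the trace to JSON via the trace-processor export in StopTracing(). A rough sketch of how the controller is driven in a V8_USE_PERFETTO build follows; the output path and the wrapper function are hypothetical, while InitializeForPerfetto(), StartTracing(), and StopTracing() are the methods shown above.

#include <fstream>
#include "libplatform/v8-tracing.h"

// Illustrative only; assumes V8 was built with V8_USE_PERFETTO.
void TraceWorkloadToJson(v8::platform::tracing::TraceConfig* trace_config) {
  std::ofstream out("trace.json");  // hypothetical output path
  v8::platform::tracing::TracingController controller;
  controller.InitializeForPerfetto(&out);  // replaces Initialize(TraceBuffer*)
  controller.StartTracing(trace_config);
  // ... run the code being traced ...
  controller.StopTracing();  // exports the Perfetto trace as JSON to |out|
}
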
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 32f7e3e652..1d44b4c833 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -79,6 +79,10 @@ namespace internal {
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
/* number of code spaces reserved per wasm module */ \
HR(wasm_module_num_code_spaces, V8.WasmModuleNumberOfCodeSpaces, 1, 128, 20) \
+ /* number of live modules per isolate */ \
+ HR(wasm_modules_per_isolate, V8.WasmModulesPerIsolate, 1, 1024, 30) \
+ /* number of live modules per engine (i.e. whole process) */ \
+ HR(wasm_modules_per_engine, V8.WasmModulesPerEngine, 1, 1024, 30) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
/* Ticks observed in a single Turbofan compilation, in 1K */ \
@@ -237,8 +241,6 @@ namespace internal {
#define STATS_COUNTER_LIST_1(SC) \
/* Global Handle Count*/ \
SC(global_handles, V8.GlobalHandles) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
SC(maps_normalized, V8.MapsNormalized) \
SC(maps_created, V8.MapsCreated) \
SC(elements_transitions, V8.ObjectElementsTransitions) \
@@ -264,7 +266,7 @@ namespace internal {
SC(total_compile_size, V8.TotalCompileSize) \
/* Number of contexts created from scratch. */ \
SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
- /* Number of contexts created by partial snapshot. */ \
+ /* Number of contexts created by context snapshot. */ \
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
/* Number of code objects found from pc. */ \
SC(pc_to_code, V8.PcToCode) \
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 44d4278896..02a6feee2e 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -772,7 +772,6 @@ class RuntimeCallTimer final {
V(Int8Array_New) \
V(Isolate_DateTimeConfigurationChangeNotification) \
V(Isolate_LocaleConfigurationChangeNotification) \
- V(FinalizationGroup_Cleanup) \
V(JSON_Parse) \
V(JSON_Stringify) \
V(Map_AsArray) \
@@ -895,6 +894,9 @@ class RuntimeCallTimer final {
V(Value_NumberValue) \
V(Value_TypeOf) \
V(Value_Uint32Value) \
+ V(WasmCompileError_New) \
+ V(WasmLinkError_New) \
+ V(WasmRuntimeError_New) \
V(WeakMap_Get) \
V(WeakMap_New) \
V(WeakMap_Set)
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index dc79ffda5e..00edcc8c9b 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -19,6 +19,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
+#include "src/heap/combined-heap.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
@@ -87,8 +88,6 @@ static const char* ComputeMarker(const wasm::WasmCode* code) {
switch (code->kind()) {
case wasm::WasmCode::kFunction:
return code->is_liftoff() ? "" : "*";
- case wasm::WasmCode::kInterpreterEntry:
- return "~";
default:
return "";
}
@@ -1838,7 +1837,7 @@ void Logger::LogAccessorCallbacks() {
void Logger::LogAllMaps() {
DisallowHeapAllocation no_gc;
Heap* heap = isolate_->heap();
- HeapObjectIterator iterator(heap);
+ CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
if (!obj.IsMap()) continue;
@@ -2069,10 +2068,6 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
description = "A Wasm to JavaScript adapter";
tag = CodeEventListener::STUB_TAG;
break;
- case AbstractCode::WASM_INTERPRETER_ENTRY:
- description = "A Wasm to Interpreter adapter";
- tag = CodeEventListener::STUB_TAG;
- break;
case AbstractCode::C_WASM_ENTRY:
description = "A C to Wasm entry stub";
tag = CodeEventListener::STUB_TAG;
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 9e4dfeb28d..98723a533a 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -117,7 +117,7 @@ class Logger : public CodeEventListener {
};
explicit Logger(Isolate* isolate);
- ~Logger();
+ ~Logger() override;
// The separator is used to write an unescaped "," into the log.
static const LogSeparator kNext;
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index f0788b7ca1..7447eb5ec3 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -16,12 +16,11 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(AllocationMemento, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AllocationMemento)
OBJECT_CONSTRUCTORS_IMPL(AllocationSite, Struct)
NEVER_READ_ONLY_SPACE_IMPL(AllocationSite)
-CAST_ACCESSOR(AllocationMemento)
CAST_ACCESSOR(AllocationSite)
ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index fde9b7d602..1da5925bee 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -162,12 +162,9 @@ class AllocationSite : public Struct {
OBJECT_CONSTRUCTORS(AllocationSite, Struct);
};
-class AllocationMemento : public Struct {
+class AllocationMemento
+ : public TorqueGeneratedAllocationMemento<AllocationMemento, Struct> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS)
-
DECL_ACCESSORS(allocation_site, Object)
inline bool IsValid() const;
@@ -175,11 +172,8 @@ class AllocationMemento : public Struct {
inline Address GetAllocationSiteUnchecked() const;
DECL_PRINTER(AllocationMemento)
- DECL_VERIFIER(AllocationMemento)
-
- DECL_CAST(AllocationMemento)
- OBJECT_CONSTRUCTORS(AllocationMemento, Struct);
+ TQ_OBJECT_CONSTRUCTORS(AllocationMemento)
};
} // namespace internal
diff --git a/deps/v8/src/objects/allocation-site.tq b/deps/v8/src/objects/allocation-site.tq
index 1a5f5825fb..3710268539 100644
--- a/deps/v8/src/objects/allocation-site.tq
+++ b/deps/v8/src/objects/allocation-site.tq
@@ -3,6 +3,8 @@
// found in the LICENSE file.
extern class AllocationSite extends Struct;
+
+@generateCppClass
extern class AllocationMemento extends Struct {
allocation_site: AllocationSite;
}
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 0bec61a7b8..93d2bcd7f4 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -22,24 +22,15 @@ namespace v8 {
namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo)
-OBJECT_CONSTRUCTORS_IMPL(AccessorInfo, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo)
-CAST_ACCESSOR(AccessorInfo)
-
-ACCESSORS(AccessorInfo, name, Name, kNameOffset)
-SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
-ACCESSORS(AccessorInfo, expected_receiver_type, Object,
- kExpectedReceiverTypeOffset)
-
ACCESSORS_CHECKED2(AccessorInfo, getter, Object, kGetterOffset, true,
Foreign::IsNormalized(value))
ACCESSORS_CHECKED2(AccessorInfo, setter, Object, kSetterOffset, true,
Foreign::IsNormalized(value))
-ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
-ACCESSORS(AccessorInfo, data, Object, kDataOffset)
bool AccessorInfo::has_getter() {
bool result = getter() != Smi::zero();
@@ -97,12 +88,12 @@ bool AccessorInfo::HasExpectedReceiverType() {
}
BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
- kCanInterceptSymbolsBit)
-BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
-BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
-BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
-BOOL_ACCESSORS(InterceptorInfo, flags, has_no_side_effect, kHasNoSideEffect)
-
+ CanInterceptSymbolsBit::kShift)
+BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, AllCanReadBit::kShift)
+BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, NonMaskingBit::kShift)
+BOOL_ACCESSORS(InterceptorInfo, flags, is_named, NamedBit::kShift)
+BOOL_ACCESSORS(InterceptorInfo, flags, has_no_side_effect,
+ HasNoSideEffectBit::kShift)
bool CallHandlerInfo::IsSideEffectFreeCallHandlerInfo() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 77ff26d659..969adb9fca 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -24,27 +24,17 @@ namespace internal {
// If the accessor in the prototype has the READ_ONLY property attribute, then
// a new value is added to the derived object when the property is set.
// This shadows the accessor in the prototype.
-class AccessorInfo : public Struct {
+class AccessorInfo : public TorqueGeneratedAccessorInfo<AccessorInfo, Struct> {
public:
- DECL_ACCESSORS(name, Name)
- DECL_INT_ACCESSORS(flags)
- DECL_ACCESSORS(expected_receiver_type, Object)
// This directly points at a foreign C function to be used from the runtime.
DECL_ACCESSORS(getter, Object)
inline bool has_getter();
DECL_ACCESSORS(setter, Object)
inline bool has_setter();
- // This either points at the same as above, or a trampoline in case we are
- // running with the simulator. Use these entries from generated code.
- DECL_ACCESSORS(js_getter, Object)
- DECL_ACCESSORS(data, Object)
static Address redirect(Address address, AccessorComponent component);
Address redirected_getter() const;
- // Dispatched behavior.
- DECL_PRINTER(AccessorInfo)
-
DECL_BOOLEAN_ACCESSORS(all_can_read)
DECL_BOOLEAN_ACCESSORS(all_can_write)
DECL_BOOLEAN_ACCESSORS(is_special_data_property)
@@ -68,35 +58,23 @@ class AccessorInfo : public Struct {
Handle<Map> map);
inline bool IsCompatibleReceiver(Object receiver);
- DECL_CAST(AccessorInfo)
-
- // Dispatched behavior.
- DECL_VERIFIER(AccessorInfo)
-
// Append all descriptors to the array that are not already there.
// Return number added.
static int AppendUnique(Isolate* isolate, Handle<Object> descriptors,
Handle<FixedArray> array, int valid_descriptors);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_ACCESSOR_INFO_FIELDS)
-
private:
inline bool HasExpectedReceiverType();
// Bit positions in |flags|.
DEFINE_TORQUE_GENERATED_ACCESSOR_INFO_FLAGS()
- OBJECT_CONSTRUCTORS(AccessorInfo, Struct);
+ TQ_OBJECT_CONSTRUCTORS(AccessorInfo)
};
class AccessCheckInfo
: public TorqueGeneratedAccessCheckInfo<AccessCheckInfo, Struct> {
public:
- // Dispatched behavior.
- DECL_PRINTER(AccessCheckInfo)
-
static AccessCheckInfo Get(Isolate* isolate, Handle<JSObject> receiver);
TQ_OBJECT_CONSTRUCTORS(AccessCheckInfo)
@@ -111,14 +89,7 @@ class InterceptorInfo
DECL_BOOLEAN_ACCESSORS(is_named)
DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
- // Dispatched behavior.
- DECL_PRINTER(InterceptorInfo)
-
- static const int kCanInterceptSymbolsBit = 0;
- static const int kAllCanReadBit = 1;
- static const int kNonMasking = 2;
- static const int kNamed = 3;
- static const int kHasNoSideEffect = 4;
+ DEFINE_TORQUE_GENERATED_INTERCEPTOR_INFO_FLAGS()
TQ_OBJECT_CONSTRUCTORS(InterceptorInfo)
};
diff --git a/deps/v8/src/objects/api-callbacks.tq b/deps/v8/src/objects/api-callbacks.tq
index 47d25546f5..8a8aab59f8 100644
--- a/deps/v8/src/objects/api-callbacks.tq
+++ b/deps/v8/src/objects/api-callbacks.tq
@@ -9,7 +9,16 @@ extern class CallHandlerInfo extends Struct {
data: Object;
}
+bitfield struct InterceptorInfoFlags extends uint31 {
+ can_intercept_symbols: bool: 1 bit;
+ all_can_read: bool: 1 bit;
+ non_masking: bool: 1 bit;
+ named: bool: 1 bit;
+ has_no_side_effect: bool: 1 bit;
+}
+
@generateCppClass
+@generatePrint
extern class InterceptorInfo extends Struct {
getter: NonNullForeign|Zero|Undefined;
setter: NonNullForeign|Zero|Undefined;
@@ -19,10 +28,11 @@ extern class InterceptorInfo extends Struct {
enumerator: NonNullForeign|Zero|Undefined;
definer: NonNullForeign|Zero|Undefined;
data: Object;
- flags: Smi;
+ flags: SmiTagged<InterceptorInfoFlags>;
}
@generateCppClass
+@generatePrint
extern class AccessCheckInfo extends Struct {
callback: Foreign|Zero|Undefined;
named_interceptor: InterceptorInfo|Zero|Undefined;
@@ -44,12 +54,16 @@ bitfield struct AccessorInfoFlags extends uint31 {
initial_attributes: PropertyAttributes: 3 bit;
}
+@generateCppClass
+@generatePrint
extern class AccessorInfo extends Struct {
- name: Object;
+ name: Name;
flags: SmiTagged<AccessorInfoFlags>;
expected_receiver_type: Object;
setter: NonNullForeign|Zero;
getter: NonNullForeign|Zero;
+ // This either points at the same as above, or a trampoline in case we are
+ // running with the simulator. Use these entries from generated code.
js_getter: NonNullForeign|Zero;
data: Object;
}
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 52cfb810fb..ed2d31d1dd 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -113,9 +113,6 @@ class AliasedArgumentsEntry
: public TorqueGeneratedAliasedArgumentsEntry<AliasedArgumentsEntry,
Struct> {
public:
- // Dispatched behavior.
- DECL_PRINTER(AliasedArgumentsEntry)
-
TQ_OBJECT_CONSTRUCTORS(AliasedArgumentsEntry)
};
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index e24eefe631..5211707eae 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -29,6 +29,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
type SloppyArgumentsElements extends FixedArray;
@generateCppClass
+@generatePrint
extern class AliasedArgumentsEntry extends Struct {
aliased_context_slot: Smi;
}
@@ -36,169 +37,167 @@ extern class AliasedArgumentsEntry extends Struct {
// TODO(danno): This should be a namespace {} once supported
namespace arguments {
- macro NewJSStrictArgumentsObject(implicit context: Context)(
- elements: FixedArray): JSStrictArgumentsObject {
- const map = GetStrictArgumentsMap();
- return new JSStrictArgumentsObject{
- map,
- properties_or_hash: kEmptyFixedArray,
- elements,
- length: elements.length
- };
- }
+macro NewJSStrictArgumentsObject(implicit context: Context)(
+ elements: FixedArray): JSStrictArgumentsObject {
+ const map = GetStrictArgumentsMap();
+ return new JSStrictArgumentsObject{
+ map,
+ properties_or_hash: kEmptyFixedArray,
+ elements,
+ length: elements.length
+ };
+}
- macro NewJSSloppyArgumentsObject(implicit context: Context)(
- elements: FixedArray, callee: JSFunction): JSSloppyArgumentsObject {
- const map = GetSloppyArgumentsMap();
- return new JSSloppyArgumentsObject{
- map,
- properties_or_hash: kEmptyFixedArray,
- elements,
- length: elements.length,
- callee
- };
- }
+macro NewJSSloppyArgumentsObject(implicit context: Context)(
+ elements: FixedArray, callee: JSFunction): JSSloppyArgumentsObject {
+ const map = GetSloppyArgumentsMap();
+ return new JSSloppyArgumentsObject{
+ map,
+ properties_or_hash: kEmptyFixedArray,
+ elements,
+ length: elements.length,
+ callee
+ };
+}
- macro NewJSFastAliasedArgumentsObject(implicit context: Context)(
- elements: FixedArray, length: Smi,
- callee: JSFunction): JSSloppyArgumentsObject {
- // TODO(danno): FastAliasedArguments should really be a type for itself
- const map = GetFastAliasedArgumentsMap();
- return new JSSloppyArgumentsObject{
- map,
- properties_or_hash: kEmptyFixedArray,
- elements,
- length,
- callee
- };
- }
+macro NewJSFastAliasedArgumentsObject(implicit context: Context)(
+ elements: FixedArray, length: Smi,
+ callee: JSFunction): JSSloppyArgumentsObject {
+ // TODO(danno): FastAliasedArguments should really be a type for itself
+ const map = GetFastAliasedArgumentsMap();
+ return new JSSloppyArgumentsObject{
+ map,
+ properties_or_hash: kEmptyFixedArray,
+ elements,
+ length,
+ callee
+ };
+}
- struct ParameterMapIterator {
- macro Next(): Object labels NoMore {
- const currentMapSlotCopy = this.currentMapSlot++;
- if (currentMapSlotCopy > 1) {
- if (this.currentIndex == this.endInterationIndex) goto NoMore;
- this.currentIndex--;
- return Convert<Smi>(this.currentIndex);
- } else if (currentMapSlotCopy == 0) {
- return this.context;
- } else {
- assert(currentMapSlotCopy == 1);
- return this.elements;
- }
+struct ParameterMapIterator {
+ macro Next(): Object labels NoMore {
+ const currentMapSlotCopy = this.currentMapSlot++;
+ if (currentMapSlotCopy > 1) {
+ if (this.currentIndex == this.endInterationIndex) goto NoMore;
+ this.currentIndex--;
+ return Convert<Smi>(this.currentIndex);
+ } else if (currentMapSlotCopy == 0) {
+ return this.context;
+ } else {
+ assert(currentMapSlotCopy == 1);
+ return this.elements;
}
- const context: Context;
- const elements: FixedArray;
- currentIndex: intptr;
- const endInterationIndex: intptr;
- currentMapSlot: intptr;
}
+ const context: Context;
+ const elements: FixedArray;
+ currentIndex: intptr;
+ const endInterationIndex: intptr;
+ currentMapSlot: intptr;
+}
- macro NewParameterMapIterator(
- context: Context, elements: FixedArray, formalParameterCount: intptr,
- mappedCount: intptr): ParameterMapIterator {
- const flags = context.scope_info.flags;
- let contextHeaderSize: intptr = MIN_CONTEXT_SLOTS;
- if (flags.has_context_extension_slot) ++contextHeaderSize;
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // context_header_size .. context_header_size+argument_count-1
- // The mapped parameter thus need to get indices
- // context_header_size+parameter_count-1 ..
- // context_header_size+argument_count-mapped_count
- // We loop from right to left.
- const afterLastContextIndex = contextHeaderSize + formalParameterCount;
- const firstContextIndex = afterLastContextIndex - mappedCount;
- return ParameterMapIterator{
- context,
- elements,
- currentIndex: afterLastContextIndex,
- endInterationIndex: firstContextIndex,
- currentMapSlot: 0
- };
- }
+macro NewParameterMapIterator(
+ context: Context, elements: FixedArray, formalParameterCount: intptr,
+ mappedCount: intptr): ParameterMapIterator {
+ const flags = context.scope_info.flags;
+ let contextHeaderSize: intptr = MIN_CONTEXT_SLOTS;
+ if (flags.has_context_extension_slot) ++contextHeaderSize;
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // context_header_size .. context_header_size+argument_count-1
+ // The mapped parameter thus need to get indices
+ // context_header_size+parameter_count-1 ..
+ // context_header_size+argument_count-mapped_count
+ // We loop from right to left.
+ const afterLastContextIndex = contextHeaderSize + formalParameterCount;
+ const firstContextIndex = afterLastContextIndex - mappedCount;
+ return ParameterMapIterator{
+ context,
+ elements,
+ currentIndex: afterLastContextIndex,
+ endInterationIndex: firstContextIndex,
+ currentMapSlot: 0
+ };
+}
- struct ParameterValueIterator {
- macro Next(): Object labels NoMore() {
- if (this.mapped_count != 0) {
- this.mapped_count--;
- return TheHole;
- }
- if (this.current == this.arguments.length) goto NoMore;
- return this.arguments[this.current++];
+struct ParameterValueIterator {
+ macro Next(): Object labels NoMore() {
+ if (this.mapped_count != 0) {
+ this.mapped_count--;
+ return TheHole;
}
- mapped_count: intptr;
- const arguments: Arguments;
- current: intptr;
+ if (this.current == this.arguments.length) goto NoMore;
+ return this.arguments[this.current++];
}
+ mapped_count: intptr;
+ const arguments: Arguments;
+ current: intptr;
+}
- macro NewParameterValueIterator(mappedCount: intptr, arguments: Arguments):
- ParameterValueIterator {
- return ParameterValueIterator{
- mapped_count: mappedCount,
- arguments,
- current: mappedCount
- };
- }
+macro NewParameterValueIterator(
+ mappedCount: intptr, arguments: Arguments): ParameterValueIterator {
+ return ParameterValueIterator{
+ mapped_count: mappedCount,
+ arguments,
+ current: mappedCount
+ };
+}
- macro NewAllArguments(implicit context: Context)(
- frame: FrameWithArguments, argumentCount: intptr): JSArray {
- const map = GetFastPackedElementsJSArrayMap();
- const arguments = GetFrameArguments(frame, argumentCount);
- const it = ArgumentsIterator{arguments, current: 0};
- const elements = NewFixedArray(argumentCount, it);
- return NewJSArray(map, elements);
- }
+macro NewAllArguments(implicit context: Context)(
+ frame: FrameWithArguments, argumentCount: intptr): JSArray {
+ const map = GetFastPackedElementsJSArrayMap();
+ const arguments = GetFrameArguments(frame, argumentCount);
+ const it = ArgumentsIterator{arguments, current: 0};
+ const elements = NewFixedArray(argumentCount, it);
+ return NewJSArray(map, elements);
+}
- macro NewRestArguments(implicit context:
- Context)(info: FrameWithArgumentsInfo): JSArray {
- const argumentCount = Convert<intptr>(info.argument_count);
- const formalParameterCount = Convert<intptr>(info.formal_parameter_count);
- const map = GetFastPackedElementsJSArrayMap();
- const length = (formalParameterCount >= argumentCount) ?
- 0 :
- argumentCount - formalParameterCount;
- const arguments = GetFrameArguments(info.frame, argumentCount);
- const it = ArgumentsIterator{arguments, current: formalParameterCount};
- const elements = NewFixedArray(length, it);
- return NewJSArray(map, elements);
- }
+macro NewRestArguments(implicit context: Context)(info: FrameWithArgumentsInfo):
+ JSArray {
+ const argumentCount = Convert<intptr>(info.argument_count);
+ const formalParameterCount = Convert<intptr>(info.formal_parameter_count);
+ const map = GetFastPackedElementsJSArrayMap();
+ const length = (formalParameterCount >= argumentCount) ?
+ 0 :
+ argumentCount - formalParameterCount;
+ const arguments = GetFrameArguments(info.frame, argumentCount);
+ const it = ArgumentsIterator{arguments, current: formalParameterCount};
+ const elements = NewFixedArray(length, it);
+ return NewJSArray(map, elements);
+}
- macro NewStrictArguments(implicit context: Context)(
- info: FrameWithArgumentsInfo): JSStrictArgumentsObject {
- const argumentCount = Convert<intptr>(info.argument_count);
- const arguments = GetFrameArguments(info.frame, argumentCount);
+macro NewStrictArguments(implicit context: Context)(
+ info: FrameWithArgumentsInfo): JSStrictArgumentsObject {
+ const argumentCount = Convert<intptr>(info.argument_count);
+ const arguments = GetFrameArguments(info.frame, argumentCount);
+ const it = ArgumentsIterator{arguments, current: 0};
+ const elements = NewFixedArray(argumentCount, it);
+ return NewJSStrictArgumentsObject(elements);
+}
+
+macro NewSloppyArguments(implicit context: Context)(
+ info: FrameWithArgumentsInfo, callee: JSFunction): JSSloppyArgumentsObject {
+ const argumentCount = Convert<intptr>(info.argument_count);
+ const arguments = GetFrameArguments(info.frame, argumentCount);
+ const formalParameterCount = Convert<intptr>(info.formal_parameter_count);
+ if (formalParameterCount == 0) {
const it = ArgumentsIterator{arguments, current: 0};
const elements = NewFixedArray(argumentCount, it);
- return NewJSStrictArgumentsObject(elements);
- }
-
- macro NewSloppyArguments(implicit context: Context)(
- info: FrameWithArgumentsInfo,
- callee: JSFunction): JSSloppyArgumentsObject {
- const argumentCount = Convert<intptr>(info.argument_count);
- const arguments = GetFrameArguments(info.frame, argumentCount);
- const formalParameterCount = Convert<intptr>(info.formal_parameter_count);
- if (formalParameterCount == 0) {
- const it = ArgumentsIterator{arguments, current: 0};
- const elements = NewFixedArray(argumentCount, it);
- return NewJSSloppyArgumentsObject(elements, callee);
- }
- const mappedCount = IntPtrMin(formalParameterCount, argumentCount);
- const it = NewParameterValueIterator(mappedCount, arguments);
- const parameterValues = NewFixedArray(argumentCount, it);
- let paramIter = NewParameterMapIterator(
- context, parameterValues, formalParameterCount, mappedCount);
- const elementsLength =
- Convert<Smi>(mappedCount + kSloppyArgumentsParameterMapStart);
- const map = kSloppyArgumentsElementsMap;
- const elements = new
- FixedArray{map, length: elementsLength, objects: ...paramIter};
- const length = Convert<Smi>(argumentCount);
- return NewJSFastAliasedArgumentsObject(elements, length, callee);
+ return NewJSSloppyArgumentsObject(elements, callee);
}
-
+ const mappedCount = IntPtrMin(formalParameterCount, argumentCount);
+ const it = NewParameterValueIterator(mappedCount, arguments);
+ const parameterValues = NewFixedArray(argumentCount, it);
+ let paramIter = NewParameterMapIterator(
+ context, parameterValues, formalParameterCount, mappedCount);
+ const elementsLength =
+ Convert<Smi>(mappedCount + kSloppyArgumentsParameterMapStart);
+ const map = kSloppyArgumentsElementsMap;
+ const elements = new
+ FixedArray{map, length: elementsLength, objects: ...paramIter};
+ const length = Convert<Smi>(argumentCount);
+ return NewJSFastAliasedArgumentsObject(elements, length, callee);
+}
}
@export
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index a1db32f5dc..bd9f39b7d3 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -462,33 +462,63 @@ std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
}
// Try to grow the size of a wasm memory in place, without realloc + copy.
-bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
- size_t max_pages) {
+base::Optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
+ size_t delta_pages,
+ size_t max_pages) {
+ // This function grows wasm memory by
+ // * changing the permissions of additional {delta_pages} pages to kReadWrite;
+ // * incrementing {byte_length_};
+ //
+ // As this code is executed concurrently, the following steps are performed:
+ // 1) Read the current value of {byte_length_};
+ // 2) Change the permission of all pages from {buffer_start_} to
+ // {byte_length_} + {delta_pages} * {page_size} to kReadWrite;
+ // * This operation may race with other concurrent grows. The OS takes care of
+ // synchronization.
+ // 3) Try to update {byte_length_} with a compare_exchange;
+ // 4) Repeat 1) to 3) until the compare_exchange in 3) succeeds;
+ //
+ // The result of this function is the previous {byte_length_}, in pages.
+ // It behaves like the result of an RMW update on {byte_length_}, i.e. two
+ // concurrent calls to this function will observe different return values if
+ // {delta_pages} != 0.
+ //
+ // Invariants:
+ // * Permissions are always set incrementally, i.e. for any page {b} with
+ // kReadWrite permission, all pages between the first page {a} and page {b}
+ // also have kReadWrite permission.
+ // * {byte_length_} is always less than or equal to the amount of memory with
+ // permissions set to kReadWrite;
+ // * This is guaranteed by incrementing {byte_length_} with a
+ // compare_exchange after changing the permissions.
+ // * This invariant is the reason why we cannot use a fetch_add.
DCHECK(is_wasm_memory_);
max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);
- if (delta_pages == 0) return true; // degenerate grow.
- if (delta_pages > max_pages) return false; // would never work.
-
// Do a compare-exchange loop, because we also need to adjust page
// permissions. Note that multiple racing grows both try to set page
// permissions for the entire range (to be RW), so the operating system
// should deal with that raciness. We know we succeeded when we can
// compare/swap the old length with the new length.
size_t old_length = byte_length_.load(std::memory_order_relaxed);
+
+ if (delta_pages == 0)
+ return {old_length / wasm::kWasmPageSize}; // degenerate grow.
+ if (delta_pages > max_pages) return {}; // would never work.
+
size_t new_length = 0;
while (true) {
size_t current_pages = old_length / wasm::kWasmPageSize;
// Check if we have exceeded the supplied maximum.
- if (current_pages > (max_pages - delta_pages)) return false;
+ if (current_pages > (max_pages - delta_pages)) return {};
new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;
// Try to adjust the permissions on the memory.
if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
new_length, PageAllocator::kReadWrite)) {
- return false;
+ return {};
}
if (byte_length_.compare_exchange_weak(old_length, new_length,
std::memory_order_acq_rel)) {
@@ -502,7 +532,7 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
}
- return true;
+ return {old_length / wasm::kWasmPageSize};
}
void BackingStore::AttachSharedWasmMemoryObject(
@@ -515,10 +545,9 @@ void BackingStore::AttachSharedWasmMemoryObject(
}
void BackingStore::BroadcastSharedWasmMemoryGrow(
- Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
- size_t new_pages) {
- GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
- isolate, backing_store, new_pages);
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
+ GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate,
+ backing_store);
}
void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
@@ -736,8 +765,7 @@ void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
}
void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
- Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
- size_t new_pages) {
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
{
// The global lock protects the list of isolates per backing store.
base::MutexGuard scope_lock(&impl()->mutex_);
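
The comment block added to GrowWasmMemoryInPlace above describes a compare-exchange loop: widen the page permissions first, then publish the new length, and retry if another grow raced in between. The following standalone sketch shows that pattern with simplified stand-ins (a set_permissions() stub, a fixed 64 KiB page size, and a plain std::atomic) rather than V8's actual allocator and BackingStore API; like the new base::Optional<size_t> return value, it yields the previous size in pages on success.

#include <atomic>
#include <cstddef>
#include <optional>

constexpr size_t kPageSize = 64 * 1024;  // one wasm page

// Hypothetical stand-in for i::SetPermissions(): makes [0, new_length) RW.
bool set_permissions(size_t /*new_length*/) { return true; }

std::atomic<size_t> byte_length{0};

// Returns the previous size in pages on success, std::nullopt on failure.
std::optional<size_t> GrowInPlace(size_t delta_pages, size_t max_pages) {
  size_t old_length = byte_length.load(std::memory_order_relaxed);
  if (delta_pages == 0) return old_length / kPageSize;  // degenerate grow
  if (delta_pages > max_pages) return std::nullopt;     // can never fit

  while (true) {
    size_t current_pages = old_length / kPageSize;
    if (current_pages > max_pages - delta_pages) return std::nullopt;
    size_t new_length = (current_pages + delta_pages) * kPageSize;
    // Permissions are widened before the length is published, so the length
    // never exceeds the RW range (the invariant from the comment above).
    if (!set_permissions(new_length)) return std::nullopt;
    if (byte_length.compare_exchange_weak(old_length, new_length,
                                          std::memory_order_acq_rel)) {
      return old_length / kPageSize;  // acts like an RMW update
    }
    // compare_exchange_weak reloaded old_length; retry with the fresh value.
  }
}

int main() {
  auto prev = GrowInPlace(/*delta_pages=*/2, /*max_pages=*/4);
  return prev.has_value() ? 0 : 1;
}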
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index e9a7c8ec15..0a460cef8a 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -9,6 +9,7 @@
#include "include/v8-internal.h"
#include "include/v8.h"
+#include "src/base/optional.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -84,8 +85,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool free_on_destruct() const { return free_on_destruct_; }
// Attempt to grow this backing store in place.
- bool GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
- size_t max_pages);
+ base::Optional<size_t> GrowWasmMemoryInPlace(Isolate* isolate,
+ size_t delta_pages,
+ size_t max_pages);
// Wrapper around ArrayBuffer::Allocator::Reallocate.
bool Reallocate(Isolate* isolate, size_t new_byte_length);
@@ -104,8 +106,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// after the backing store has been grown. Memory objects in this
// isolate are updated synchronously.
static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
- std::shared_ptr<BackingStore>,
- size_t new_pages);
+ std::shared_ptr<BackingStore>);
// TODO(wasm): address space limitations should be enforced in page alloc.
// These methods enforce a limit on the total amount of address space,
@@ -243,8 +244,7 @@ class GlobalBackingStoreRegistry {
// Broadcast updates to all attached memory objects.
static void BroadcastSharedWasmMemoryGrow(
- Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
- size_t new_pages);
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
// Update all shared memory objects in the given isolate.
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index fc49f164b2..f26feae421 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -20,9 +20,6 @@ class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
inline Address ValueAddress() { return address() + kValueOffset; }
- // Dispatched behavior.
- DECL_PRINTER(Cell)
-
using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kSize, kSize>;
TQ_OBJECT_CONSTRUCTORS(Cell)
diff --git a/deps/v8/src/objects/cell.tq b/deps/v8/src/objects/cell.tq
index 9f825fc575..ef9b281104 100644
--- a/deps/v8/src/objects/cell.tq
+++ b/deps/v8/src/objects/cell.tq
@@ -3,6 +3,7 @@
// found in the LICENSE file.
@generateCppClass
+@generatePrint
extern class Cell extends HeapObject {
value: Object;
}
diff --git a/deps/v8/src/objects/class-definitions-tq-deps-inl.h b/deps/v8/src/objects/class-definitions-tq-deps-inl.h
new file mode 100644
index 0000000000..de81ccfeb6
--- /dev/null
+++ b/deps/v8/src/objects/class-definitions-tq-deps-inl.h
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
+#define V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
+
+// This is a collection of -inl.h files required by the generated file
+// class-definitions-tq.cc. Generally, classes using @generateCppClass need an
+// entry here.
+#include "src/objects/allocation-site-inl.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/js-aggregate-error-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/promise-inl.h"
+#include "src/objects/property-descriptor-object-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/struct-inl.h"
+#include "src/objects/template-objects-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-date-time-format-inl.h"
+#include "src/objects/js-display-names-inl.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
+#include "src/objects/js-segmenter-inl.h"
+#endif
+
+#endif // V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 81db1745c4..1af186f3e7 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -454,6 +454,17 @@ void Code::set_builtin_index(int index) {
bool Code::is_builtin() const { return builtin_index() != -1; }
+unsigned Code::inlined_bytecode_size() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION ||
+ ReadField<unsigned>(kInlinedBytecodeSizeOffset) == 0);
+ return ReadField<unsigned>(kInlinedBytecodeSizeOffset);
+}
+
+void Code::set_inlined_bytecode_size(unsigned size) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION || size == 0);
+ WriteField<unsigned>(kInlinedBytecodeSizeOffset, size);
+}
+
bool Code::has_safepoint_info() const {
return is_turbofanned() || is_wasm_code();
}
@@ -477,6 +488,24 @@ void Code::set_marked_for_deoptimization(bool flag) {
code_data_container().set_kind_specific_flags(updated);
}
+int Code::deoptimization_count() const {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int32_t flags = code_data_container().kind_specific_flags();
+ int count = DeoptCountField::decode(flags);
+ DCHECK_GE(count, 0);
+ return count;
+}
+
+void Code::increment_deoptimization_count() {
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ int32_t flags = code_data_container().kind_specific_flags();
+ int32_t count = DeoptCountField::decode(flags);
+ DCHECK_GE(count, 0);
+ CHECK_LE(count + 1, DeoptCountField::kMax);
+ int32_t updated = DeoptCountField::update(flags, count + 1);
+ code_data_container().set_kind_specific_flags(updated);
+}
+
bool Code::embedded_objects_cleared() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
int32_t flags = code_data_container().kind_specific_flags();
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 4f272f1299..2e8f808262 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -1026,8 +1026,12 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
return "prototype-check";
case kPropertyCellChangedGroup:
return "property-cell-changed";
- case kFieldOwnerGroup:
- return "field-owner";
+ case kFieldConstGroup:
+ return "field-const";
+ case kFieldTypeGroup:
+ return "field-type";
+ case kFieldRepresentationGroup:
+ return "field-representation";
case kInitialMapChangedGroup:
return "initial-map-changed";
case kAllocationSiteTenuringChangedGroup:
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index d80e72fa03..ea6f52cc59 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -47,7 +47,6 @@ class Code : public HeapObject {
V(WASM_TO_JS_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(JS_TO_JS_FUNCTION) \
- V(WASM_INTERPRETER_ENTRY) \
V(C_WASM_ENTRY)
enum Kind {
@@ -138,6 +137,9 @@ class Code : public HeapObject {
inline void set_builtin_index(int id);
inline bool is_builtin() const;
+ inline unsigned inlined_bytecode_size() const;
+ inline void set_inlined_bytecode_size(unsigned size);
+
inline bool has_safepoint_info() const;
// [stack_slots]: If {has_safepoint_info()}, the number of stack slots
@@ -180,6 +182,12 @@ class Code : public HeapObject {
inline bool marked_for_deoptimization() const;
inline void set_marked_for_deoptimization(bool flag);
+ // [deoptimization_count]: In turboprop we retain the deoptimized code across
+ // a certain number of soft deopts. This field keeps track of the number of
+ // deoptimizations we have seen so far.
+ inline int deoptimization_count() const;
+ inline void increment_deoptimization_count();
+
// [embedded_objects_cleared]: For kind OPTIMIZED_FUNCTION tells whether
// the embedded objects in the code marked for deoptimization were cleared.
// Note that embedded_objects_cleared() implies marked_for_deoptimization().
@@ -397,6 +405,7 @@ class Code : public HeapObject {
FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
V(kCodeCommentsOffsetOffset, kIntSize) \
V(kBuiltinIndexOffset, kIntSize) \
+ V(kInlinedBytecodeSizeOffset, kIntSize) \
V(kUnalignedHeaderSize, 0) \
/* Add padding to align the instruction start following right after */ \
/* the Code object header. */ \
@@ -409,22 +418,23 @@ class Code : public HeapObject {
// This documents the amount of free space we have in each Code object header
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
#elif V8_TARGET_ARCH_MIPS64
- static constexpr int kHeaderPaddingSize = 0;
+ static constexpr int kHeaderPaddingSize = 28;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kHeaderPaddingSize = 20;
+ static constexpr int kHeaderPaddingSize = 16;
#elif V8_TARGET_ARCH_IA32
- static constexpr int kHeaderPaddingSize = 20;
+ static constexpr int kHeaderPaddingSize = 16;
#elif V8_TARGET_ARCH_MIPS
- static constexpr int kHeaderPaddingSize = 20;
+ static constexpr int kHeaderPaddingSize = 16;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
- FLAG_enable_embedded_constant_pool ? 28 : 0;
+ FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 12 : 24)
+ : (COMPRESS_POINTERS_BOOL ? 16 : 28);
#elif V8_TARGET_ARCH_S390X
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
#else
#error Unknown architecture.
#endif
@@ -454,11 +464,11 @@ class Code : public HeapObject {
V(DeoptAlreadyCountedField, bool, 1, _) \
V(CanHaveWeakObjectsField, bool, 1, _) \
V(IsPromiseRejectionField, bool, 1, _) \
- V(IsExceptionCaughtField, bool, 1, _)
+ V(IsExceptionCaughtField, bool, 1, _) \
+ V(DeoptCountField, int, 4, _)
DEFINE_BIT_FIELDS(CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS)
#undef CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS
- static_assert(IsExceptionCaughtField::kLastUsedBit < 32,
- "KindSpecificFlags full");
+ static_assert(DeoptCountField::kLastUsedBit < 32, "KindSpecificFlags full");
// The {marked_for_deoptimization} field is accessed from generated code.
static const int kMarkedForDeoptimizationBit =
@@ -641,7 +651,9 @@ class DependentCode : public WeakFixedArray {
kPropertyCellChangedGroup,
// Group of code that omit run-time checks for field(s) introduced by
// this map, i.e. for the field type.
- kFieldOwnerGroup,
+ kFieldTypeGroup,
+ kFieldConstGroup,
+ kFieldRepresentationGroup,
// Group of code that omit run-time type checks for initial maps of
// constructors.
kInitialMapChangedGroup,
@@ -709,8 +721,8 @@ class DependentCode : public WeakFixedArray {
inline int flags();
inline void set_flags(int flags);
- using GroupField = base::BitField<int, 0, 3>;
- using CountField = base::BitField<int, 3, 27>;
+ using GroupField = base::BitField<int, 0, 5>;
+ using CountField = base::BitField<int, 5, 27>;
STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray);
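
The new DeoptCountField above packs a 4-bit soft-deopt counter into the same 32-bit kind_specific_flags word as the existing boolean flags, which is why the GroupField/CountField widths and the static_assert had to move. A simplified stand-in for v8::base::BitField (not the real class, and with illustrative bit positions only) shows how decode/update read-modify-write such a packed counter:

#include <cassert>
#include <cstdint>

// Simplified stand-in for v8::base::BitField; illustrative only.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr T kMax = static_cast<T>((uint32_t{1} << kSize) - 1);
  static constexpr T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
};

// Illustrative positions; the real layout is defined by
// CODE_KIND_SPECIFIC_FLAGS_BIT_FIELDS above.
using MarkedForDeoptimizationField = BitField<bool, 0, 1>;
using DeoptCountField = BitField<int, 5, 4>;  // 4 bits => counts up to 15

int main() {
  uint32_t flags = 0;
  flags = MarkedForDeoptimizationField::update(flags, true);
  for (int i = 0; i < 3; i++) {
    int count = DeoptCountField::decode(flags);
    assert(count + 1 <= DeoptCountField::kMax);  // mirrors the CHECK_LE above
    flags = DeoptCountField::update(flags, count + 1);
  }
  assert(DeoptCountField::decode(flags) == 3);
  assert(MarkedForDeoptimizationField::decode(flags));
  return 0;
}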
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 33df991564..8bd41722f1 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -245,14 +245,18 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
return Map::cast(initial_js_array_map);
}
-MicrotaskQueue* NativeContext::microtask_queue() const {
+DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kMicrotaskQueueOffset);
return reinterpret_cast<MicrotaskQueue*>(
- ReadField<Address>(kMicrotaskQueueOffset));
+ DecodeExternalPointer(isolate, encoded_value));
}
-void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
- WriteField<Address>(kMicrotaskQueueOffset,
- reinterpret_cast<Address>(microtask_queue));
+void NativeContext::set_microtask_queue(Isolate* isolate,
+ MicrotaskQueue* microtask_queue) {
+ ExternalPointer_t encoded_value = EncodeExternalPointer(
+ isolate, reinterpret_cast<Address>(microtask_queue));
+ WriteField<ExternalPointer_t>(kMicrotaskQueueOffset, encoded_value);
}
OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index a273549d96..06f742281a 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -42,11 +42,6 @@ enum ContextLookupFlags {
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \
async_module_evaluate_internal) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
V(OBJECT_CREATE, JSFunction, object_create) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
@@ -54,7 +49,6 @@ enum ContextLookupFlags {
V(MATH_POW_INDEX, JSFunction, math_pow) \
V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
promise_internal_constructor) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then)
#define NATIVE_CONTEXT_FIELDS(V) \
@@ -76,28 +70,10 @@ enum ContextLookupFlags {
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(ARRAY_JOIN_STACK_INDEX, HeapObject, array_join_stack) \
V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
- V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_reject_shared_fun) \
- V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_resolve_shared_fun) \
V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
V(ASYNC_FUNCTION_OBJECT_MAP_INDEX, Map, async_function_object_map) \
V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
async_generator_function_function) \
- V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
- async_iterator_value_unwrap_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_reject_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_resolve_shared_fun) \
- V(ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_yield_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_reject_shared_fun) \
V(ATOMICS_OBJECT, JSObject, atomics_object) \
V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \
V(BIGINT64_ARRAY_FUN_INDEX, JSFunction, bigint64_array_fun) \
@@ -191,8 +167,6 @@ enum ContextLookupFlags {
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
V(WEAK_CELL_MAP_INDEX, Map, weak_cell_map) \
- V(JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_MAP_INDEX, Map, \
- js_finalization_registry_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(JS_WEAK_REF_FUNCTION_INDEX, JSFunction, js_weak_ref_fun) \
@@ -226,27 +200,6 @@ enum ContextLookupFlags {
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \
- V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
- V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
- promise_get_capabilities_executor_shared_fun) \
- V(PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX, SharedFunctionInfo, \
- promise_capability_default_reject_shared_fun) \
- V(PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX, SharedFunctionInfo, \
- promise_capability_default_resolve_shared_fun) \
- V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \
- promise_then_finally_shared_fun) \
- V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \
- promise_catch_finally_shared_fun) \
- V(PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN, SharedFunctionInfo, \
- promise_value_thunk_finally_shared_fun) \
- V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo, \
- promise_thrower_finally_shared_fun) \
- V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
- promise_all_resolve_element_shared_fun) \
- V(PROMISE_ALL_SETTLED_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
- promise_all_settled_resolve_element_shared_fun) \
- V(PROMISE_ALL_SETTLED_REJECT_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
- promise_all_settled_reject_element_shared_fun) \
V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -343,12 +296,15 @@ enum ContextLookupFlags {
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(ERROR_TO_STRING, JSFunction, error_to_string) \
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(AGGREGATE_ERROR_FUNCTION_INDEX, JSFunction, aggregate_error_function) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
V(MAP_DELETE_INDEX, JSFunction, map_delete) \
V(MAP_GET_INDEX, JSFunction, map_get) \
V(MAP_HAS_INDEX, JSFunction, map_has) \
V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FINALIZATION_REGISTRY_CLEANUP_SOME, JSFunction, \
+ finalization_registry_cleanup_some) \
V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
@@ -371,6 +327,7 @@ enum ContextLookupFlags {
V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
+ V(RETAINED_MAPS, WeakArrayList, retained_maps) \
V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
// A table of all script contexts. Every loaded top-level script with top-level
@@ -694,7 +651,8 @@ class NativeContext : public Context {
// TODO(neis): Move some stuff from Context here.
// [microtask_queue]: pointer to the MicrotaskQueue object.
- DECL_PRIMITIVE_ACCESSORS(microtask_queue, MicrotaskQueue*)
+ DECL_GETTER(microtask_queue, MicrotaskQueue*)
+ inline void set_microtask_queue(Isolate* isolate, MicrotaskQueue* queue);
// Dispatched behavior.
DECL_PRINTER(NativeContext)
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 6def2320ff..bae4fd60df 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -6,7 +6,7 @@
extern class Context extends HeapObject {
length: Smi;
scope_info: ScopeInfo;
- previous: Object;
+ previous: Context|Zero|Undefined;
}
extern class AwaitContext extends Context generates 'TNode<Context>';
extern class BlockContext extends Context generates 'TNode<Context>';
@@ -24,6 +24,7 @@ const MIN_CONTEXT_SLOTS: constexpr int31
generates 'Context::MIN_CONTEXT_SLOTS';
extern enum NativeContextSlot extends intptr constexpr 'Context::Field' {
+ AGGREGATE_ERROR_FUNCTION_INDEX,
ARRAY_BUFFER_FUN_INDEX,
ARRAY_BUFFER_NOINIT_FUN_INDEX,
ARRAY_BUFFER_MAP_INDEX,
@@ -47,17 +48,7 @@ extern enum NativeContextSlot extends intptr constexpr 'Context::Field' {
PROMISE_FUNCTION_INDEX,
PROMISE_THEN_INDEX,
- PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN,
- PROMISE_ALL_SETTLED_REJECT_ELEMENT_SHARED_FUN,
- PROMISE_ALL_SETTLED_RESOLVE_ELEMENT_SHARED_FUN,
- PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX,
- PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX,
- PROMISE_CATCH_FINALLY_SHARED_FUN,
- PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN,
PROMISE_PROTOTYPE_INDEX,
- PROMISE_THROWER_FINALLY_SHARED_FUN,
- PROMISE_THEN_FINALLY_SHARED_FUN,
- PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN,
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
@@ -78,3 +69,9 @@ extern operator '[]=' macro StoreContextElement(
extern operator '[]' macro LoadContextElement(Context, intptr): Object;
extern operator '[]' macro LoadContextElement(Context, Smi): Object;
+
+// A dummy used instead of a context constant for runtime calls that don't need
+// a context.
+type NoContext extends Smi;
+extern macro NoContextConstant(): NoContext;
+const kNoContext: NoContext = NoContextConstant();
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index c5b024ed1d..886c31583e 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -51,6 +51,9 @@ BytecodeArray DebugInfo::DebugBytecodeArray() {
return BytecodeArray::cast(debug_bytecode_array());
}
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmValue)
+NEVER_READ_ONLY_SPACE_IMPL(WasmValue)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index a7ce9ec367..415f456b1b 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -126,9 +126,6 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
// Clears all fields related to block coverage.
void ClearCoverageInfo(Isolate* isolate);
- // Dispatched behavior.
- DECL_PRINTER(DebugInfo)
-
static const int kEstimatedNofBreakPointsInFunction = 4;
private:
@@ -202,6 +199,14 @@ class BreakPoint : public TorqueGeneratedBreakPoint<BreakPoint, Struct> {
TQ_OBJECT_CONSTRUCTORS(BreakPoint)
};
+// Holds Wasm values. This is used by the inspector.
+class WasmValue : public TorqueGeneratedWasmValue<WasmValue, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+
+ TQ_OBJECT_CONSTRUCTORS(WasmValue)
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index d50d86c482..8544e79366 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -35,6 +35,7 @@ bitfield struct DebuggerHints extends uint31 {
}
@generateCppClass
+@generatePrint
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
// Bit field containing various information collected for debugging.
@@ -69,3 +70,15 @@ extern class CoverageInfo extends HeapObject {
const slot_count: int32;
slots[slot_count]: CoverageInfoSlot;
}
+
+@generateCppClass
+@generatePrint
+extern class WasmValue extends Struct {
+ // The type; should map to ValueType::Kind values in value-type.h.
+ value_type: SmiTagged<WasmValueType>;
+ // Holds the actual value. For example, if this holds a Wasm i32, this will
+ // be of length 4; for s128, it will have length 16. These values are
+ // represented by the respective C++ types and memcpy-ed in.
+ // When value_type is an anyref, it holds the object the anyref points to.
+ bytes_or_ref: Object|ByteArray;
+}
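
The WasmValue comment above says scalar values are stored by memcpy-ing their C++ representation into a byte buffer sized for the value type. A small self-contained illustration of that round trip for a wasm i32, using std::vector<uint8_t> in place of a ByteArray:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint8_t> StoreI32(int32_t value) {
  std::vector<uint8_t> bytes(sizeof(value));  // length 4 for a wasm i32
  std::memcpy(bytes.data(), &value, sizeof(value));
  return bytes;
}

int32_t LoadI32(const std::vector<uint8_t>& bytes) {
  assert(bytes.size() == sizeof(int32_t));
  int32_t value;
  std::memcpy(&value, bytes.data(), sizeof(value));
  return value;
}

int main() {
  auto bytes = StoreI32(-42);
  assert(bytes.size() == 4);
  assert(LoadI32(bytes) == -42);
  return 0;
}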
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index cfbc4f7ec8..357a6732e2 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -24,12 +24,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(DescriptorArray, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(DescriptorArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(EnumCache)
-CAST_ACCESSOR(DescriptorArray)
-
-ACCESSORS(DescriptorArray, enum_cache, EnumCache, kEnumCacheOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
kNumberOfAllDescriptorsOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_descriptors,
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index e24673d01c..61da8dc240 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -50,13 +50,13 @@ class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
// The "value" fields store either values or field types. A field type is either
// FieldType::None(), FieldType::Any() or a weak reference to a Map. All other
// references are strong.
-class DescriptorArray : public HeapObject {
+class DescriptorArray
+ : public TorqueGeneratedDescriptorArray<DescriptorArray, HeapObject> {
public:
DECL_INT16_ACCESSORS(number_of_all_descriptors)
DECL_INT16_ACCESSORS(number_of_descriptors)
inline int16_t number_of_slack_descriptors() const;
inline int number_of_entries() const;
- DECL_ACCESSORS(enum_cache, EnumCache)
void ClearEnumCache();
inline void CopyEnumCacheFrom(DescriptorArray array);
@@ -135,14 +135,9 @@ class DescriptorArray : public HeapObject {
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
int nof_descriptors, int slack);
- DECL_CAST(DescriptorArray)
-
// Constant for denoting key was not found.
static const int kNotFound = -1;
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS)
STATIC_ASSERT(IsAligned(kStartOfWeakFieldsOffset, kTaggedSize));
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
@@ -234,7 +229,7 @@ class DescriptorArray : public HeapObject {
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
- OBJECT_CONSTRUCTORS(DescriptorArray, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(DescriptorArray)
};
class NumberOfMarkedDescriptors {
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index 53c209e237..0b088b3d73 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -16,6 +16,7 @@ struct DescriptorEntry {
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
+@generateCppClass
extern class DescriptorArray extends HeapObject {
const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index a09eada629..ec3ca3402d 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -127,6 +127,14 @@ inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
}
+inline bool IsFastArgumentsElementsKind(ElementsKind kind) {
+ return kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+inline bool IsSlowArgumentsElementsKind(ElementsKind kind) {
+ return kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
inline bool IsSloppyArgumentsElementsKind(ElementsKind kind) {
return base::IsInRange(kind, FAST_SLOPPY_ARGUMENTS_ELEMENTS,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 8222575aa3..3e5276166d 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -67,7 +67,8 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
#endif
}
-bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
+bool EmbedderDataSlot::ToAlignedPointer(const Isolate* isolate,
+ void** out_pointer) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via API during "mutator"
// phase which is properly synced with GC (concurrent marker may still look
@@ -78,6 +79,8 @@ bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
Address raw_value = base::ReadUnalignedValue<Address>(address());
+ // We currently have to treat zero as nullptr in embedder slots.
+ if (raw_value) raw_value = DecodeExternalPointer(isolate, raw_value);
#else
Address raw_value = *location();
#endif
@@ -85,15 +88,18 @@ bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
return HAS_SMI_TAG(raw_value);
}
-bool EmbedderDataSlot::store_aligned_pointer(void* ptr) {
+bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
if (!HAS_SMI_TAG(value)) return false;
+ // We currently have to treat zero as nullptr in embedder slots.
+ if (value) value = EncodeExternalPointer(isolate, value);
+ DCHECK(HAS_SMI_TAG(value));
gc_safe_store(value);
return true;
}
EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
- const DisallowHeapAllocation& no_gc) const {
+ Isolate* isolate, const DisallowHeapAllocation& no_gc) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only by serializer from the main thread when
// GC is not active (concurrent marker may still look at the tagged part
@@ -103,14 +109,20 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- return base::ReadUnalignedValue<Address>(address());
+ Address value = base::ReadUnalignedValue<Address>(address());
+ // We currently have to treat zero as nullptr in embedder slots.
+ if (value) return DecodeExternalPointer(isolate, value);
+ return value;
#else
return *location();
#endif
}
-void EmbedderDataSlot::store_raw(EmbedderDataSlot::RawData data,
+void EmbedderDataSlot::store_raw(Isolate* isolate,
+ EmbedderDataSlot::RawData data,
const DisallowHeapAllocation& no_gc) {
+ // We currently have to treat zero as nullptr in embedder slots.
+ if (data) data = EncodeExternalPointer(isolate, data);
gc_safe_store(data);
}
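
The embedder-data-slot changes above thread an Isolate through every raw store and load so that external pointers can be encoded and decoded, while keeping the special case that a zero word still means nullptr. A generic sketch of that null-preserving guard, with XorWithIsolateKey() as a hypothetical stand-in for V8's Encode/DecodeExternalPointer:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

Address XorWithIsolateKey(Address value) {
  constexpr Address kIsolateKey = 0xA5A5A5A5u;  // illustrative key only
  return value ^ kIsolateKey;
}

Address StoreSlot(void* ptr) {
  Address value = reinterpret_cast<Address>(ptr);
  if (value) value = XorWithIsolateKey(value);  // leave 0 (nullptr) untouched
  return value;  // raw word written into the slot
}

void* LoadSlot(Address raw) {
  if (raw) raw = XorWithIsolateKey(raw);  // leave 0 (nullptr) untouched
  return reinterpret_cast<void*>(raw);
}

int main() {
  int x = 0;
  assert(LoadSlot(StoreSlot(&x)) == &x);
  assert(LoadSlot(StoreSlot(nullptr)) == nullptr);
  return 0;
}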
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index dee8c3ec56..2881ca03b3 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -66,14 +66,18 @@ class EmbedderDataSlot
// the pointer-like value. Note, that some Smis could still look like an
// aligned pointers.
// Returns true on success.
- V8_INLINE bool ToAlignedPointer(void** out_result) const;
+ V8_INLINE bool ToAlignedPointer(const Isolate* isolate,
+ void** out_result) const;
// Returns true if the pointer was successfully stored or false if the pointer
// was improperly aligned.
- V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(void* ptr);
+ V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(Isolate* isolate,
+ void* ptr);
- V8_INLINE RawData load_raw(const DisallowHeapAllocation& no_gc) const;
- V8_INLINE void store_raw(RawData data, const DisallowHeapAllocation& no_gc);
+ V8_INLINE RawData load_raw(Isolate* isolate,
+ const DisallowHeapAllocation& no_gc) const;
+ V8_INLINE void store_raw(Isolate* isolate, RawData data,
+ const DisallowHeapAllocation& no_gc);
private:
// Stores given value to the embedder data slot in a concurrent-marker
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 929b312f22..dc4581f40e 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -983,7 +983,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
int FeedbackNexus::ExtractMapsAndHandlers(
std::vector<std::pair<Handle<Map>, MaybeObjectHandle>>* maps_and_handlers,
- bool drop_deprecated) const {
+ bool try_update_deprecated) const {
DCHECK(IsLoadICKind(kind()) ||
IsStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsKeyedStoreICKind(kind()) || IsStoreOwnICKind(kind()) ||
@@ -1015,10 +1015,13 @@ int FeedbackNexus::ExtractMapsAndHandlers(
MaybeObject handler = array.Get(i + 1);
if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- Map map = Map::cast(heap_object);
- if (drop_deprecated && map.is_deprecated()) continue;
+ Handle<Map> map(Map::cast(heap_object), isolate);
+ if (try_update_deprecated &&
+ !Map::TryUpdate(isolate, map).ToHandle(&map)) {
+ continue;
+ }
maps_and_handlers->push_back(
- MapAndHandler(handle(map, isolate), handle(handler, isolate)));
+ MapAndHandler(map, handle(handler, isolate)));
found++;
}
}
@@ -1028,10 +1031,13 @@ int FeedbackNexus::ExtractMapsAndHandlers(
MaybeObject handler = GetFeedbackExtra();
if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- Map map = Map::cast(heap_object);
- if (drop_deprecated && map.is_deprecated()) return 0;
+ Handle<Map> map = handle(Map::cast(heap_object), isolate);
+ if (try_update_deprecated &&
+ !Map::TryUpdate(isolate, map).ToHandle(&map)) {
+ return 0;
+ }
maps_and_handlers->push_back(
- MapAndHandler(handle(map, isolate), handle(handler, isolate)));
+ MapAndHandler(map, handle(handler, isolate)));
return 1;
}
}
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index f1d2cc3029..24025ad16a 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -651,7 +651,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
int ExtractMaps(MapHandles* maps) const;
int ExtractMapsAndHandlers(std::vector<MapAndHandler>* maps_and_handlers,
- bool drop_deprecated = false) const;
+ bool try_update_deprecated = false) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
bool IsCleared() const {
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index c981b8f57c..8ede5579d5 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -6,13 +6,15 @@ extern class FeedbackVector extends HeapObject {
shared_function_info: SharedFunctionInfo;
optimized_code_weak_or_smi: Weak<Code>|Smi;
closure_feedback_cell_array: FixedArray;
- length: int32;
+ const length: int32;
invocation_count: int32;
profiler_ticks: int32;
// TODO(v8:9287) The padding is not necessary on platforms with 4 bytes
// tagged pointers, we should make it conditional; however, platform-specific
// code interacts badly with GCMole, so we need to address that first.
padding: uint32;
+ // TODO(tebbi): The variable-length feedback_slots field should be declared
+ // here once it is possible to declare tagged slots after untagged slots.
}
extern class FeedbackMetadata extends HeapObject;
diff --git a/deps/v8/src/objects/field-type.cc b/deps/v8/src/objects/field-type.cc
index 5c771c4ffa..3b3457249f 100644
--- a/deps/v8/src/objects/field-type.cc
+++ b/deps/v8/src/objects/field-type.cc
@@ -63,6 +63,15 @@ bool FieldType::NowIs(FieldType other) const {
return *this == other;
}
+bool FieldType::Equals(FieldType other) const {
+ if (IsAny() && other.IsAny()) return true;
+ if (IsNone() && other.IsNone()) return true;
+ if (IsClass() && other.IsClass()) {
+ return *this == other;
+ }
+ return false;
+}
+
bool FieldType::NowIs(Handle<FieldType> other) const { return NowIs(*other); }
void FieldType::PrintTo(std::ostream& os) const {
diff --git a/deps/v8/src/objects/field-type.h b/deps/v8/src/objects/field-type.h
index 566994b43e..d3856c48b6 100644
--- a/deps/v8/src/objects/field-type.h
+++ b/deps/v8/src/objects/field-type.h
@@ -41,6 +41,7 @@ class FieldType : public Object {
bool NowIs(FieldType other) const;
bool NowIs(Handle<FieldType> other) const;
+ V8_EXPORT_PRIVATE bool Equals(FieldType other) const;
V8_EXPORT_PRIVATE void PrintTo(std::ostream& os) const;
private:
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 4608f2ea21..174d4abc5b 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -21,55 +21,35 @@
#include "src/roots/roots-inl.h"
#include "src/sanitizer/tsan.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(FixedArray, FixedArrayBase)
-OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray, FixedArrayBase)
-OBJECT_CONSTRUCTORS_IMPL(ArrayList, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(ByteArray, FixedArrayBase)
-OBJECT_CONSTRUCTORS_IMPL(TemplateList, FixedArray)
-OBJECT_CONSTRUCTORS_IMPL(WeakFixedArray, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(WeakArrayList, HeapObject)
-
-FixedArrayBase::FixedArrayBase(Address ptr, AllowInlineSmiStorage allow_smi)
- : HeapObject(ptr, allow_smi) {
- SLOW_DCHECK(
- (allow_smi == AllowInlineSmiStorage::kAllowBeingASmi && IsSmi()) ||
- IsFixedArrayBase());
-}
-
-ByteArray::ByteArray(Address ptr, AllowInlineSmiStorage allow_smi)
- : FixedArrayBase(ptr, allow_smi) {
- SLOW_DCHECK(
- (allow_smi == AllowInlineSmiStorage::kAllowBeingASmi && IsSmi()) ||
- IsByteArray());
-}
+TQ_OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase)
+FixedArrayBase::FixedArrayBase(Address ptr,
+ HeapObject::AllowInlineSmiStorage allow_smi)
+ : TorqueGeneratedFixedArrayBase(ptr, allow_smi) {}
+TQ_OBJECT_CONSTRUCTORS_IMPL(FixedArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ArrayList)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ByteArray)
+ByteArray::ByteArray(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)
+ : TorqueGeneratedByteArray(ptr, allow_smi) {}
+TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateList)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WeakFixedArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WeakArrayList)
NEVER_READ_ONLY_SPACE_IMPL(WeakArrayList)
-CAST_ACCESSOR(ArrayList)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedArrayBase)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(TemplateList)
-CAST_ACCESSOR(WeakFixedArray)
-CAST_ACCESSOR(WeakArrayList)
-
-SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
-SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
SYNCHRONIZED_SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
-SMI_ACCESSORS(WeakArrayList, length, kLengthOffset)
Object FixedArrayBase::unchecked_synchronized_length() const {
return ACQUIRE_READ_FIELD(*this, kLengthOffset);
@@ -212,6 +192,19 @@ void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
}
+// Due to left- and right-trimming, concurrent visitors need to read the length
+// with acquire semantics.
+// TODO(ulan): Acquire should not be needed anymore.
+inline int FixedArray::AllocatedSize() {
+ return SizeFor(synchronized_length());
+}
+inline int WeakFixedArray::AllocatedSize() {
+ return SizeFor(synchronized_length());
+}
+inline int WeakArrayList::AllocatedSize() {
+ return SizeFor(synchronized_capacity());
+}
+
// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name name, int valid_entries,
@@ -392,28 +385,15 @@ MaybeObject WeakFixedArray::Get(int index) const {
MaybeObject WeakFixedArray::Get(const Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
- OffsetOfElementAt(index));
-}
-
-void WeakFixedArray::Set(int index, MaybeObject value) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, length());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
- WEAK_WRITE_BARRIER(*this, offset, value);
+ return objects(isolate, index);
}
void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, length());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
+ set_objects(index, value, mode);
}
MaybeObjectSlot WeakFixedArray::data_start() {
- return RawMaybeWeakField(kHeaderSize);
+ return RawMaybeWeakField(kObjectsOffset);
}
MaybeObjectSlot WeakFixedArray::RawFieldOfElementAt(int index) {
@@ -440,20 +420,15 @@ MaybeObject WeakArrayList::Get(int index) const {
MaybeObject WeakArrayList::Get(const Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
- return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
- OffsetOfElementAt(index));
+ return objects(isolate, index);
}
void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->capacity());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
+ set_objects(index, value, mode);
}
MaybeObjectSlot WeakArrayList::data_start() {
- return RawMaybeWeakField(kHeaderSize);
+ return RawMaybeWeakField(kObjectsOffset);
}
void WeakArrayList::CopyElements(Isolate* isolate, int dst_index,
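
The AllocatedSize helpers above read the length with acquire semantics because these arrays can be left- or right-trimmed concurrently. A minimal generic illustration of the acquire/release pairing involved (not V8's heap code):

#include <atomic>
#include <cstddef>

struct TrimmableArray {
  std::atomic<int> length{0};

  // Called when the array is trimmed (shrunk).
  void SetLengthAfterTrim(int new_length) {
    length.store(new_length, std::memory_order_release);
  }

  // Called by concurrent visitors: the acquire load pairs with the release
  // store above, so everything written before the trim is visible once the
  // new length is observed.
  size_t AllocatedSize() const {
    int len = length.load(std::memory_order_acquire);
    return sizeof(TrimmableArray) + static_cast<size_t>(len) * sizeof(void*);
  }
};

int main() {
  TrimmableArray a;
  a.SetLengthAfterTrim(8);
  return a.AllocatedSize() > sizeof(TrimmableArray) ? 0 : 1;
}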
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index c706fbd217..63c3c5360b 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -7,16 +7,15 @@
#include "src/handles/maybe-handles.h"
#include "src/objects/instance-type.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
-using WeakArrayBodyDescriptor =
- FlexibleWeakBodyDescriptor<HeapObject::kHeaderSize>;
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
@@ -69,19 +68,14 @@ enum FixedArraySubInstanceType {
// Common superclass for FixedArrays that allow implementations to share
// common accessors and some code paths.
-class FixedArrayBase : public HeapObject {
+class FixedArrayBase
+ : public TorqueGeneratedFixedArrayBase<FixedArrayBase, HeapObject> {
public:
- // [length]: length of the array.
- DECL_INT_ACCESSORS(length)
-
// Get and set the length using acquire loads and release stores.
DECL_SYNCHRONIZED_INT_ACCESSORS(length)
inline Object unchecked_synchronized_length() const;
- DECL_CAST(FixedArrayBase)
- DECL_VERIFIER(FixedArrayBase)
-
static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
V8_EXPORT_PRIVATE bool IsCowArray() const;
@@ -95,20 +89,15 @@ class FixedArrayBase : public HeapObject {
static const int kMaxSize = 128 * kTaggedSize * MB - kTaggedSize;
STATIC_ASSERT(Smi::IsValid(kMaxSize));
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_FIXED_ARRAY_BASE_FIELDS)
-
protected:
- // Special-purpose constructor for subclasses that have fast paths where
- // their ptr() is a Smi.
- inline FixedArrayBase(Address ptr, AllowInlineSmiStorage allow_smi);
-
- OBJECT_CONSTRUCTORS(FixedArrayBase, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(FixedArrayBase)
+ inline FixedArrayBase(Address ptr,
+ HeapObject::AllowInlineSmiStorage allow_smi);
};
// FixedArray describes fixed-sized arrays with element type Object.
-class FixedArray : public FixedArrayBase {
+class FixedArray
+ : public TorqueGeneratedFixedArray<FixedArray, FixedArrayBase> {
public:
// Setter and getter for elements.
inline Object get(int index) const;
@@ -171,12 +160,14 @@ class FixedArray : public FixedArrayBase {
}
// Code Generation support.
- static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
+ static constexpr int OffsetOfElementAt(int index) {
+ STATIC_ASSERT(kObjectsOffset == SizeFor(0));
+ return SizeFor(index);
+ }
// Garbage collection support.
inline ObjectSlot RawFieldOfElementAt(int index);
- DECL_CAST(FixedArray)
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kTaggedSize;
static_assert(Internals::IsValidSmi(kMaxLength),
@@ -189,9 +180,10 @@ class FixedArray : public FixedArrayBase {
// Dispatched behavior.
DECL_PRINTER(FixedArray)
- DECL_VERIFIER(FixedArray)
- using BodyDescriptor = FlexibleBodyDescriptor<kHeaderSize>;
+ int AllocatedSize();
+
+ class BodyDescriptor;
static constexpr int kObjectsOffset = kHeaderSize;
@@ -208,7 +200,7 @@ class FixedArray : public FixedArrayBase {
inline void set_null(ReadOnlyRoots ro_roots, int index);
inline void set_the_hole(ReadOnlyRoots ro_roots, int index);
- OBJECT_CONSTRUCTORS(FixedArray, FixedArrayBase);
+ TQ_OBJECT_CONSTRUCTORS(FixedArray)
};
// FixedArray alias added only because of IsFixedArrayExact() predicate, which
@@ -217,7 +209,8 @@ class FixedArray : public FixedArrayBase {
class FixedArrayExact final : public FixedArray {};
// FixedDoubleArray describes fixed-sized arrays with element type double.
-class FixedDoubleArray : public FixedArrayBase {
+class FixedDoubleArray
+ : public TorqueGeneratedFixedDoubleArray<FixedDoubleArray, FixedArrayBase> {
public:
// Setter and getter for elements.
inline double get_scalar(int index);
@@ -245,8 +238,6 @@ class FixedDoubleArray : public FixedArrayBase {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
- DECL_CAST(FixedDoubleArray)
-
// Start offset of elements.
static constexpr int kFloatsOffset = kHeaderSize;
@@ -261,29 +252,20 @@ class FixedDoubleArray : public FixedArrayBase {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(FixedDoubleArray, FixedArrayBase);
+ TQ_OBJECT_CONSTRUCTORS(FixedDoubleArray)
};
// WeakFixedArray describes fixed-sized arrays with element type
// MaybeObject.
-class WeakFixedArray : public HeapObject {
+class WeakFixedArray
+ : public TorqueGeneratedWeakFixedArray<WeakFixedArray, HeapObject> {
public:
- DECL_CAST(WeakFixedArray)
-
inline MaybeObject Get(int index) const;
inline MaybeObject Get(const Isolate* isolate, int index) const;
- // Setter that uses write barrier.
- inline void Set(int index, MaybeObject value);
-
- // Setter with explicit barrier mode.
- inline void Set(int index, MaybeObject value, WriteBarrierMode mode);
-
- static constexpr int SizeFor(int length) {
- return kHeaderSize + length * kTaggedSize;
- }
-
- DECL_INT_ACCESSORS(length)
+ inline void Set(
+ int index, MaybeObject value,
+ WriteBarrierMode mode = WriteBarrierMode::UPDATE_WRITE_BARRIER);
// Get and set the length using acquire loads and release stores.
DECL_SYNCHRONIZED_INT_ACCESSORS(length)
@@ -299,19 +281,19 @@ class WeakFixedArray : public HeapObject {
DECL_PRINTER(WeakFixedArray)
DECL_VERIFIER(WeakFixedArray)
- using BodyDescriptor = WeakArrayBodyDescriptor;
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WEAK_FIXED_ARRAY_FIELDS)
+ class BodyDescriptor;
static const int kMaxLength =
(FixedArray::kMaxSize - kHeaderSize) / kTaggedSize;
static_assert(Internals::IsValidSmi(kMaxLength),
"WeakFixedArray maxLength not a Smi");
+ int AllocatedSize();
+
protected:
static int OffsetOfElementAt(int index) {
- return kHeaderSize + index * kTaggedSize;
+ STATIC_ASSERT(kObjectsOffset == SizeFor(0));
+ return SizeFor(index);
}
private:
@@ -319,7 +301,7 @@ class WeakFixedArray : public HeapObject {
static const int kFirstIndex = 1;
- OBJECT_CONSTRUCTORS(WeakFixedArray, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(WeakFixedArray)
};
// WeakArrayList is like a WeakFixedArray with static convenience methods for
@@ -327,11 +309,10 @@ class WeakFixedArray : public HeapObject {
// capacity() returns the allocated size. The number of elements is stored at
// kLengthOffset and is updated with every insertion. The array grows
// dynamically with O(1) amortized insertion.
-class WeakArrayList : public HeapObject {
+class WeakArrayList
+ : public TorqueGeneratedWeakArrayList<WeakArrayList, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
- DECL_CAST(WeakArrayList)
- DECL_VERIFIER(WeakArrayList)
DECL_PRINTER(WeakArrayList)
V8_EXPORT_PRIVATE static Handle<WeakArrayList> AddToEnd(
@@ -365,7 +346,7 @@ class WeakArrayList : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
static constexpr int SizeForCapacity(int capacity) {
- return kHeaderSize + capacity * kTaggedSize;
+ return SizeFor(capacity);
}
static constexpr int CapacityForLength(int length) {
@@ -380,17 +361,12 @@ class WeakArrayList : public HeapObject {
V8_EXPORT_PRIVATE bool IsFull();
- DECL_INT_ACCESSORS(capacity)
- DECL_INT_ACCESSORS(length)
-
// Get and set the capacity using acquire loads and release stores.
DECL_SYNCHRONIZED_INT_ACCESSORS(capacity)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS)
+ int AllocatedSize();
- using BodyDescriptor = WeakArrayBodyDescriptor;
+ class BodyDescriptor;
static const int kMaxCapacity =
(FixedArray::kMaxSize - kHeaderSize) / kTaggedSize;
@@ -418,7 +394,7 @@ class WeakArrayList : public HeapObject {
return kHeaderSize + index * kTaggedSize;
}
- OBJECT_CONSTRUCTORS(WeakArrayList, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(WeakArrayList)
};
class WeakArrayList::Iterator {
@@ -443,7 +419,7 @@ class WeakArrayList::Iterator {
// the allocated size. The number of elements is stored at kLengthIndex and is
// updated with every insertion. The elements of the ArrayList are stored in the
// underlying FixedArray starting at kFirstIndex.
-class ArrayList : public FixedArray {
+class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
public:
V8_EXPORT_PRIVATE static Handle<ArrayList> Add(Isolate* isolate,
Handle<ArrayList> array,
@@ -476,14 +452,13 @@ class ArrayList : public FixedArray {
// Return a copy of the list of size Length() without the first entry. The
// number returned by Length() is stored in the first entry.
static Handle<FixedArray> Elements(Isolate* isolate, Handle<ArrayList> array);
- DECL_CAST(ArrayList)
private:
static Handle<ArrayList> EnsureSpace(Isolate* isolate,
Handle<ArrayList> array, int length);
static const int kLengthIndex = 0;
static const int kFirstIndex = 1;
- OBJECT_CONSTRUCTORS(ArrayList, FixedArray);
+ TQ_OBJECT_CONSTRUCTORS(ArrayList)
};
enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
@@ -494,7 +469,7 @@ inline int Search(T* array, Name name, int valid_entries = 0,
// ByteArray represents fixed sized byte arrays. Used for the relocation info
// that is attached to code objects.
-class ByteArray : public FixedArrayBase {
+class ByteArray : public TorqueGeneratedByteArray<ByteArray, FixedArrayBase> {
public:
inline int Size();
@@ -543,12 +518,9 @@ class ByteArray : public FixedArrayBase {
// Returns a pointer to the ByteArray object for a given data start address.
static inline ByteArray FromDataStartAddress(Address address);
- DECL_CAST(ByteArray)
-
// Dispatched behavior.
inline int ByteArraySize();
DECL_PRINTER(ByteArray)
- DECL_VERIFIER(ByteArray)
// Layout description.
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
@@ -561,11 +533,8 @@ class ByteArray : public FixedArrayBase {
class BodyDescriptor;
protected:
- // Special-purpose constructor for subclasses that have fast paths where
- // their ptr() is a Smi.
- inline ByteArray(Address ptr, AllowInlineSmiStorage allow_smi);
-
- OBJECT_CONSTRUCTORS(ByteArray, FixedArrayBase);
+ TQ_OBJECT_CONSTRUCTORS(ByteArray)
+ inline ByteArray(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi);
};
// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
@@ -605,7 +574,8 @@ class PodArray : public ByteArray {
OBJECT_CONSTRUCTORS(PodArray<T>, ByteArray);
};
-class TemplateList : public FixedArray {
+class TemplateList
+ : public TorqueGeneratedTemplateList<TemplateList, FixedArray> {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
@@ -614,12 +584,11 @@ class TemplateList : public FixedArray {
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
- DECL_CAST(TemplateList)
private:
static const int kLengthIndex = 0;
static const int kFirstElementIndex = kLengthIndex + 1;
- OBJECT_CONSTRUCTORS(TemplateList, FixedArray);
+ TQ_OBJECT_CONSTRUCTORS(TemplateList)
};
} // namespace internal
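The ArrayList comment above describes a length-prefixed layout: the element count lives in slot kLengthIndex and payload entries begin at kFirstIndex of the underlying FixedArray. As a minimal standalone sketch of that layout only (SimpleArrayList is an invented name and a std::vector stands in for the GC-heap FixedArray, so this is not V8's ArrayList), it could look roughly like:

    #include <cstddef>
    #include <vector>

    // Slot 0 holds the element count, payload entries start at slot 1,
    // mirroring kLengthIndex/kFirstIndex in the class above.
    class SimpleArrayList {
     public:
      static constexpr std::size_t kLengthIndex = 0;
      static constexpr std::size_t kFirstIndex = 1;

      SimpleArrayList() : storage_(kFirstIndex, 0) {}

      void Add(int value) {
        storage_.push_back(value);
        ++storage_[kLengthIndex];  // keep the stored count in sync
      }

      int Length() const { return storage_[kLengthIndex]; }
      int Get(std::size_t i) const { return storage_[kFirstIndex + i]; }

     private:
      std::vector<int> storage_;  // stand-in for the backing FixedArray
    };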
diff --git a/deps/v8/src/objects/fixed-array.tq b/deps/v8/src/objects/fixed-array.tq
index 0c57e0bc63..5c22149ebb 100644
--- a/deps/v8/src/objects/fixed-array.tq
+++ b/deps/v8/src/objects/fixed-array.tq
@@ -3,31 +3,49 @@
// found in the LICENSE file.
@abstract
+@generateCppClass
extern class FixedArrayBase extends HeapObject {
+ // length of the array.
const length: Smi;
}
-extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
+@generateBodyDescriptor
+@generateCppClass
+extern class FixedArray extends FixedArrayBase {
+ objects[length]: Object;
+}
type EmptyFixedArray extends FixedArray;
+@generateCppClass
extern class FixedDoubleArray extends FixedArrayBase {
floats[length]: float64_or_hole;
}
-extern class WeakFixedArray extends HeapObject { length: Smi; }
+@generateBodyDescriptor
+@generateCppClass
+extern class WeakFixedArray extends HeapObject {
+ const length: Smi;
+ objects[length]: MaybeObject;
+}
-extern class ByteArray extends FixedArrayBase {}
+@generateCppClass
+extern class ByteArray extends FixedArrayBase {
+ bytes[length]: uint8;
+}
@hasSameInstanceTypeAsParent
+@generateCppClass
extern class ArrayList extends FixedArray {
}
@hasSameInstanceTypeAsParent
-@noVerifier
+@generateCppClass
extern class TemplateList extends FixedArray {
}
+@generateBodyDescriptor
+@generateCppClass
extern class WeakArrayList extends HeapObject {
const capacity: Smi;
length: Smi;
@@ -120,8 +138,15 @@ macro ExtractFixedDoubleArray(
ConstantIterator(kDoubleHole)));
}
+namespace runtime {
+extern runtime FatalProcessOutOfMemoryInvalidArrayLength(NoContext): never;
+}
+
macro NewFixedArray<Iterator: type>(length: intptr, it: Iterator): FixedArray {
if (length == 0) return kEmptyFixedArray;
+ if (length > kFixedArrayMaxLength) deferred {
+ runtime::FatalProcessOutOfMemoryInvalidArrayLength(kNoContext);
+ }
return new
FixedArray{map: kFixedArrayMap, length: Convert<Smi>(length), objects: ...it};
}
@@ -129,6 +154,9 @@ macro NewFixedArray<Iterator: type>(length: intptr, it: Iterator): FixedArray {
macro NewFixedDoubleArray<Iterator: type>(
length: intptr, it: Iterator): FixedDoubleArray|EmptyFixedArray {
if (length == 0) return kEmptyFixedArray;
+ if (length > kFixedDoubleArrayMaxLength) deferred {
+ runtime::FatalProcessOutOfMemoryInvalidArrayLength(kNoContext);
+ }
return new FixedDoubleArray{
map: kFixedDoubleArrayMap,
length: Convert<Smi>(length),
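The Torque allocation macros above gain a guard that calls a fatal out-of-memory runtime function before attempting an allocation whose length exceeds the backing store's maximum. A hedged C++ sketch of the same check-before-allocate pattern, with invented names (kMaxLength, FatalInvalidLength) standing in for the Torque constants and runtime call:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    constexpr std::intptr_t kMaxLength = 1 << 20;  // illustrative limit only

    [[noreturn]] void FatalInvalidLength() {
      std::fputs("invalid array length\n", stderr);
      std::abort();  // mirrors the fatal OOM runtime call in the macros above
    }

    std::vector<double> NewDoubleArray(std::intptr_t length) {
      if (length == 0) return {};                     // empty-array fast path
      if (length > kMaxLength) FatalInvalidLength();  // guard precedes allocation
      return std::vector<double>(static_cast<std::size_t>(length), 0.0);
    }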
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index d2f3a6df72..d455aede1a 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_FOREIGN_INL_H_
#define V8_OBJECTS_FOREIGN_INL_H_
+#include "src/common/globals.h"
#include "src/objects/foreign.h"
+#include "src/common/external-pointer-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/objects-inl.h"
@@ -16,9 +18,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Foreign, HeapObject)
-
-CAST_ACCESSOR(Foreign)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Foreign)
// static
bool Foreign::IsNormalized(Object value) {
@@ -26,12 +26,16 @@ bool Foreign::IsNormalized(Object value) {
return Foreign::cast(value).foreign_address() != kNullAddress;
}
-Address Foreign::foreign_address() {
- return ReadField<Address>(kForeignAddressOffset);
+DEF_GETTER(Foreign, foreign_address, Address) {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kForeignAddressOffset);
+ Address value = DecodeExternalPointer(isolate, encoded_value);
+ return value;
}
-void Foreign::set_foreign_address(Address value) {
- WriteField<Address>(kForeignAddressOffset, value);
+void Foreign::set_foreign_address(Isolate* isolate, Address value) {
+ ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
+ WriteField<ExternalPointer_t>(kForeignAddressOffset, encoded_value);
}
} // namespace internal
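The Foreign accessors above no longer read or write a raw Address; the value is round-tripped through EncodeExternalPointer/DecodeExternalPointer keyed on the isolate. Those helpers are not part of this diff, so the following is only a conceptual sketch of such an encode/decode pair (a toy per-isolate XOR mask, not V8's actual scheme), meant to show why both accessors now take the isolate:

    #include <cstdint>

    using Address = std::uintptr_t;
    using ExternalPointer_t = std::uintptr_t;

    // Hypothetical per-isolate state; only the mask matters for this sketch.
    struct ToyIsolate {
      ExternalPointer_t external_pointer_mask = 0xA5A5A5A5u;
    };

    // Encode a raw off-heap address before storing it in an object field.
    ExternalPointer_t EncodeExternalPointer(const ToyIsolate* isolate, Address value) {
      return value ^ isolate->external_pointer_mask;
    }

    // Decode the stored field back into a usable address.
    Address DecodeExternalPointer(const ToyIsolate* isolate, ExternalPointer_t encoded) {
      return encoded ^ isolate->external_pointer_mask;
    }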
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index 617ca0e34f..e0f498ab7e 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,21 +15,15 @@ namespace v8 {
namespace internal {
// Foreign describes objects pointing from JavaScript to C structures.
-class Foreign : public HeapObject {
+class Foreign : public TorqueGeneratedForeign<Foreign, HeapObject> {
public:
// [address]: field containing the address.
- inline Address foreign_address();
+ DECL_GETTER(foreign_address, Address)
static inline bool IsNormalized(Object object);
- DECL_CAST(Foreign)
-
// Dispatched behavior.
DECL_PRINTER(Foreign)
- DECL_VERIFIER(Foreign)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_FOREIGN_FIELDS)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): When pointer compression is enabled the
@@ -38,11 +32,9 @@ class Foreign : public HeapObject {
// compression is supported) allow unaligned access to full words.
STATIC_ASSERT(IsAligned(kForeignAddressOffset, kTaggedSize));
#else
- STATIC_ASSERT(IsAligned(kForeignAddressOffset, kSystemPointerSize));
+ STATIC_ASSERT(IsAligned(kForeignAddressOffset, kExternalPointerSize));
#endif
- STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
-
class BodyDescriptor;
private:
@@ -50,9 +42,9 @@ class Foreign : public HeapObject {
friend class SerializerDeserializer;
friend class StartupSerializer;
- inline void set_foreign_address(Address value);
+ inline void set_foreign_address(Isolate* isolate, Address value);
- OBJECT_CONSTRUCTORS(Foreign, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Foreign)
};
} // namespace internal
diff --git a/deps/v8/src/objects/foreign.tq b/deps/v8/src/objects/foreign.tq
index 3204635070..dcf52b12e3 100644
--- a/deps/v8/src/objects/foreign.tq
+++ b/deps/v8/src/objects/foreign.tq
@@ -3,6 +3,7 @@
// found in the LICENSE file.
@apiExposedInstanceTypeValue(0x46)
+@generateCppClass
extern class Foreign extends HeapObject {
- foreign_address: RawPtr;
+ foreign_address: ExternalPointer;
}
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index e4b395488d..5627b72823 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -33,17 +33,8 @@ FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
#undef DEFINE_FRAME_ARRAY_ACCESSORS
bool FrameArray::IsWasmFrame(int frame_ix) const {
- return IsWasmCompiledFrame(frame_ix) || IsWasmInterpretedFrame(frame_ix);
-}
-
-bool FrameArray::IsWasmCompiledFrame(int frame_ix) const {
- const int flags = Flags(frame_ix).value();
- return (flags & kIsWasmCompiledFrame) != 0;
-}
-
-bool FrameArray::IsWasmInterpretedFrame(int frame_ix) const {
const int flags = Flags(frame_ix).value();
- return (flags & kIsWasmInterpretedFrame) != 0;
+ return (flags & kIsWasmFrame) != 0;
}
bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
@@ -52,8 +43,7 @@ bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
}
bool FrameArray::IsAnyWasmFrame(int frame_ix) const {
- return IsWasmFrame(frame_ix) || IsWasmInterpretedFrame(frame_ix) ||
- IsAsmJsWasmFrame(frame_ix);
+ return IsWasmFrame(frame_ix) || IsAsmJsWasmFrame(frame_ix);
}
int FrameArray::FrameCount() const {
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index b82a7a60be..9ad4fb96a9 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -38,8 +38,6 @@ class FrameArray : public FixedArray {
#undef DECL_FRAME_ARRAY_ACCESSORS
inline bool IsWasmFrame(int frame_ix) const;
- inline bool IsWasmCompiledFrame(int frame_ix) const;
- inline bool IsWasmInterpretedFrame(int frame_ix) const;
inline bool IsAsmJsWasmFrame(int frame_ix) const;
inline bool IsAnyWasmFrame(int frame_ix) const;
inline int FrameCount() const;
@@ -48,14 +46,13 @@ class FrameArray : public FixedArray {
// Flags.
enum Flag {
- kIsWasmCompiledFrame = 1 << 0,
- kIsWasmInterpretedFrame = 1 << 1,
- kIsAsmJsWasmFrame = 1 << 2,
- kIsStrict = 1 << 3,
- kIsConstructor = 1 << 4,
- kAsmJsAtNumberConversion = 1 << 5,
- kIsAsync = 1 << 6,
- kIsPromiseAll = 1 << 7
+ kIsWasmFrame = 1 << 0,
+ kIsAsmJsWasmFrame = 1 << 1,
+ kIsStrict = 1 << 2,
+ kIsConstructor = 1 << 3,
+ kAsmJsAtNumberConversion = 1 << 4,
+ kIsAsync = 1 << 5,
+ kIsPromiseAll = 1 << 6
};
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index 318272c340..1aa571d5b6 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -18,9 +18,8 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(FreeSpace, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(FreeSpace)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
int FreeSpace::Size() { return size(); }
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 5714727036..ad5ffa075c 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FREE_SPACE_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,12 +19,9 @@ namespace internal {
// the heap remains iterable. They have a size and a next pointer.
// The next pointer is the raw address of the next FreeSpace object (or NULL)
// in the free list.
-class FreeSpace : public HeapObject {
+class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
public:
// [size]: size of the free space including the header.
- inline int size() const;
- inline void set_size(int value);
-
inline int relaxed_read_size() const;
inline void relaxed_write_size(int value);
@@ -39,15 +36,11 @@ class FreeSpace : public HeapObject {
// Dispatched behavior.
DECL_PRINTER(FreeSpace)
- DECL_VERIFIER(FreeSpace)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_FREE_SPACE_FIELDS)
private:
inline bool IsValid();
- OBJECT_CONSTRUCTORS(FreeSpace, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(FreeSpace)
};
} // namespace internal
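As the FreeSpace comment above explains, each free region carries its size and a raw pointer to the next region, which is all that is needed to thread unused memory into a singly linked free list. A minimal, self-contained sketch of that idea with plain C++ structs (not GC-heap objects, and the names are mine):

    #include <cstddef>
    #include <new>

    // Each free region begins with a small header: its size and the next region.
    struct FreeRegion {
      std::size_t size;
      FreeRegion* next;  // nullptr terminates the list, like NULL in the comment
    };

    class FreeList {
     public:
      // Thread a region of `size` bytes starting at `memory` onto the list.
      void Add(void* memory, std::size_t size) {
        head_ = new (memory) FreeRegion{size, head_};
      }

      // Pop the first region large enough for `size` bytes, or nullptr.
      FreeRegion* TakeFirstFit(std::size_t size) {
        FreeRegion** link = &head_;
        while (*link != nullptr) {
          if ((*link)->size >= size) {
            FreeRegion* found = *link;
            *link = found->next;
            return found;
          }
          link = &(*link)->next;
        }
        return nullptr;
      }

     private:
      FreeRegion* head_ = nullptr;
    };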
diff --git a/deps/v8/src/objects/free-space.tq b/deps/v8/src/objects/free-space.tq
index 5fc8767a58..501326b04d 100644
--- a/deps/v8/src/objects/free-space.tq
+++ b/deps/v8/src/objects/free-space.tq
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+@generateCppClass
extern class FreeSpace extends HeapObject {
size: Smi;
next: FreeSpace|Uninitialized;
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
index dda576fd8a..5e17ebf054 100644
--- a/deps/v8/src/objects/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_FUNCTION_KIND_H_
#include "src/base/bounds.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
@@ -56,6 +57,9 @@ enum FunctionKind : uint8_t {
kLastFunctionKind = kClassMembersInitializerFunction,
};
+constexpr int kFunctionKindBitSize = 5;
+STATIC_ASSERT(kLastFunctionKind < (1 << kFunctionKindBitSize));
+
inline bool IsArrowFunction(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kArrowFunction,
FunctionKind::kAsyncArrowFunction);
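The new kFunctionKindBitSize constant reserves a 5-bit budget for FunctionKind inside packed flag words, and the STATIC_ASSERT guarantees every enumerator fits in that budget. As a generic illustration of why such an assert matters (a hypothetical flags layout, not V8's actual bit fields), packing and unpacking a kind within the budget looks like:

    #include <cstdint>

    constexpr int kKindBitSize = 5;  // bit budget, as asserted above
    constexpr std::uint32_t kKindMask = (1u << kKindBitSize) - 1;

    enum class Kind : std::uint8_t { kNormal, kArrow, kAsync, kLast = kAsync };
    static_assert(static_cast<int>(Kind::kLast) < (1 << kKindBitSize),
                  "every Kind value must fit in the reserved bits");

    // Store the kind in the low bits of a flags word, preserving the other bits.
    std::uint32_t SetKind(std::uint32_t flags, Kind kind) {
      return (flags & ~kKindMask) | static_cast<std::uint32_t>(kind);
    }

    Kind GetKind(std::uint32_t flags) {
      return static_cast<Kind>(flags & kKindMask);
    }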
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 546b16e93d..4e40fa70b8 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -16,15 +16,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(HeapNumber, PrimitiveHeapObject)
-
-CAST_ACCESSOR(HeapNumber)
-
-double HeapNumber::value() const { return ReadField<double>(kValueOffset); }
-
-void HeapNumber::set_value(double value) {
- WriteField<double>(kValueOffset, value);
-}
+TQ_OBJECT_CONSTRUCTORS_IMPL(HeapNumber)
uint64_t HeapNumber::value_as_bits() const {
// Bug(v8:8875): HeapNumber's double may be unaligned.
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index 259e23a41d..4e77a52340 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -15,12 +15,9 @@ namespace internal {
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer).
-class HeapNumber : public PrimitiveHeapObject {
+class HeapNumber
+ : public TorqueGeneratedHeapNumber<HeapNumber, PrimitiveHeapObject> {
public:
- // [value]: number value.
- inline double value() const;
- inline void set_value(double value);
-
inline uint64_t value_as_bits() const;
inline void set_value_as_bits(uint64_t bits);
@@ -28,7 +25,6 @@ class HeapNumber : public PrimitiveHeapObject {
inline int get_sign();
// Layout description.
- static const int kValueOffset = PrimitiveHeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second
// is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
// words within double numbers are endian dependent and they are set
@@ -43,7 +39,6 @@ class HeapNumber : public PrimitiveHeapObject {
#error Unknown byte ordering
#endif
- static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
@@ -56,12 +51,10 @@ class HeapNumber : public PrimitiveHeapObject {
static const int kMantissaBitsInTopWord = 20;
static const int kNonMantissaBitsInTopWord = 12;
- DECL_CAST(HeapNumber)
- DECL_VERIFIER(HeapNumber)
DECL_PRINTER(HeapNumber)
V8_EXPORT_PRIVATE void HeapNumberShortPrint(std::ostream& os);
- OBJECT_CONSTRUCTORS(HeapNumber, PrimitiveHeapObject);
+ TQ_OBJECT_CONSTRUCTORS(HeapNumber)
};
} // namespace internal
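The constants kept in HeapNumber above encode the IEEE-754 layout of the upper 32-bit word of a double: one sign bit (kSignMask), an 11-bit exponent (kExponentMask), and the top 20 mantissa bits (kMantissaMask). A small self-contained check of that decomposition, independent of HeapNumber itself; the shift-based extraction is endian-agnostic even though the in-memory word offsets mentioned above are not:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const double value = -6.25;
      std::uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // bit-exact view of the double
      const std::uint32_t top = static_cast<std::uint32_t>(bits >> 32);

      const std::uint32_t sign = (top & 0x80000000u) >> 31;      // kSignMask
      const std::uint32_t exponent = (top & 0x7ff00000u) >> 20;  // kExponentMask
      const std::uint32_t mantissa_top = top & 0xfffffu;         // kMantissaMask

      // Prints: sign=1 exponent=1025 mantissa_top=0x90000
      std::printf("sign=%u exponent=%u mantissa_top=0x%05x\n", sign, exponent,
                  mantissa_top);
      return 0;
    }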
diff --git a/deps/v8/src/objects/heap-number.tq b/deps/v8/src/objects/heap-number.tq
index 478e861bc2..cffe3a092c 100644
--- a/deps/v8/src/objects/heap-number.tq
+++ b/deps/v8/src/objects/heap-number.tq
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern class HeapNumber extends PrimitiveHeapObject { value: float64; }
+@generateCppClass
+extern class HeapNumber extends PrimitiveHeapObject {
+ value: float64;
+}
// The HeapNumber value NaN
type NaN extends HeapNumber;
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 74fe664ca3..b19d429320 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -191,7 +191,7 @@ class HeapObject : public Object {
bool CanBeRehashed() const;
// Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap(Isolate* isolate);
+ void RehashBasedOnMap(ReadOnlyRoots root);
// Layout description.
#define HEAP_OBJECT_FIELDS(V) \
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index dee27016d7..d358e2780e 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -563,14 +563,12 @@ bool ValidateResource(const icu::Locale locale, const char* path,
} // namespace
std::set<std::string> Intl::BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const std::vector<std::string>& icu_available_locales, const char* path,
const char* validate_key) {
std::set<std::string> locales;
- for (int32_t i = 0; i < count; ++i) {
- std::string locale =
- Intl::ToLanguageTag(icu_available_locales[i]).FromJust();
+ for (const std::string& locale : icu_available_locales) {
if (path != nullptr || validate_key != nullptr) {
- if (!ValidateResource(icu_available_locales[i], path, validate_key)) {
+ if (!ValidateResource(icu::Locale(locale.c_str()), path, validate_key)) {
continue;
}
}
@@ -2107,9 +2105,9 @@ Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
return Just(false);
}
-const std::set<std::string>& Intl::GetAvailableLocalesForLocale() {
- static base::LazyInstance<Intl::AvailableLocales<icu::Locale>>::type
- available_locales = LAZY_INSTANCE_INITIALIZER;
+const std::set<std::string>& Intl::GetAvailableLocales() {
+ static base::LazyInstance<Intl::AvailableLocales<>>::type available_locales =
+ LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
@@ -2123,8 +2121,7 @@ struct CheckCalendar {
} // namespace
const std::set<std::string>& Intl::GetAvailableLocalesForDateFormat() {
- static base::LazyInstance<
- Intl::AvailableLocales<icu::DateFormat, CheckCalendar>>::type
+ static base::LazyInstance<Intl::AvailableLocales<CheckCalendar>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 0d8cab42e8..944a1f103e 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -49,7 +49,7 @@ class Intl {
// script; e.g., pa_Guru_IN (language=Panjabi, script=Gurmukhi, country=India)
// would include pa_IN.
static std::set<std::string> BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const std::vector<std::string>& locales, const char* path,
const char* validate_key);
static Maybe<std::string> ToLanguageTag(const icu::Locale& locale);
@@ -276,21 +276,34 @@ class Intl {
static const char* path() { return nullptr; }
};
- template <typename T, typename C = SkipResourceCheck>
+ template <typename C = SkipResourceCheck>
class AvailableLocales {
public:
AvailableLocales() {
- int32_t num_locales = 0;
- const icu::Locale* icu_available_locales =
- T::getAvailableLocales(num_locales);
- set = Intl::BuildLocaleSet(icu_available_locales, num_locales, C::path(),
- C::key());
+ UErrorCode status = U_ZERO_ERROR;
+ UEnumeration* uenum =
+ uloc_openAvailableByType(ULOC_AVAILABLE_WITH_LEGACY_ALIASES, &status);
+ DCHECK(U_SUCCESS(status));
+
+ std::vector<std::string> all_locales;
+ const char* loc;
+ while ((loc = uenum_next(uenum, NULL, &status)) != nullptr) {
+ DCHECK(U_SUCCESS(status));
+ std::string locstr(loc);
+ std::replace(locstr.begin(), locstr.end(), '_', '-');
+ // Handle special case
+ if (locstr == "en-US-POSIX") locstr = "en-US-u-va-posix";
+ all_locales.push_back(locstr);
+ }
+ uenum_close(uenum);
+
+ set_ = Intl::BuildLocaleSet(all_locales, C::path(), C::key());
}
virtual ~AvailableLocales() {}
- const std::set<std::string>& Get() const { return set; }
+ const std::set<std::string>& Get() const { return set_; }
private:
- std::set<std::string> set;
+ std::set<std::string> set_;
};
// Utility function to set text to BreakIterator.
@@ -311,7 +324,7 @@ class Intl {
static String ConvertOneByteToLower(String src, String dst);
- static const std::set<std::string>& GetAvailableLocalesForLocale();
+ static const std::set<std::string>& GetAvailableLocales();
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
};
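The rewritten AvailableLocales constructor above normalizes ICU locale identifiers into BCP 47 form before building the set: underscores become hyphens and the legacy "en-US-POSIX" id maps to "en-US-u-va-posix". That normalization step on its own, pulled out as a plain helper (the name NormalizeLocaleId is mine, not V8's):

    #include <algorithm>
    #include <string>

    // Convert an ICU-style locale id ("en_US") to its BCP 47 shape ("en-US"),
    // including the one special case handled in the constructor above.
    std::string NormalizeLocaleId(const char* icu_id) {
      std::string id(icu_id);
      std::replace(id.begin(), id.end(), '_', '-');
      if (id == "en-US-POSIX") id = "en-US-u-va-posix";
      return id;
    }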
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 8f4f130a0d..41db48dae3 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -23,6 +23,7 @@ bitfield struct JSDateTimeFormatFlags extends uint31 {
time_style: DateTimeStyle: 3 bit;
}
+@generateCppClass
extern class JSDateTimeFormat extends JSObject {
locale: String;
icu_locale: Foreign; // Managed<icu::Locale>
@@ -40,6 +41,7 @@ bitfield struct JSDisplayNamesFlags extends uint31 {
fallback: JSDisplayNamesFallback: 1 bit;
}
+@generateCppClass
extern class JSDisplayNames extends JSObject {
internal: Foreign; // Managed<DisplayNamesInternal>
flags: SmiTagged<JSDisplayNamesFlags>;
@@ -52,12 +54,14 @@ bitfield struct JSListFormatFlags extends uint31 {
Type: JSListFormatType: 2 bit; // "type" is a reserved word.
}
+@generateCppClass
extern class JSListFormat extends JSObject {
locale: String;
icu_formatter: Foreign; // Managed<icu::ListFormatter>
flags: SmiTagged<JSListFormatFlags>;
}
+@generateCppClass
extern class JSNumberFormat extends JSObject {
locale: String;
icu_number_formatter:
@@ -70,6 +74,7 @@ bitfield struct JSPluralRulesFlags extends uint31 {
Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
}
+@generateCppClass
extern class JSPluralRules extends JSObject {
locale: String;
flags: SmiTagged<JSPluralRulesFlags>;
@@ -84,6 +89,7 @@ bitfield struct JSRelativeTimeFormatFlags extends uint31 {
numeric: JSRelativeTimeFormatNumeric: 1 bit;
}
+@generateCppClass
extern class JSRelativeTimeFormat extends JSObject {
locale: String;
numberingSystem: String;
@@ -91,6 +97,7 @@ extern class JSRelativeTimeFormat extends JSObject {
flags: SmiTagged<JSRelativeTimeFormatFlags>;
}
+@generateCppClass
extern class JSLocale extends JSObject {
icu_locale: Foreign; // Managed<icu::Locale>
}
@@ -101,6 +108,7 @@ bitfield struct JSSegmenterFlags extends uint31 {
granularity: JSSegmenterGranularity: 2 bit;
}
+@generateCppClass
extern class JSSegmenter extends JSObject {
locale: String;
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
@@ -112,12 +120,14 @@ bitfield struct JSSegmentIteratorFlags extends uint31 {
break_type_set: bool: 1 bit;
}
+@generateCppClass
extern class JSSegmentIterator extends JSObject {
icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
unicode_string: Foreign; // Managed<icu::UnicodeString>
flags: SmiTagged<JSSegmentIteratorFlags>;
}
+@generateCppClass
extern class JSV8BreakIterator extends JSObject {
locale: String;
break_iterator: Foreign; // Managed<icu::BreakIterator>;
@@ -129,6 +139,7 @@ extern class JSV8BreakIterator extends JSObject {
bound_break_type: Undefined|JSFunction;
}
+@generateCppClass
extern class JSCollator extends JSObject {
icu_collator: Foreign; // Managed<icu::Collator>
bound_compare: Undefined|JSFunction;
diff --git a/deps/v8/src/objects/js-aggregate-error-inl.h b/deps/v8/src/objects/js-aggregate-error-inl.h
new file mode 100644
index 0000000000..552012c37f
--- /dev/null
+++ b/deps/v8/src/objects/js-aggregate-error-inl.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
+#define V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
+
+#include "src/objects/js-aggregate-error.h"
+
+#include "src/objects/objects-inl.h" // Needed for write barriers
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSAggregateError)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
diff --git a/deps/v8/src/objects/js-aggregate-error.h b/deps/v8/src/objects/js-aggregate-error.h
new file mode 100644
index 0000000000..c77633d44e
--- /dev/null
+++ b/deps/v8/src/objects/js-aggregate-error.h
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_AGGREGATE_ERROR_H_
+#define V8_OBJECTS_JS_AGGREGATE_ERROR_H_
+
+#include "src/objects/js-objects.h"
+#include "torque-generated/builtin-definitions-tq.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSAggregateError
+ : public TorqueGeneratedJSAggregateError<JSAggregateError, JSObject> {
+ public:
+ DECL_PRINTER(JSAggregateError)
+ TQ_OBJECT_CONSTRUCTORS(JSAggregateError)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_JS_AGGREGATE_ERROR_H_
diff --git a/deps/v8/src/objects/js-aggregate-error.tq b/deps/v8/src/objects/js-aggregate-error.tq
new file mode 100644
index 0000000000..efa416e9fb
--- /dev/null
+++ b/deps/v8/src/objects/js-aggregate-error.tq
@@ -0,0 +1,81 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-aggregate-error.h'
+
+@generateCppClass
+extern class JSAggregateError extends JSObject {
+ // Only Undefined during AggregateError object creation. In order to make the
+ // field type FixedArray, we'd need to initialize it in ErrorUtils::Construct
+ // (after that point it is too late), which we don't want.
+ errors: FixedArray|Undefined;
+}
+
+namespace error {
+
+transitioning javascript builtin AggregateErrorConstructor(
+ js-implicit context: NativeContext, target: JSFunction,
+ newTarget: JSAny)(...arguments): JSAny {
+ // This function is implementing the spec as suggested by
+ // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja):
+ // change this if the PR is declined.
+
+ // 1. If NewTarget is undefined, let newTarget be the active function
+ // object, else let newTarget be NewTarget.
+ // 2. Let O be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%AggregateError.prototype%", Ā« [[ErrorData]], [[AggregateErrors]] Ā»).
+ // 3. If _message_ is not _undefined_, then
+ // a. Let msg be ? ToString(_message_).
+ // b. Let msgDesc be the PropertyDescriptor { [[Value]]: _msg_,
+ // [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true*
+ // c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_).
+ const message: JSAny = arguments[1];
+ const obj: JSAggregateError =
+ ConstructAggregateErrorHelper(context, target, newTarget, message);
+
+ // 4. Let errorsList be ? IterableToList(errors).
+ const errors: JSAny = arguments[0];
+ const errorsArray =
+ iterator::IterableToFixedArrayWithSymbolLookupSlow(errors);
+ // errorsArray must be marked copy-on-write, since the "errors" getter
+ // creates a thin JSArray wrapper around it.
+ MakeFixedArrayCOW(errorsArray);
+
+ // 5. Set O.[[AggregateErrors]] to errorsList.
+ obj.errors = errorsArray;
+
+ // 6. Return O.
+ return obj;
+}
+
+transitioning javascript builtin AggregateErrorPrototypeErrorsGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ // 1. Let E be the this value.
+ // 2. If Type(E) is not Object, throw a TypeError exception.
+ // 3. If E does not have an [[ErrorData]] internal slot, throw a TypeError
+ // exception.
+ // 4. If E does not have an [[AggregateErrors]] internal slot, throw a
+ // TypeError exception.
+ // 5. Return ! CreateArrayFromList(E.[[AggregateErrors]]).
+ typeswitch (receiver) {
+ case (receiver: JSAggregateError): {
+ return array::CreateJSArrayWithElements(
+ UnsafeCast<FixedArray>(receiver.errors));
+ }
+ case (Object): {
+ ThrowTypeError(
+ MessageTemplate::kNotGeneric, 'JSAggregateError.prototype.errors.get',
+ 'AggregateError');
+ }
+ }
+}
+
+extern runtime ConstructAggregateErrorHelper(
+ Context, JSFunction, JSAny, Object): JSAggregateError;
+
+extern runtime ConstructInternalAggregateErrorHelper(
+ Context, Object): JSAggregateError;
+
+extern macro MakeFixedArrayCOW(FixedArray);
+}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index b77f5580e2..6737cdc719 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_INL_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_INL_H_
+#include "src/common/external-pointer.h"
#include "src/objects/js-array-buffer.h"
+#include "src/common/external-pointer-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/objects-inl.h"
@@ -18,15 +20,10 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSTypedArray, JSArrayBufferView)
-OBJECT_CONSTRUCTORS_IMPL(JSDataView, JSArrayBufferView)
-
-CAST_ACCESSOR(JSArrayBuffer)
-CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSTypedArray)
-CAST_ACCESSOR(JSDataView)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSTypedArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataView)
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
@@ -36,12 +33,27 @@ void JSArrayBuffer::set_byte_length(size_t value) {
WriteField<size_t>(kByteLengthOffset, value);
}
-void* JSArrayBuffer::backing_store() const {
- return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
+DEF_GETTER(JSArrayBuffer, backing_store, void*) {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kBackingStoreOffset);
+ return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
+}
+
+void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
+ ExternalPointer_t encoded_value =
+ EncodeExternalPointer(isolate, reinterpret_cast<Address>(value));
+ WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
}
-void JSArrayBuffer::set_backing_store(void* value) {
- WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
+uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kBackingStoreOffset);
+ return static_cast<uint32_t>(encoded_value);
+}
+
+void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
+ ExternalPointer_t encoded_value = ref;
+ WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -173,8 +185,6 @@ void JSArrayBufferView::set_byte_length(size_t value) {
WriteField<size_t>(kByteLengthOffset, value);
}
-ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
-
bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer()).was_detached();
}
@@ -185,12 +195,15 @@ void JSTypedArray::set_length(size_t value) {
WriteField<size_t>(kLengthOffset, value);
}
-Address JSTypedArray::external_pointer() const {
- return ReadField<Address>(kExternalPointerOffset);
+DEF_GETTER(JSTypedArray, external_pointer, Address) {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kExternalPointerOffset);
+ return DecodeExternalPointer(isolate, encoded_value);
}
-void JSTypedArray::set_external_pointer(Address value) {
- WriteField<Address>(kExternalPointerOffset, value);
+void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
+ ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
+ WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -202,14 +215,30 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
#endif
}
-void JSTypedArray::RemoveExternalPointerCompensationForSerialization() {
- DCHECK(is_on_heap());
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
- set_external_pointer(external_pointer() -
- ExternalPointerCompensationForOnHeapArray(isolate));
+uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
+ DCHECK(!is_on_heap());
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kExternalPointerOffset);
+ return static_cast<uint32_t>(encoded_value);
+}
+
+void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
+ DCHECK(!is_on_heap());
+ ExternalPointer_t encoded_value = ref;
+ WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
}
-ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
+void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
+ Isolate* isolate) {
+ DCHECK(is_on_heap());
+ // TODO(v8:10391): once we have an external table, avoid the need for
+ // compensation by replacing external_pointer and base_pointer fields
+ // with one data_pointer field which can point to either external data
+ // backing store or into on-heap backing store.
+ set_external_pointer(
+ isolate,
+ external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate));
+}
void* JSTypedArray::DataPtr() {
// Zero-extend Tagged_t to Address according to current compression scheme
@@ -220,18 +249,19 @@ void* JSTypedArray::DataPtr() {
static_cast<Tagged_t>(base_pointer().ptr()));
}
-void JSTypedArray::SetOffHeapDataPtr(void* base, Address offset) {
+void JSTypedArray::SetOffHeapDataPtr(Isolate* isolate, void* base,
+ Address offset) {
set_base_pointer(Smi::zero(), SKIP_WRITE_BARRIER);
Address address = reinterpret_cast<Address>(base) + offset;
- set_external_pointer(address);
+ set_external_pointer(isolate, address);
DCHECK_EQ(address, reinterpret_cast<Address>(DataPtr()));
}
-void JSTypedArray::SetOnHeapDataPtr(HeapObject base, Address offset) {
+void JSTypedArray::SetOnHeapDataPtr(Isolate* isolate, HeapObject base,
+ Address offset) {
set_base_pointer(base);
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
- set_external_pointer(offset +
- ExternalPointerCompensationForOnHeapArray(isolate));
+ set_external_pointer(
+ isolate, offset + ExternalPointerCompensationForOnHeapArray(isolate));
DCHECK_EQ(base.ptr() + offset, reinterpret_cast<Address>(DataPtr()));
}
@@ -264,12 +294,16 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-void* JSDataView::data_pointer() const {
- return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
+DEF_GETTER(JSDataView, data_pointer, void*) {
+ ExternalPointer_t encoded_value =
+ ReadField<ExternalPointer_t>(kDataPointerOffset);
+ return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
}
-void JSDataView::set_data_pointer(void* value) {
- WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
+void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
+ WriteField<ExternalPointer_t>(
+ kDataPointerOffset,
+ EncodeExternalPointer(isolate, reinterpret_cast<Address>(value)));
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 0c2aca6d71..c67777acc0 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -45,7 +45,7 @@ void JSArrayBuffer::Setup(SharedFlag shared,
}
set_extension(nullptr);
if (!backing_store) {
- set_backing_store(nullptr);
+ set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
} else {
Attach(std::move(backing_store));
@@ -60,19 +60,20 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
DCHECK_NOT_NULL(backing_store);
DCHECK_EQ(is_shared(), backing_store->is_shared());
DCHECK(!was_detached());
- set_backing_store(backing_store->buffer_start());
+ Isolate* isolate = GetIsolate();
+ set_backing_store(isolate, backing_store->buffer_start());
set_byte_length(backing_store->byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- Heap* heap = GetIsolate()->heap();
+ Heap* heap = isolate->heap();
ArrayBufferExtension* extension = EnsureExtension();
size_t bytes = backing_store->PerIsolateAccountingLength();
extension->set_accounting_length(bytes);
extension->set_backing_store(std::move(backing_store));
heap->AppendArrayBufferExtension(*this, extension);
} else {
- GetIsolate()->heap()->RegisterBackingStore(*this, std::move(backing_store));
+ isolate->heap()->RegisterBackingStore(*this, std::move(backing_store));
}
}
@@ -103,7 +104,7 @@ void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
DCHECK(!is_shared());
DCHECK(!is_asmjs_memory());
- set_backing_store(nullptr);
+ set_backing_store(isolate, nullptr);
set_byte_length(0);
set_was_detached(true);
}
@@ -193,7 +194,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
// Clear the elements of the typed array.
self->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
- self->SetOffHeapDataPtr(array_buffer->backing_store(), 0);
+ self->SetOffHeapDataPtr(isolate, array_buffer->backing_store(), 0);
DCHECK(!self->is_on_heap());
return array_buffer;
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 624b716713..3d6b293e2d 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -17,7 +17,8 @@ namespace internal {
class ArrayBufferExtension;
-class JSArrayBuffer : public JSObject {
+class JSArrayBuffer
+ : public TorqueGeneratedJSArrayBuffer<JSArrayBuffer, JSObject> {
public:
// The maximum length for JSArrayBuffer's supported by V8.
// On 32-bit architectures we limit this to 2GiB, so that
@@ -33,7 +34,8 @@ class JSArrayBuffer : public JSObject {
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
// [backing_store]: backing memory for this array
- DECL_PRIMITIVE_ACCESSORS(backing_store, void*)
+ DECL_GETTER(backing_store, void*)
+ inline void set_backing_store(Isolate* isolate, void* value);
// [extension]: extension object used for GC
DECL_PRIMITIVE_ACCESSORS(extension, ArrayBufferExtension*)
@@ -50,7 +52,7 @@ class JSArrayBuffer : public JSObject {
// is deterministic. Depending on the V8 build mode there could be no padding.
V8_INLINE void clear_padding();
-// Bit positions for [bit_field].
+ // Bit positions for [bit_field].
DEFINE_TORQUE_GENERATED_JS_ARRAY_BUFFER_FLAGS()
// [is_external]: true indicates that the embedder is in charge of freeing the
@@ -70,8 +72,6 @@ class JSArrayBuffer : public JSObject {
// [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_shared)
- DECL_CAST(JSArrayBuffer)
-
// Initializes the fields of the ArrayBuffer. The provided backing_store can
// be nullptr. If it is not nullptr, then the function registers it with
// src/heap/array-buffer-tracker.h.
@@ -110,13 +110,21 @@ class JSArrayBuffer : public JSObject {
void YoungMarkExtension();
void YoungMarkExtensionPromoted();
+ //
+ // Serializer/deserializer support.
+ //
+
+ // Backing stores are serialized/deserialized separately. During serialization
+ // the backing store reference is stored in the backing store field and upon
+ // deserialization it is converted back to the actual external (off-heap)
+ // pointer value.
+ inline uint32_t GetBackingStoreRefForDeserialization() const;
+ inline void SetBackingStoreRefForSerialization(uint32_t ref);
+
// Dispatched behavior.
DECL_PRINTER(JSArrayBuffer)
DECL_VERIFIER(JSArrayBuffer)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_BUFFER_FIELDS)
static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
static const int kSizeWithEmbedderFields =
@@ -125,8 +133,6 @@ class JSArrayBuffer : public JSObject {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(JSArrayBuffer, JSObject);
-
private:
inline ArrayBufferExtension** extension_location() const;
@@ -136,6 +142,8 @@ class JSArrayBuffer : public JSObject {
inline uint32_t* extension_lo() const;
inline uint32_t* extension_hi() const;
#endif
+
+ TQ_OBJECT_CONSTRUCTORS(JSArrayBuffer)
};
// Each JSArrayBuffer (with a backing store) has a corresponding native-heap
@@ -209,44 +217,29 @@ class ArrayBufferExtension : public Malloced {
void set_next(ArrayBufferExtension* extension) { next_ = extension; }
};
-class JSArrayBufferView : public JSObject {
+class JSArrayBufferView
+ : public TorqueGeneratedJSArrayBufferView<JSArrayBufferView, JSObject> {
public:
- // [buffer]: ArrayBuffer that this typed array views.
- DECL_ACCESSORS(buffer, Object)
-
// [byte_offset]: offset of typed array in bytes.
DECL_PRIMITIVE_ACCESSORS(byte_offset, size_t)
// [byte_length]: length of typed array in bytes.
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
- DECL_CAST(JSArrayBufferView)
-
DECL_VERIFIER(JSArrayBufferView)
inline bool WasDetached() const;
-// Layout description.
-#define JS_ARRAY_BUFFER_VIEW_FIELDS(V) \
- V(kBufferOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kByteOffsetOffset, kUIntptrSize) \
- V(kByteLengthOffset, kUIntptrSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_ARRAY_BUFFER_VIEW_FIELDS)
-#undef JS_ARRAY_BUFFER_VIEW_FIELDS
+ static constexpr int kEndOfTaggedFieldsOffset = kByteOffsetOffset;
STATIC_ASSERT(IsAligned(kByteOffsetOffset, kUIntptrSize));
STATIC_ASSERT(IsAligned(kByteLengthOffset, kUIntptrSize));
- OBJECT_CONSTRUCTORS(JSArrayBufferView, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArrayBufferView)
};
-class JSTypedArray : public JSArrayBufferView {
+class JSTypedArray
+ : public TorqueGeneratedJSTypedArray<JSTypedArray, JSArrayBufferView> {
public:
// TODO(v8:4153): This should be equal to JSArrayBuffer::kMaxByteLength
// eventually.
@@ -260,8 +253,6 @@ class JSTypedArray : public JSArrayBufferView {
Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
- DECL_CAST(JSTypedArray)
-
ExternalArrayType type();
V8_EXPORT_PRIVATE size_t element_size();
@@ -270,8 +261,9 @@ class JSTypedArray : public JSArrayBufferView {
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
- inline void SetOffHeapDataPtr(void* base, Address offset);
- inline void SetOnHeapDataPtr(HeapObject base, Address offset);
+ inline void SetOffHeapDataPtr(Isolate* isolate, void* base, Address offset);
+ inline void SetOnHeapDataPtr(Isolate* isolate, HeapObject base,
+ Address offset);
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
@@ -288,8 +280,21 @@ class JSTypedArray : public JSArrayBufferView {
static inline Address ExternalPointerCompensationForOnHeapArray(
const Isolate* isolate);
+ //
+ // Serializer/deserializer support.
+ //
+
+ // External backing stores are serialized/deserialized separately.
+ // During serialization the backing store reference is stored in the typed
+ // array object and upon deserialization it is converted back to the actual
+ // external (off-heap) pointer value.
+ // The backing store reference is stored in the external_pointer field.
+ inline uint32_t GetExternalBackingStoreRefForDeserialization() const;
+ inline void SetExternalBackingStoreRefForSerialization(uint32_t ref);
+
// Subtracts external pointer compensation from the external pointer value.
- inline void RemoveExternalPointerCompensationForSerialization();
+ inline void RemoveExternalPointerCompensationForSerialization(
+ Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -299,19 +304,6 @@ class JSTypedArray : public JSArrayBufferView {
DECL_PRINTER(JSTypedArray)
DECL_VERIFIER(JSTypedArray)
-// Layout description.
-#define JS_TYPED_ARRAY_FIELDS(V) \
- /* Raw data fields. */ \
- V(kLengthOffset, kUIntptrSize) \
- V(kExternalPointerOffset, kSystemPointerSize) \
- V(kBasePointerOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
- JS_TYPED_ARRAY_FIELDS)
-#undef JS_TYPED_ARRAY_FIELDS
-
STATIC_ASSERT(IsAligned(kLengthOffset, kUIntptrSize));
STATIC_ASSERT(IsAligned(kExternalPointerOffset, kSystemPointerSize));
@@ -330,37 +322,24 @@ class JSTypedArray : public JSArrayBufferView {
private:
friend class Deserializer;
- // [base_pointer]: TODO(v8:4153)
- DECL_ACCESSORS(base_pointer, Object)
-
// [external_pointer]: TODO(v8:4153)
- DECL_PRIMITIVE_ACCESSORS(external_pointer, Address)
+ DECL_GETTER(external_pointer, Address)
+ inline void set_external_pointer(Isolate* isolate, Address value);
- OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
+ TQ_OBJECT_CONSTRUCTORS(JSTypedArray)
};
-class JSDataView : public JSArrayBufferView {
+class JSDataView
+ : public TorqueGeneratedJSDataView<JSDataView, JSArrayBufferView> {
public:
// [data_pointer]: pointer to the actual data.
- DECL_PRIMITIVE_ACCESSORS(data_pointer, void*)
-
- DECL_CAST(JSDataView)
+ DECL_GETTER(data_pointer, void*)
+ inline void set_data_pointer(Isolate* isolate, void* value);
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
- // Layout description.
-#define JS_DATA_VIEW_FIELDS(V) \
- /* Raw data fields. */ \
- V(kDataPointerOffset, kIntptrSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
- JS_DATA_VIEW_FIELDS)
-#undef JS_DATA_VIEW_FIELDS
-
STATIC_ASSERT(IsAligned(kDataPointerOffset, kUIntptrSize));
static const int kSizeWithEmbedderFields =
@@ -369,7 +348,7 @@ class JSDataView : public JSArrayBufferView {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(JSDataView, JSArrayBufferView);
+ TQ_OBJECT_CONSTRUCTORS(JSDataView)
};
} // namespace internal
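The serializer/deserializer comments above describe reusing one field for two purposes: at runtime the backing-store slot holds the (encoded) off-heap pointer, while in a snapshot it holds only a small uint32 reference that is resolved back to a real pointer on deserialization. A rough standalone sketch of that field reuse, with invented names (SnapshotBackingStores, ToyArrayBuffer) and no claim to match V8's serializer:

    #include <cstdint>
    #include <vector>

    // Hypothetical registry: the serializer writes an index, the deserializer
    // resolves it back to the real allocation.
    struct SnapshotBackingStores {
      std::vector<void*> stores;
      std::uint32_t Register(void* store) {
        stores.push_back(store);
        return static_cast<std::uint32_t>(stores.size() - 1);
      }
      void* Resolve(std::uint32_t ref) const { return stores[ref]; }
    };

    struct ToyArrayBuffer {
      std::uintptr_t backing_store_field = 0;  // pointer at runtime, ref in a snapshot

      void SetBackingStoreRefForSerialization(std::uint32_t ref) {
        backing_store_field = ref;  // a small integer replaces the pointer
      }
      std::uint32_t GetBackingStoreRefForDeserialization() const {
        return static_cast<std::uint32_t>(backing_store_field);
      }
      void AttachBackingStore(void* store) {
        backing_store_field = reinterpret_cast<std::uintptr_t>(store);
      }
    };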
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 9f4f1ba377..bd99ca7443 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -10,9 +10,10 @@ bitfield struct JSArrayBufferFlags extends uint32 {
is_shared: bool: 1 bit;
}
+@generateCppClass
extern class JSArrayBuffer extends JSObject {
byte_length: uintptr;
- backing_store: RawPtr;
+ backing_store: ExternalPointer;
@if(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: RawPtr;
@ifnot(V8_ARRAY_BUFFER_EXTENSION_BOOL) extension: void;
bit_field: JSArrayBufferFlags;
@@ -21,6 +22,9 @@ extern class JSArrayBuffer extends JSObject {
@ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}
+extern operator '.backing_store_ptr' macro LoadJSArrayBufferBackingStorePtr(
+ JSArrayBuffer): RawPtr;
+
@export
macro IsDetachedBuffer(buffer: JSArrayBuffer): bool {
return buffer.bit_field.was_detached;
@@ -31,16 +35,22 @@ macro IsSharedArrayBuffer(buffer: JSArrayBuffer): bool {
}
@abstract
+@generateCppClass
extern class JSArrayBufferView extends JSObject {
buffer: JSArrayBuffer;
byte_offset: uintptr;
byte_length: uintptr;
}
+@generateCppClass
extern class JSTypedArray extends JSArrayBufferView {
length: uintptr;
- external_pointer: RawPtr;
+ external_pointer: ExternalPointer;
+ // [base_pointer]: TODO(v8:4153)
base_pointer: ByteArray|Smi;
}
-extern class JSDataView extends JSArrayBufferView { data_pointer: RawPtr; }
+@generateCppClass
+extern class JSDataView extends JSArrayBufferView {
+ data_pointer: ExternalPointer;
+}
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index b14e794d4d..0cba7203a5 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -176,8 +176,7 @@ struct FastJSArrayWitness {
macro LoadElementOrUndefined(implicit context: Context)(k: Smi): JSAny {
try {
return this.LoadElementNoHole(k) otherwise FoundHole;
- }
- label FoundHole {
+ } label FoundHole {
return Undefined;
}
}
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 0dd23edaa5..729aff90af 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -18,20 +18,12 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator)
-ACCESSORS(JSV8BreakIterator, locale, String, kLocaleOffset)
ACCESSORS(JSV8BreakIterator, break_iterator, Managed<icu::BreakIterator>,
kBreakIteratorOffset)
ACCESSORS(JSV8BreakIterator, unicode_string, Managed<icu::UnicodeString>,
kUnicodeStringOffset)
-ACCESSORS(JSV8BreakIterator, bound_adopt_text, Object, kBoundAdoptTextOffset)
-ACCESSORS(JSV8BreakIterator, bound_first, Object, kBoundFirstOffset)
-ACCESSORS(JSV8BreakIterator, bound_next, Object, kBoundNextOffset)
-ACCESSORS(JSV8BreakIterator, bound_current, Object, kBoundCurrentOffset)
-ACCESSORS(JSV8BreakIterator, bound_break_type, Object, kBoundBreakTypeOffset)
-
-CAST_ACCESSOR(JSV8BreakIterator)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 28db1699f4..483cc1f8c4 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -239,9 +239,7 @@ String JSV8BreakIterator::BreakType(Isolate* isolate,
}
const std::set<std::string>& JSV8BreakIterator::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::BreakIterator>>::type
- available_locales = LAZY_INSTANCE_INITIALIZER;
- return available_locales.Pointer()->Get();
+ return Intl::GetAvailableLocales();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index e21fa9f0eb..20c177a5f6 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -27,7 +27,8 @@ class BreakIterator;
namespace v8 {
namespace internal {
-class JSV8BreakIterator : public JSObject {
+class JSV8BreakIterator
+ : public TorqueGeneratedJSV8BreakIterator<JSV8BreakIterator, JSObject> {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> New(
Isolate* isolate, Handle<Map> map, Handle<Object> input_locales,
@@ -51,25 +52,12 @@ class JSV8BreakIterator : public JSObject {
static String BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator);
- DECL_CAST(JSV8BreakIterator)
DECL_PRINTER(JSV8BreakIterator)
- DECL_VERIFIER(JSV8BreakIterator)
- DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(break_iterator, Managed<icu::BreakIterator>)
DECL_ACCESSORS(unicode_string, Managed<icu::UnicodeString>)
- DECL_ACCESSORS(bound_adopt_text, Object)
- DECL_ACCESSORS(bound_first, Object)
- DECL_ACCESSORS(bound_next, Object)
- DECL_ACCESSORS(bound_current, Object)
- DECL_ACCESSORS(bound_break_type, Object)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_V8_BREAK_ITERATOR_FIELDS)
-
- private:
- OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject);
+
+ TQ_OBJECT_CONSTRUCTORS(JSV8BreakIterator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index a8d3893316..30660f2e14 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -18,12 +18,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSCollator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollator)
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kIcuCollatorOffset)
-ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset)
-
-CAST_ACCESSOR(JSCollator)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 046fa25e3d..ea9120cbed 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -493,18 +493,33 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
namespace {
-struct CheckColl {
- static const char* key() { return nullptr; }
+class CollatorAvailableLocales {
+ public:
+ CollatorAvailableLocales() {
+ int32_t num_locales = 0;
+ const icu::Locale* icu_available_locales =
+ icu::Collator::getAvailableLocales(num_locales);
+ std::vector<std::string> locales;
+ for (int32_t i = 0; i < num_locales; ++i) {
+ locales.push_back(
+ Intl::ToLanguageTag(icu_available_locales[i]).FromJust());
+ }
#define U_ICUDATA_COLL U_ICUDATA_NAME U_TREE_SEPARATOR_STRING "coll"
- static const char* path() { return U_ICUDATA_COLL; }
+ set_ = Intl::BuildLocaleSet(locales, U_ICUDATA_COLL, nullptr);
#undef U_ICUDATA_COLL
+ }
+ virtual ~CollatorAvailableLocales() {}
+ const std::set<std::string>& Get() const { return set_; }
+
+ private:
+ std::set<std::string> set_;
};
} // namespace
const std::set<std::string>& JSCollator::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::Collator, CheckColl>>::
- type available_locales = LAZY_INSTANCE_INITIALIZER;
+ static base::LazyInstance<CollatorAvailableLocales>::type available_locales =
+ LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 0147b80ebb..bad0f93c67 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -29,7 +29,7 @@ class Collator;
namespace v8 {
namespace internal {
-class JSCollator : public JSObject {
+class JSCollator : public TorqueGeneratedJSCollator<JSCollator, JSObject> {
public:
// ecma402/#sec-initializecollator
V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> New(
@@ -42,18 +42,11 @@ class JSCollator : public JSObject {
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
- DECL_CAST(JSCollator)
DECL_PRINTER(JSCollator)
- DECL_VERIFIER(JSCollator)
-
-// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_COLLATOR_FIELDS)
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
- DECL_ACCESSORS(bound_compare, Object)
- OBJECT_CONSTRUCTORS(JSCollator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSCollator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index a0350726c0..17f9c3e198 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -30,7 +30,6 @@ class JSSet : public TorqueGeneratedJSSet<JSSet, JSCollection> {
public:
static void Initialize(Handle<JSSet> set, Isolate* isolate);
static void Clear(Isolate* isolate, Handle<JSSet> set);
- void Rehash(Isolate* isolate);
// Dispatched behavior.
DECL_PRINTER(JSSet)
@@ -57,7 +56,6 @@ class JSMap : public TorqueGeneratedJSMap<JSMap, JSCollection> {
public:
static void Initialize(Handle<JSMap> map, Isolate* isolate);
static void Clear(Isolate* isolate, Handle<JSMap> map);
- void Rehash(Isolate* isolate);
// Dispatched behavior.
DECL_PRINTER(JSMap)
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 4ab9adb844..56d44cacf9 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -18,16 +18,13 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat)
-ACCESSORS(JSDateTimeFormat, locale, String, kLocaleOffset)
ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
Managed<icu::SimpleDateFormat>, kIcuSimpleDateFormatOffset)
ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
Managed<icu::DateIntervalFormat>, kIcuDateIntervalFormatOffset)
-ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset)
-SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
inline void JSDateTimeFormat::set_hour_cycle(HourCycle hour_cycle) {
int hints = flags();
@@ -61,8 +58,6 @@ inline JSDateTimeFormat::DateTimeStyle JSDateTimeFormat::time_style() const {
return TimeStyleBits::decode(flags());
}
-CAST_ACCESSOR(JSDateTimeFormat)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 5643ee57d5..669dfd88ab 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -34,6 +34,23 @@ namespace internal {
namespace {
+std::string ToHourCycleString(JSDateTimeFormat::HourCycle hc) {
+ switch (hc) {
+ case JSDateTimeFormat::HourCycle::kH11:
+ return "h11";
+ case JSDateTimeFormat::HourCycle::kH12:
+ return "h12";
+ case JSDateTimeFormat::HourCycle::kH23:
+ return "h23";
+ case JSDateTimeFormat::HourCycle::kH24:
+ return "h24";
+ case JSDateTimeFormat::HourCycle::kUndefined:
+ return "";
+ default:
+ UNREACHABLE();
+ }
+}
+
JSDateTimeFormat::HourCycle ToHourCycle(const std::string& hc) {
if (hc == "h11") return JSDateTimeFormat::HourCycle::kH11;
if (hc == "h12") return JSDateTimeFormat::HourCycle::kH12;
@@ -261,7 +278,7 @@ const std::vector<PatternData>& GetPatternData(
}
}
-std::string GetGMTTzID(Isolate* isolate, const std::string& input) {
+std::string GetGMTTzID(const std::string& input) {
std::string ret = "Etc/GMT";
switch (input.length()) {
case 8:
@@ -304,10 +321,8 @@ char LocaleIndependentAsciiToLower(char ch) {
// or ho_cHi_minH -> Ho_Chi_Minh. It is locale-agnostic and only
// deals with ASCII-only characters.
// 'of', 'au' and 'es' are special-cased and lowercased.
-// Also "Antarctica/DumontDUrville" is special case.
// ICU's timezone parsing is case sensitive, but ECMAScript is case insensitive
-std::string ToTitleCaseTimezoneLocation(Isolate* isolate,
- const std::string& input) {
+std::string ToTitleCaseTimezoneLocation(const std::string& input) {
std::string title_cased;
int word_length = 0;
for (char ch : input) {
@@ -332,34 +347,102 @@ std::string ToTitleCaseTimezoneLocation(Isolate* isolate,
return std::string();
}
}
- // Special case
- if (title_cased == "Antarctica/Dumontdurville") {
- return "Antarctica/DumontDUrville";
- }
+
return title_cased;
}
+class SpecialTimeZoneMap {
+ public:
+ SpecialTimeZoneMap() {
+ Add("America/Argentina/ComodRivadavia");
+ Add("America/Knox_IN");
+ Add("Antarctica/McMurdo");
+ Add("Australia/ACT");
+ Add("Australia/LHI");
+ Add("Australia/NSW");
+ Add("Antarctica/DumontDUrville");
+ Add("Brazil/DeNoronha");
+ Add("CET");
+ Add("CST6CDT");
+ Add("Chile/EasterIsland");
+ Add("EET");
+ Add("EST");
+ Add("EST5EDT");
+ Add("GB");
+ Add("GB-Eire");
+ Add("HST");
+ Add("MET");
+ Add("MST");
+ Add("MST7MDT");
+ Add("Mexico/BajaNorte");
+ Add("Mexico/BajaSur");
+ Add("NZ");
+ Add("NZ-CHAT");
+ Add("PRC");
+ Add("PST8PDT");
+ Add("ROC");
+ Add("ROK");
+ Add("UCT");
+ Add("W-SU");
+ Add("WET");
+ }
+
+ std::string Find(const std::string& id) {
+ auto it = map_.find(id);
+ if (it != map_.end()) {
+ return it->second;
+ }
+ return "";
+ }
+
+ private:
+ void Add(const char* id) {
+ std::string upper(id);
+ transform(upper.begin(), upper.end(), upper.begin(),
+ LocaleIndependentAsciiToUpper);
+ map_.insert({upper, id});
+ }
+ std::map<std::string, std::string> map_;
+};
+
// Return the time zone id which matches ICU's expectation of title casing,
// or an empty string on error.
-std::string CanonicalizeTimeZoneID(Isolate* isolate, const std::string& input) {
+std::string CanonicalizeTimeZoneID(const std::string& input) {
std::string upper = input;
transform(upper.begin(), upper.end(), upper.begin(),
LocaleIndependentAsciiToUpper);
- if (upper == "UTC" || upper == "GMT" || upper == "ETC/UTC" ||
- upper == "ETC/GMT") {
- return "UTC";
+ if (upper.length() >= 3) {
+ if (memcmp(upper.c_str(), "ETC", 3) == 0) {
+ if (upper == "ETC/UTC" || upper == "ETC/GMT" || upper == "ETC/UCT") {
+ return "UTC";
+ }
+ if (strncmp(upper.c_str(), "ETC/GMT", 7) == 0) {
+ return GetGMTTzID(input);
+ }
+ } else if (memcmp(upper.c_str(), "GMT", 3) == 0) {
+ if (upper == "GMT" || upper == "GMT0" || upper == "GMT+0" ||
+ upper == "GMT-0") {
+ return "UTC";
+ }
+ } else if (memcmp(upper.c_str(), "US/", 3) == 0) {
+ std::string title = ToTitleCaseTimezoneLocation(input);
+ // Change "Us/" to "US/"
+ title[1] = 'S';
+ return title;
+ } else if (upper == "UTC") {
+ return "UTC";
+ }
}
  // We expect only _, '-' and / besides ASCII letters.
- // All inputs should conform to Area/Location(/Location)*, or Etc/GMT* .
- // TODO(jshin): 1. Support 'GB-Eire", 'EST5EDT", "ROK', 'US/*', 'NZ' and many
- // other aliases/linked names when moving timezone validation code to C++.
- // See crbug.com/364374 and crbug.com/v8/8007 .
- // 2. Resolve the difference betwee CLDR/ICU and IANA time zone db.
- // See http://unicode.org/cldr/trac/ticket/9892 and crbug.com/645807 .
- if (strncmp(upper.c_str(), "ETC/GMT", 7) == 0) {
- return GetGMTTzID(isolate, input);
- }
- return ToTitleCaseTimezoneLocation(isolate, input);
+
+ static base::LazyInstance<SpecialTimeZoneMap>::type special_time_zone_map =
+ LAZY_INSTANCE_INITIALIZER;
+
+ std::string special_case = special_time_zone_map.Pointer()->Find(upper);
+ if (!special_case.empty()) {
+ return special_case;
+ }
+ return ToTitleCaseTimezoneLocation(input);
}
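
A minimal standalone sketch of the lookup strategy used by SpecialTimeZoneMap above: keys are stored upper-cased so the match is ASCII case-insensitive while the canonical mixed-case ID is returned (simplified re-implementation for illustration only; the three sample IDs are just examples from the table):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // Keys are upper-cased copies; values keep the canonical spelling.
      std::map<std::string, std::string> special;
      for (const char* id : {"Antarctica/DumontDUrville", "NZ-CHAT", "W-SU"}) {
        std::string upper(id);
        std::transform(upper.begin(), upper.end(), upper.begin(),
                       [](unsigned char c) { return std::toupper(c); });
        special.emplace(upper, id);
      }

      std::string input = "antarctica/dumontdurville";
      std::transform(input.begin(), input.end(), input.begin(),
                     [](unsigned char c) { return std::toupper(c); });
      auto it = special.find(input);
      // Prints "Antarctica/DumontDUrville"; unknown IDs would fall back to
      // the generic title-casing path, as CanonicalizeTimeZoneID does above.
      std::cout << (it != special.end() ? it->second : "<title-case fallback>")
                << std::endl;
    }
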
Handle<String> DateTimeStyleAsString(Isolate* isolate,
@@ -479,6 +562,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
// [[Minute]] "minute"
// [[Second]] "second"
// [[TimeZoneName]] "timeZoneName"
+ // [[FractionalSecondDigits]] "fractionalSecondDigits"
CHECK(JSReceiver::CreateDataProperty(isolate, options,
factory->locale_string(), locale,
Just(kDontThrow))
@@ -549,6 +633,13 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
}
}
}
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ int fsd = FractionalSecondDigitsFromPattern(pattern);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->fractionalSecondDigits_string(),
+ factory->NewNumberFromInt(fsd), Just(kDontThrow))
+ .FromJust());
+ }
}
// dateStyle
@@ -568,14 +659,6 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
- if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
- int fsd = FractionalSecondDigitsFromPattern(pattern);
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->fractionalSecondDigits_string(),
- factory->NewNumberFromInt(fsd), Just(kDontThrow))
- .FromJust());
- }
-
return options;
}
@@ -868,15 +951,14 @@ bool IsValidTimeZoneName(const icu::TimeZone& tz) {
canonical != icu::UnicodeString("Etc/Unknown", -1, US_INV);
}
-std::unique_ptr<icu::TimeZone> CreateTimeZone(Isolate* isolate,
- const char* timezone) {
+std::unique_ptr<icu::TimeZone> CreateTimeZone(const char* timezone) {
// Create time zone as specified by the user. We have to re-create time zone
// since calendar takes ownership.
if (timezone == nullptr) {
// 19.a. Else / Let timeZone be DefaultTimeZone().
return std::unique_ptr<icu::TimeZone>(icu::TimeZone::createDefault());
}
- std::string canonicalized = CanonicalizeTimeZoneID(isolate, timezone);
+ std::string canonicalized = CanonicalizeTimeZoneID(timezone);
if (canonicalized.empty()) return std::unique_ptr<icu::TimeZone>();
std::unique_ptr<icu::TimeZone> tz(
icu::TimeZone::createTimeZone(canonicalized.c_str()));
@@ -1078,10 +1160,18 @@ icu::DateIntervalFormat* LazyCreateDateIntervalFormat(
icu::SimpleDateFormat* icu_simple_date_format =
date_time_format->icu_simple_date_format().raw();
UErrorCode status = U_ZERO_ERROR;
+
+ icu::Locale loc = *(date_time_format->icu_locale().raw());
+ // We need to pass in the hc to DateIntervalFormat by using Unicode 'hc'
+ // extension.
+ std::string hcString = ToHourCycleString(date_time_format->hour_cycle());
+ if (!hcString.empty()) {
+ loc.setUnicodeKeywordValue("hc", hcString, status);
+ }
+
std::unique_ptr<icu::DateIntervalFormat> date_interval_format(
icu::DateIntervalFormat::createInstance(
- SkeletonFromDateFormat(*icu_simple_date_format),
- *(date_time_format->icu_locale().raw()), status));
+ SkeletonFromDateFormat(*icu_simple_date_format), loc, status));
if (U_FAILURE(status)) {
return nullptr;
}
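
The hunk above forwards the resolved hour cycle to the DateIntervalFormat by setting the 'hc' Unicode keyword on a copy of the locale. A small standalone ICU sketch of that keyword round-trip (illustrative only; the printed tag form may vary by ICU version):

    #include <unicode/locid.h>
    #include <iostream>
    #include <string>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::Locale loc = icu::Locale::forLanguageTag("en-US", status);
      // Attach the hour-cycle preference as the BCP 47 'hc' extension keyword.
      loc.setUnicodeKeywordValue("hc", "h23", status);
      std::string tag = loc.toLanguageTag<std::string>(status);
      if (U_SUCCESS(status)) {
        std::cout << tag << std::endl;  // typically "en-US-u-hc-h23"
      }
    }
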
@@ -1262,17 +1352,15 @@ class DateTimePatternGeneratorCache {
public:
  // Return a cloned copy that the caller has to free.
icu::DateTimePatternGenerator* CreateGenerator(const icu::Locale& locale) {
- std::string key(FLAG_harmony_intl_other_calendars ? locale.getName()
- : locale.getBaseName());
+ std::string key(locale.getName());
base::MutexGuard guard(&mutex_);
auto it = map_.find(key);
if (it != map_.end()) {
return it->second->clone();
}
UErrorCode status = U_ZERO_ERROR;
- map_[key].reset(icu::DateTimePatternGenerator::createInstance(
- FLAG_harmony_intl_other_calendars ? locale : icu::Locale(key.c_str()),
- status));
+ map_[key].reset(
+ icu::DateTimePatternGenerator::createInstance(locale, status));
// Fallback to use "root".
if (U_FAILURE(status)) {
status = U_ZERO_ERROR;
@@ -1321,32 +1409,29 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
std::unique_ptr<char[]> calendar_str = nullptr;
std::unique_ptr<char[]> numbering_system_str = nullptr;
- if (FLAG_harmony_intl_add_calendar_numbering_system) {
- const std::vector<const char*> empty_values = {};
- // 6. Let calendar be ? GetOption(options, "calendar",
- // "string", undefined, undefined).
- Maybe<bool> maybe_calendar = Intl::GetStringOption(
- isolate, options, "calendar", empty_values, service, &calendar_str);
- MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
- if (maybe_calendar.FromJust() && calendar_str != nullptr) {
- icu::Locale default_locale;
- if (!Intl::IsWellFormedCalendar(calendar_str.get())) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(
- MessageTemplate::kInvalid, factory->calendar_string(),
- factory->NewStringFromAsciiChecked(calendar_str.get())),
- JSDateTimeFormat);
- }
+ const std::vector<const char*> empty_values = {};
+ // 6. Let calendar be ? GetOption(options, "calendar",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_calendar = Intl::GetStringOption(
+ isolate, options, "calendar", empty_values, service, &calendar_str);
+ MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
+ if (maybe_calendar.FromJust() && calendar_str != nullptr) {
+ icu::Locale default_locale;
+ if (!Intl::IsWellFormedCalendar(calendar_str.get())) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid, factory->calendar_string(),
+ factory->NewStringFromAsciiChecked(calendar_str.get())),
+ JSDateTimeFormat);
}
-
- // 8. Let numberingSystem be ? GetOption(options, "numberingSystem",
- // "string", undefined, undefined).
- Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
- isolate, options, service, &numbering_system_str);
- MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>());
}
+ // 8. Let numberingSystem be ? GetOption(options, "numberingSystem",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, service, &numbering_system_str);
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>());
+
// 6. Let hour12 be ? GetOption(options, "hour12", "boolean", undefined,
// undefined).
bool hour12;
@@ -1424,13 +1509,12 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
}
// 17. Let timeZone be ? Get(options, "timeZone").
- const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
Maybe<bool> maybe_timezone = Intl::GetStringOption(
isolate, options, "timeZone", empty_values, service, &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
- std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get());
+ std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(timezone.get());
if (tz.get() == nullptr) {
THROW_NEW_ERROR(
isolate,
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index ef50b71dc9..1e5720760e 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -31,7 +31,8 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
-class JSDateTimeFormat : public JSObject {
+class JSDateTimeFormat
+ : public TorqueGeneratedJSDateTimeFormat<JSDateTimeFormat, JSObject> {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
@@ -84,15 +85,10 @@ class JSDateTimeFormat : public JSObject {
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
Handle<String> HourCycleAsString() const;
- DECL_CAST(JSDateTimeFormat)
// ecma-402/#sec-properties-of-intl-datetimeformat-instances
enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort };
-// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_DATE_TIME_FORMAT_FIELDS)
-
// enum for "hourCycle" option.
enum class HourCycle { kUndefined, kH11, kH12, kH23, kH24 };
@@ -126,17 +122,13 @@ class JSDateTimeFormat : public JSObject {
STATIC_ASSERT(DateTimeStyle::kMedium <= TimeStyleBits::kMax);
STATIC_ASSERT(DateTimeStyle::kShort <= TimeStyleBits::kMax);
- DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
DECL_ACCESSORS(icu_date_interval_format, Managed<icu::DateIntervalFormat>)
- DECL_ACCESSORS(bound_format, Object)
- DECL_INT_ACCESSORS(flags)
DECL_PRINTER(JSDateTimeFormat)
- DECL_VERIFIER(JSDateTimeFormat)
- OBJECT_CONSTRUCTORS(JSDateTimeFormat, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSDateTimeFormat)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-display-names-inl.h b/deps/v8/src/objects/js-display-names-inl.h
index 103f3b388c..40bea22c97 100644
--- a/deps/v8/src/objects/js-display-names-inl.h
+++ b/deps/v8/src/objects/js-display-names-inl.h
@@ -20,12 +20,7 @@ namespace internal {
ACCESSORS(JSDisplayNames, internal, Managed<DisplayNamesInternal>,
kInternalOffset)
-OBJECT_CONSTRUCTORS_IMPL(JSDisplayNames, JSObject)
-
-// Base display names accessors.
-SMI_ACCESSORS(JSDisplayNames, flags, kFlagsOffset)
-
-CAST_ACCESSOR(JSDisplayNames)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSDisplayNames)
inline void JSDisplayNames::set_style(Style style) {
DCHECK_GE(StyleBits::kMax, style);
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index 95f4641173..7de863b19b 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -76,8 +76,8 @@ UDisplayContext ToUDisplayContext(JSDisplayNames::Style style) {
// Abstract class for all different types.
class DisplayNamesInternal {
public:
- DisplayNamesInternal() {}
- virtual ~DisplayNamesInternal() {}
+ DisplayNamesInternal() = default;
+ virtual ~DisplayNamesInternal() = default;
virtual const char* type() const = 0;
virtual icu::Locale locale() const = 0;
virtual Maybe<icu::UnicodeString> of(Isolate* isolate,
@@ -101,7 +101,7 @@ class LocaleDisplayNamesCommon : public DisplayNamesInternal {
icu::LocaleDisplayNames::createInstance(locale, display_context, 4));
}
- virtual ~LocaleDisplayNamesCommon() {}
+ ~LocaleDisplayNamesCommon() override = default;
icu::Locale locale() const override { return ldn_->getLocale(); }
@@ -118,7 +118,7 @@ class LanguageNames : public LocaleDisplayNamesCommon {
LanguageNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback)
: LocaleDisplayNamesCommon(locale, style, fallback) {}
- virtual ~LanguageNames() {}
+ ~LanguageNames() override = default;
const char* type() const override { return "language"; }
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
@@ -145,7 +145,7 @@ class RegionNames : public LocaleDisplayNamesCommon {
RegionNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback)
: LocaleDisplayNamesCommon(locale, style, fallback) {}
- virtual ~RegionNames() {}
+ ~RegionNames() override = default;
const char* type() const override { return "region"; }
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
@@ -167,7 +167,7 @@ class ScriptNames : public LocaleDisplayNamesCommon {
ScriptNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback)
: LocaleDisplayNamesCommon(locale, style, fallback) {}
- virtual ~ScriptNames() {}
+ ~ScriptNames() override = default;
const char* type() const override { return "script"; }
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
@@ -189,7 +189,7 @@ class CurrencyNames : public LocaleDisplayNamesCommon {
CurrencyNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback)
: LocaleDisplayNamesCommon(locale, style, fallback) {}
- virtual ~CurrencyNames() {}
+ ~CurrencyNames() override = default;
const char* type() const override { return "currency"; }
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
@@ -267,7 +267,7 @@ class DateTimeFieldNames : public DisplayNamesInternal {
icu::DateTimePatternGenerator::createInstance(locale_, status));
CHECK(U_SUCCESS(status));
}
- virtual ~DateTimeFieldNames() {}
+ ~DateTimeFieldNames() override = default;
const char* type() const override { return "dateTimeField"; }
icu::Locale locale() const override { return locale_; }
Maybe<icu::UnicodeString> of(Isolate* isolate,
@@ -314,7 +314,7 @@ class DateFormatSymbolsNames : public DisplayNamesInternal {
length_(length),
calendar_(calendar) {}
- virtual ~DateFormatSymbolsNames() {}
+ ~DateFormatSymbolsNames() override = default;
const char* type() const override { return type_; }
@@ -354,7 +354,7 @@ class WeekdayNames : public DateFormatSymbolsNames {
const icu::UnicodeString* array, int32_t length,
const char* calendar)
: DateFormatSymbolsNames(type, locale, array, length, calendar) {}
- virtual ~WeekdayNames() {}
+ ~WeekdayNames() override = default;
int32_t ComputeIndex(const char* code) const override {
int32_t i = atoi(code);
@@ -370,7 +370,7 @@ class MonthNames : public DateFormatSymbolsNames {
const icu::UnicodeString* array, int32_t length,
const char* calendar)
: DateFormatSymbolsNames(type, locale, array, length, calendar) {}
- virtual ~MonthNames() {}
+ ~MonthNames() override = default;
int32_t ComputeIndex(const char* code) const override {
return atoi(code) - 1;
@@ -383,7 +383,7 @@ class QuarterNames : public DateFormatSymbolsNames {
const icu::UnicodeString* array, int32_t length,
const char* calendar)
: DateFormatSymbolsNames(type, locale, array, length, calendar) {}
- virtual ~QuarterNames() {}
+ ~QuarterNames() override = default;
int32_t ComputeIndex(const char* code) const override {
return atoi(code) - 1;
@@ -396,7 +396,7 @@ class DayPeriodNames : public DateFormatSymbolsNames {
const icu::UnicodeString* array, int32_t length,
const char* calendar)
: DateFormatSymbolsNames(type, locale, array, length, calendar) {}
- virtual ~DayPeriodNames() {}
+ ~DayPeriodNames() override = default;
int32_t ComputeIndex(const char* code) const override {
if (strcmp("am", code) == 0) {
@@ -750,8 +750,7 @@ struct CheckCalendar {
} // namespace
const std::set<std::string>& JSDisplayNames::GetAvailableLocales() {
- static base::LazyInstance<
- Intl::AvailableLocales<icu::Locale, CheckCalendar>>::type
+ static base::LazyInstance<Intl::AvailableLocales<CheckCalendar>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-display-names.h b/deps/v8/src/objects/js-display-names.h
index c656a25779..cd3ca1ea47 100644
--- a/deps/v8/src/objects/js-display-names.h
+++ b/deps/v8/src/objects/js-display-names.h
@@ -25,7 +25,8 @@ namespace internal {
class DisplayNamesInternal;
-class JSDisplayNames : public JSObject {
+class JSDisplayNames
+ : public TorqueGeneratedJSDisplayNames<JSDisplayNames, JSObject> {
public:
// Creates display names object with properties derived from input
// locales and options.
@@ -65,8 +66,6 @@ class JSDisplayNames : public JSObject {
inline void set_fallback(Fallback fallback);
inline Fallback fallback() const;
- DECL_CAST(JSDisplayNames)
-
// Bit positions in |flags|.
DEFINE_TORQUE_GENERATED_JS_DISPLAY_NAMES_FLAGS()
@@ -76,19 +75,11 @@ class JSDisplayNames : public JSObject {
STATIC_ASSERT(Fallback::kCode <= FallbackBit::kMax);
STATIC_ASSERT(Fallback::kNone <= FallbackBit::kMax);
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
DECL_ACCESSORS(internal, Managed<DisplayNamesInternal>)
DECL_PRINTER(JSDisplayNames)
- DECL_VERIFIER(JSDisplayNames)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_DISPLAY_NAMES_FIELDS)
- OBJECT_CONSTRUCTORS(JSDisplayNames, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSDisplayNames)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 6a1529ad33..5cf95db4d5 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -18,13 +18,11 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSListFormat, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSListFormat)
// Base list format accessors.
-ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
kIcuFormatterOffset)
-SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
DCHECK_GE(StyleBits::kMax, style);
@@ -48,8 +46,6 @@ inline JSListFormat::Type JSListFormat::type() const {
return TypeBits::decode(flags());
}
-CAST_ACCESSOR(JSListFormat)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 978f0ea38f..047c03b842 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -331,8 +331,7 @@ struct CheckListPattern {
} // namespace
const std::set<std::string>& JSListFormat::GetAvailableLocales() {
- static base::LazyInstance<
- Intl::AvailableLocales<icu::Locale, CheckListPattern>>::type
+ static base::LazyInstance<Intl::AvailableLocales<CheckListPattern>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 0040bccb97..1f94c957f8 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -29,7 +29,8 @@ class ListFormatter;
namespace v8 {
namespace internal {
-class JSListFormat : public JSObject {
+class JSListFormat
+ : public TorqueGeneratedJSListFormat<JSListFormat, JSObject> {
public:
  // Creates list format object with properties derived from input
// locales and options.
@@ -55,10 +56,7 @@ class JSListFormat : public JSObject {
Handle<String> StyleAsString() const;
Handle<String> TypeAsString() const;
- DECL_CAST(JSListFormat)
-
// ListFormat accessors.
- DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(icu_formatter, Managed<icu::ListFormatter>)
// Style: identifying the relative time format style used.
@@ -93,17 +91,9 @@ class JSListFormat : public JSObject {
STATIC_ASSERT(Type::DISJUNCTION <= TypeBits::kMax);
STATIC_ASSERT(Type::UNIT <= TypeBits::kMax);
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
DECL_PRINTER(JSListFormat)
- DECL_VERIFIER(JSListFormat)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_LIST_FORMAT_FIELDS)
- OBJECT_CONSTRUCTORS(JSListFormat, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSListFormat)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index 17859ea6ab..cbd62b9a93 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -19,12 +19,10 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSLocale)
ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
-CAST_ACCESSOR(JSLocale)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 6bbb86a0b1..53738049d9 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -365,38 +365,47 @@ MaybeHandle<JSLocale> JSLocale::New(Isolate* isolate, Handle<Map> map,
}
namespace {
-Handle<String> MorphLocale(Isolate* isolate, String locale,
- void (*morph_func)(icu::Locale*, UErrorCode*)) {
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale =
- icu::Locale::forLanguageTag(locale.ToCString().get(), status);
- // TODO(ftang): Remove the following lines after ICU-8420 fixed.
- // Due to ICU-8420 "und" is turn into "" by forLanguageTag,
- // we have to work around to use icu::Locale("und") directly
- if (icu_locale.getName()[0] == '\0') icu_locale = icu::Locale("und");
- CHECK(U_SUCCESS(status));
- CHECK(!icu_locale.isBogus());
- (*morph_func)(&icu_locale, &status);
- CHECK(U_SUCCESS(status));
- CHECK(!icu_locale.isBogus());
- std::string locale_str = Intl::ToLanguageTag(icu_locale).FromJust();
- return isolate->factory()->NewStringFromAsciiChecked(locale_str.c_str());
+
+MaybeHandle<JSLocale> Construct(Isolate* isolate,
+ const icu::Locale& icu_locale) {
+ Handle<Managed<icu::Locale>> managed_locale =
+ Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
+
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_locale_function(), isolate);
+
+ Handle<Map> map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, map,
+ JSFunction::GetDerivedMap(isolate, constructor, constructor), JSLocale);
+
+ Handle<JSLocale> locale = Handle<JSLocale>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ locale->set_icu_locale(*managed_locale);
+ return locale;
}
} // namespace
-Handle<String> JSLocale::Maximize(Isolate* isolate, String locale) {
- return MorphLocale(isolate, locale,
- [](icu::Locale* icu_locale, UErrorCode* status) {
- icu_locale->addLikelySubtags(*status);
- });
+MaybeHandle<JSLocale> JSLocale::Maximize(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.addLikelySubtags(status);
+ DCHECK(U_SUCCESS(status));
+ DCHECK(!icu_locale.isBogus());
+ return Construct(isolate, icu_locale);
}
-Handle<String> JSLocale::Minimize(Isolate* isolate, String locale) {
- return MorphLocale(isolate, locale,
- [](icu::Locale* icu_locale, UErrorCode* status) {
- icu_locale->minimizeSubtags(*status);
- });
+MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.minimizeSubtags(status);
+ DCHECK(U_SUCCESS(status));
+ DCHECK(!icu_locale.isBogus());
+ return Construct(isolate, icu_locale);
}
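
Maximize and Minimize now work on the stored icu::Locale and return a fresh JSLocale rather than a language-tag string. The underlying ICU calls behave roughly as in this standalone sketch (illustrative only; the concrete subtags depend on ICU's likely-subtags data):

    #include <unicode/locid.h>
    #include <iostream>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::Locale loc = icu::Locale::forLanguageTag("en", status);

      loc.addLikelySubtags(status);   // Maximize: "en" -> typically en_Latn_US
      std::cout << loc.getName() << std::endl;

      loc.minimizeSubtags(status);    // Minimize: back to "en"
      std::cout << loc.getName() << std::endl;
    }
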
Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 4e73b0499f..7904505d29 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -25,15 +25,18 @@ class Locale;
namespace v8 {
namespace internal {
-class JSLocale : public JSObject {
+class JSLocale : public TorqueGeneratedJSLocale<JSLocale, JSObject> {
public:
// Creates locale object with properties derived from input locale string
// and options.
static MaybeHandle<JSLocale> New(Isolate* isolate, Handle<Map> map,
Handle<String> locale,
Handle<JSReceiver> options);
- static Handle<String> Maximize(Isolate* isolate, String locale);
- static Handle<String> Minimize(Isolate* isolate, String locale);
+
+ static MaybeHandle<JSLocale> Maximize(Isolate* isolate,
+ Handle<JSLocale> locale);
+ static MaybeHandle<JSLocale> Minimize(Isolate* isolate,
+ Handle<JSLocale> locale);
static Handle<Object> Language(Isolate* isolate, Handle<JSLocale> locale);
static Handle<Object> Script(Isolate* isolate, Handle<JSLocale> locale);
@@ -59,18 +62,11 @@ class JSLocale : public JSObject {
  // Helper function to check well-formed "3alpha"
static bool Is3Alpha(const std::string& value);
- DECL_CAST(JSLocale)
-
DECL_ACCESSORS(icu_locale, Managed<icu::Locale>)
DECL_PRINTER(JSLocale)
- DECL_VERIFIER(JSLocale)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_LOCALE_FIELDS)
- OBJECT_CONSTRUCTORS(JSLocale, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSLocale)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 9cd16c35ed..035eaf57a3 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -18,15 +18,11 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat)
-ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
ACCESSORS(JSNumberFormat, icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>,
kIcuNumberFormatterOffset)
-ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
-
-CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 2d7fd77113..c5b3d06fca 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -18,7 +18,6 @@
#include "unicode/currunit.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
-#include "unicode/nounit.h"
#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
@@ -218,7 +217,7 @@ class UnitFactory {
return found->second;
}
// 2. Return false.
- return icu::NoUnit::base();
+ return icu::MeasureUnit();
}
private:
@@ -236,7 +235,7 @@ icu::MeasureUnit IsSanctionedUnitIdentifier(const std::string& unit) {
Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> IsWellFormedUnitIdentifier(
Isolate* isolate, const std::string& unit) {
icu::MeasureUnit result = IsSanctionedUnitIdentifier(unit);
- icu::MeasureUnit none = icu::NoUnit::base();
+ icu::MeasureUnit none = icu::MeasureUnit();
// 1. If the result of IsSanctionedUnitIdentifier(unitIdentifier) is true,
// then
if (result != none) {
@@ -633,11 +632,12 @@ Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
return Style::CURRENCY;
}
if (skeleton.indexOf("measure-unit/") >= 0) {
+ if (skeleton.indexOf("scale/100") >= 0 &&
+ skeleton.indexOf("measure-unit/concentr-percent") >= 0) {
+ return Style::PERCENT;
+ }
return Style::UNIT;
}
- if (skeleton.indexOf("percent ") >= 0) {
- return Style::PERCENT;
- }
return Style::DECIMAL;
}
@@ -874,17 +874,15 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
std::unique_ptr<char[]> numbering_system_str = nullptr;
- if (FLAG_harmony_intl_add_calendar_numbering_system) {
- // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
- // `"string"`, *undefined*, *undefined*).
- Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
- isolate, options, service, &numbering_system_str);
- // 8. If _numberingSystem_ is not *undefined*, then
- // a. If _numberingSystem_ does not match the
- // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
- // exception.
- MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSNumberFormat>());
- }
+ // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // `"string"`, *undefined*, *undefined*).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, service, &numbering_system_str);
+ // 8. If _numberingSystem_ is not *undefined*, then
+ // a. If _numberingSystem_ does not match the
+ // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
+ // exception.
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSNumberFormat>());
// 7. Let localeData be %NumberFormat%.[[LocaleData]].
// 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
@@ -1088,11 +1086,12 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
maybe_wellformed_unit.FromJust();
+ icu::MeasureUnit none = icu::MeasureUnit();
// 13.b Set intlObj.[[Unit]] to unit.
- if (unit_pair.first != icu::NoUnit::base()) {
+ if (unit_pair.first != none) {
icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
}
- if (unit_pair.second != icu::NoUnit::base()) {
+ if (unit_pair.second != none) {
icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
}
@@ -1105,8 +1104,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
}
if (style == Style::PERCENT) {
- icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
- .scale(icu::number::Scale::powerOfTen(2));
+ icu_number_formatter =
+ icu_number_formatter.unit(icu::MeasureUnit::getPercent())
+ .scale(icu::number::Scale::powerOfTen(2));
}
// 23. If style is "currency", then
@@ -1495,8 +1495,7 @@ struct CheckNumberElements {
} // namespace
const std::set<std::string>& JSNumberFormat::GetAvailableLocales() {
- static base::LazyInstance<
- Intl::AvailableLocales<icu::NumberFormat, CheckNumberElements>>::type
+ static base::LazyInstance<Intl::AvailableLocales<CheckNumberElements>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
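
With the icu::NoUnit dependency removed above, the percent style is expressed as the dimensionless percent MeasureUnit plus a power-of-ten scale, and is later recognized in skeletons by the "measure-unit/concentr-percent" and "scale/100" stems. A standalone number-formatter sketch of that combination (illustrative only; exact output depends on locale data):

    #include <unicode/numberformatter.h>
    #include <unicode/unistr.h>
    #include <iostream>
    #include <string>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::number::LocalizedNumberFormatter f =
          icu::number::NumberFormatter::withLocale(icu::Locale("en"))
              .unit(icu::MeasureUnit::getPercent())
              .scale(icu::number::Scale::powerOfTen(2));
      icu::UnicodeString out = f.formatDouble(0.25, status).toString(status);
      std::string utf8;
      out.toUTF8String(utf8);
      std::cout << utf8 << std::endl;  // typically "25%"
    }
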
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 471398eafa..d41cca0548 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -32,7 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
-class JSNumberFormat : public JSObject {
+class JSNumberFormat
+ : public TorqueGeneratedJSNumberFormat<JSNumberFormat, JSObject> {
public:
// ecma402/#sec-initializenumberformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> New(
@@ -69,21 +70,12 @@ class JSNumberFormat : public JSObject {
const icu::number::LocalizedNumberFormatter& icu_number_formatter,
const Intl::NumberFormatDigitOptions& digit_options);
- DECL_CAST(JSNumberFormat)
DECL_PRINTER(JSNumberFormat)
- DECL_VERIFIER(JSNumberFormat)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_NUMBER_FORMAT_FIELDS)
-
- DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>)
- DECL_ACCESSORS(bound_format, Object)
- DECL_INT_ACCESSORS(flags)
- OBJECT_CONSTRUCTORS(JSNumberFormat, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSNumberFormat)
};
struct NumberFormatSpan {
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index dd5ac09904..300b40d9d7 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -816,7 +816,7 @@ DEF_GETTER(JSObject, HasFastPackedElements, bool) {
}
DEF_GETTER(JSObject, HasDictionaryElements, bool) {
- return GetElementsKind(isolate) == DICTIONARY_ELEMENTS;
+ return IsDictionaryElementsKind(GetElementsKind(isolate));
}
DEF_GETTER(JSObject, HasPackedElements, bool) {
@@ -836,11 +836,11 @@ DEF_GETTER(JSObject, HasNonextensibleElements, bool) {
}
DEF_GETTER(JSObject, HasFastArgumentsElements, bool) {
- return GetElementsKind(isolate) == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+ return IsFastArgumentsElementsKind(GetElementsKind(isolate));
}
DEF_GETTER(JSObject, HasSlowArgumentsElements, bool) {
- return GetElementsKind(isolate) == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+ return IsSlowArgumentsElementsKind(GetElementsKind(isolate));
}
DEF_GETTER(JSObject, HasSloppyArgumentsElements, bool) {
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 13741c4f62..a77d2dadfc 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -14,6 +14,7 @@
#include "src/handles/maybe-handles.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -26,6 +27,7 @@
#include "src/objects/field-type.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number.h"
+#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
@@ -404,19 +406,6 @@ String JSReceiver::class_name() {
if (IsJSWeakSet()) return roots.WeakSet_string();
if (IsJSGlobalProxy()) return roots.global_string();
- Object maybe_constructor = map().GetConstructor();
- if (maybe_constructor.IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (constructor.shared().IsApiFunction()) {
- maybe_constructor = constructor.shared().get_api_func_data();
- }
- }
-
- if (maybe_constructor.IsFunctionTemplateInfo()) {
- FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info.class_name().IsString()) return String::cast(info.class_name());
- }
-
return roots.Object_string();
}
@@ -440,12 +429,6 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
- } else if (maybe_constructor.IsFunctionTemplateInfo()) {
- FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info.class_name().IsString()) {
- return std::make_pair(MaybeHandle<JSFunction>(),
- handle(String::cast(info.class_name()), isolate));
- }
}
}
@@ -2097,6 +2080,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kHeaderSize;
+ case JS_AGGREGATE_ERROR_TYPE:
+ return JSAggregateError::kHeaderSize;
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
return JSAsyncFunctionObject::kHeaderSize;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
@@ -2140,8 +2125,6 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSWeakRef::kHeaderSize;
case JS_FINALIZATION_REGISTRY_TYPE:
return JSFinalizationRegistry::kHeaderSize;
- case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
- return JSFinalizationRegistryCleanupIterator::kHeaderSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kHeaderSize;
case JS_WEAK_SET_TYPE:
@@ -4509,14 +4492,13 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
NewTypeError(MessageTemplate::kImmutablePrototypeSet, object));
}
- // From 8.6.2 Object Internal Methods
- // ...
- // In addition, if [[Extensible]] is false the value of the [[Class]] and
- // [[Prototype]] internal properties of the object may not be modified.
- // ...
- // Implementation specific extensions that modify [[Class]], [[Prototype]]
- // or [[Extensible]] must not violate the invariants defined in the preceding
- // paragraph.
+ // From 6.1.7.3 Invariants of the Essential Internal Methods
+ //
+ // [[SetPrototypeOf]] ( V )
+ // * ...
+ // * If target is non-extensible, [[SetPrototypeOf]] must return false,
+ // unless V is the SameValue as the target's observed [[GetPrototypeOf]]
+ // value.
if (!all_extensible) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kNonExtensibleProto, object));
@@ -4552,7 +4534,6 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// static
void JSObject::SetImmutableProto(Handle<JSObject> object) {
- DCHECK(!object->IsAccessCheckNeeded()); // Never called from JS
Handle<Map> map(object->map(), object->GetIsolate());
// Nothing to do if prototype is already set.
@@ -5221,6 +5202,7 @@ namespace {
bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
switch (instance_type) {
+ case JS_AGGREGATE_ERROR_TYPE:
case JS_API_OBJECT_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
@@ -5741,18 +5723,24 @@ double JSDate::CurrentTimeValue(Isolate* isolate) {
}
// static
-Address JSDate::GetField(Address raw_object, Address smi_index) {
+Address JSDate::GetField(Isolate* isolate, Address raw_object,
+ Address smi_index) {
+ // Called through CallCFunction.
+ DisallowHeapAllocation no_gc;
+ DisallowHandleAllocation no_handles;
+ DisallowJavascriptExecution no_js(isolate);
+
Object object(raw_object);
Smi index(smi_index);
return JSDate::cast(object)
- .DoGetField(static_cast<FieldIndex>(index.value()))
+ .DoGetField(isolate, static_cast<FieldIndex>(index.value()))
.ptr();
}
-Object JSDate::DoGetField(FieldIndex index) {
+Object JSDate::DoGetField(Isolate* isolate, FieldIndex index) {
DCHECK_NE(index, kDateValue);
- DateCache* date_cache = GetIsolate()->date_cache();
+ DateCache* date_cache = isolate->date_cache();
if (index < kFirstUncachedField) {
Object stamp = cache_stamp();
@@ -5809,7 +5797,6 @@ Object JSDate::GetUTCField(FieldIndex index, double value,
int64_t time_ms = static_cast<int64_t>(value);
if (index == kTimezoneOffset) {
- GetIsolate()->CountUsage(v8::Isolate::kDateGetTimezoneOffset);
return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
}
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 418c12ac50..9e9f8e3128 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -206,15 +206,17 @@ class JSReceiver : public HeapObject {
V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(
Handle<JSReceiver> object);
- // Returns the class name ([[Class]] property in the specification).
+ // Returns the class name.
V8_EXPORT_PRIVATE String class_name();
// Returns the constructor (the function that was used to instantiate the
// object).
static MaybeHandle<JSFunction> GetConstructor(Handle<JSReceiver> receiver);
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
+ // Returns the constructor name (the (possibly inferred) name of the function
+ // that was used to instantiate the object), if any. If a FunctionTemplate is
+ // used to instantiate the object, the class_name of the FunctionTemplate is
+ // returned instead.
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
V8_EXPORT_PRIVATE Handle<NativeContext> GetCreationContext();
@@ -1278,7 +1280,8 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
// {raw_date} is a tagged Object pointer.
// {smi_index} is a tagged Smi.
// The return value is a tagged Object pointer.
- static Address GetField(Address raw_date, Address smi_index);
+ static Address GetField(Isolate* isolate, Address raw_date,
+ Address smi_index);
static Handle<Object> SetValue(Handle<JSDate> date, double v);
@@ -1318,8 +1321,7 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
};
private:
- inline Object DoGetField(FieldIndex index);
-
+ Object DoGetField(Isolate* isolate, FieldIndex index);
Object GetUTCField(FieldIndex index, double value, DateCache* date_cache);
// Computes and caches the cacheable fields of the date.
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 823fef1e94..1139deeb3d 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -79,8 +79,7 @@ macro GetDerivedMap(implicit context: Context)(
}
return map;
- }
- label SlowPath {
+ } label SlowPath {
return runtime::GetDerivedMap(context, target, newTarget);
}
}
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 40aae56b20..60340931fe 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -19,10 +19,8 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSPluralRules, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSPluralRules)
-ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
-SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
kIcuPluralRulesOffset)
ACCESSORS(JSPluralRules, icu_number_formatter,
@@ -40,8 +38,6 @@ inline JSPluralRules::Type JSPluralRules::type() const {
return TypeBit::decode(flags());
}
-CAST_ACCESSOR(JSPluralRules)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index ffbd53034e..2e12ca0375 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -325,7 +325,6 @@ const std::set<std::string>& JSPluralRules::GetAvailableLocales() {
static base::LazyInstance<PluralRulesAvailableLocales>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
- // return Intl::GetAvailableLocalesForLocale();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index c5df77b57b..4a43b582d7 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -32,7 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
-class JSPluralRules : public JSObject {
+class JSPluralRules
+ : public TorqueGeneratedJSPluralRules<JSPluralRules, JSObject> {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> New(
Isolate* isolate, Handle<Map> map, Handle<Object> locales,
@@ -54,9 +55,7 @@ class JSPluralRules : public JSObject {
Handle<String> TypeAsString() const;
- DECL_CAST(JSPluralRules)
DECL_PRINTER(JSPluralRules)
- DECL_VERIFIER(JSPluralRules)
// Bit positions in |flags|.
DEFINE_TORQUE_GENERATED_JS_PLURAL_RULES_FLAGS()
@@ -64,17 +63,11 @@ class JSPluralRules : public JSObject {
STATIC_ASSERT(Type::CARDINAL <= TypeBit::kMax);
STATIC_ASSERT(Type::ORDINAL <= TypeBit::kMax);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_PLURAL_RULES_FIELDS)
-
- DECL_ACCESSORS(locale, String)
- DECL_INT_ACCESSORS(flags)
DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
DECL_ACCESSORS(icu_number_formatter,
Managed<icu::number::LocalizedNumberFormatter>)
- OBJECT_CONSTRUCTORS(JSPluralRules, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSPluralRules)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index cffd10b9f1..601de6612b 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -18,8 +18,8 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPromise)
-BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
-BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
+BOOL_ACCESSORS(JSPromise, flags, has_handler, HasHandlerBit::kShift)
+BOOL_ACCESSORS(JSPromise, flags, handled_hint, HandledHintBit::kShift)
Object JSPromise::result() const {
DCHECK_NE(Promise::kPending, status());
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index efb13dc015..cd54f9349e 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -7,6 +7,7 @@
#include "src/objects/js-objects.h"
#include "src/objects/promise.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -64,15 +65,8 @@ class JSPromise : public TorqueGeneratedJSPromise<JSPromise, JSObject> {
kHeaderSize + v8::Promise::kEmbedderFieldCount * kEmbedderDataSlotSize;
// Flags layout.
- // The first two bits store the v8::Promise::PromiseState.
- static const int kStatusBits = 2;
- static const int kHasHandlerBit = 2;
- static const int kHandledHintBit = 3;
- using AsyncTaskIdField = base::BitField<int, kHandledHintBit + 1, 22>;
-
- static const int kStatusShift = 0;
- static const int kStatusMask = 0x3;
- static const int kHasHandlerMask = 0x4;
+ DEFINE_TORQUE_GENERATED_JS_PROMISE_FLAGS()
+
STATIC_ASSERT(v8::Promise::kPending == 0);
STATIC_ASSERT(v8::Promise::kFulfilled == 1);
STATIC_ASSERT(v8::Promise::kRejected == 2);
diff --git a/deps/v8/src/objects/js-promise.tq b/deps/v8/src/objects/js-promise.tq
index 515dedcb9b..ae1c2bcc9d 100644
--- a/deps/v8/src/objects/js-promise.tq
+++ b/deps/v8/src/objects/js-promise.tq
@@ -2,40 +2,36 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// JSPromise constants
-const kJSPromiseStatusMask: constexpr int31
- generates 'JSPromise::kStatusMask';
-const kJSPromiseStatusShift: constexpr int31
- generates 'JSPromise::kStatusShift';
-const kJSPromiseHasHandlerMask: constexpr int31
- generates 'JSPromise::kHasHandlerMask';
+bitfield struct JSPromiseFlags extends uint31 {
+ status: PromiseState: 2 bit;
+ has_handler: bool: 1 bit;
+ handled_hint: bool: 1 bit;
+ async_task_id: int32: 22 bit;
+}
@generateCppClass
extern class JSPromise extends JSObject {
macro Status(): PromiseState {
- StaticAssert(kJSPromiseStatusShift == 0);
- const status: int32 = Convert<int32>(this.flags) & kJSPromiseStatusMask;
- return Convert<PromiseState>(status);
+ return this.flags.status;
}
macro SetStatus(status: constexpr PromiseState): void {
assert(this.Status() == PromiseState::kPending);
assert(status != PromiseState::kPending);
- const mask: Smi = SmiConstant(status);
- this.flags = this.flags | mask;
+ this.flags.status = status;
}
macro HasHandler(): bool {
- return (this.flags & kJSPromiseHasHandlerMask) != 0;
+ return this.flags.has_handler;
}
macro SetHasHandler(): void {
- this.flags |= kJSPromiseHasHandlerMask;
+ this.flags.has_handler = true;
}
// Smi 0 terminated list of PromiseReaction objects in case the JSPromise was
// not settled yet, otherwise the result.
reactions_or_result: Zero|PromiseReaction|JSAny;
- flags: Smi;
+ flags: SmiTagged<JSPromiseFlags>;
}
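
The bitfield struct above replaces the hand-written shift and mask constants; in declaration order it packs status into bits 0-1, has_handler into bit 2, handled_hint into bit 3 and async_task_id into bits 4-25. A standalone sketch of that packing (the constant names below are assumptions for illustration, not the Torque-generated ones):

    #include <cstdint>
    #include <iostream>

    // Assumed layout, following the declaration order of JSPromiseFlags.
    constexpr int kStatusShift = 0, kStatusBits = 2;
    constexpr int kHasHandlerShift = 2;
    constexpr int kHandledHintShift = 3;
    constexpr int kAsyncTaskIdShift = 4, kAsyncTaskIdBits = 22;

    int main() {
      uint32_t flags = 0;
      flags |= 1u << kStatusShift;       // status = kFulfilled (1)
      flags |= 1u << kHasHandlerShift;   // has_handler = true
      flags |= 7u << kAsyncTaskIdShift;  // async_task_id = 7

      std::cout << ((flags >> kStatusShift) & ((1u << kStatusBits) - 1)) << ' '
                << (((flags >> kHasHandlerShift) & 1u) != 0) << ' '
                << ((flags >> kAsyncTaskIdShift) & ((1u << kAsyncTaskIdBits) - 1))
                << std::endl;  // prints: 1 1 7
    }
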
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index a6f59468fd..37f39dfb4f 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -34,7 +34,7 @@ JSRegExp::Type JSRegExp::TypeTag() const {
return static_cast<JSRegExp::Type>(smi.value());
}
-int JSRegExp::CaptureCount() {
+int JSRegExp::CaptureCount() const {
switch (TypeTag()) {
case ATOM:
return 0;
@@ -45,6 +45,11 @@ int JSRegExp::CaptureCount() {
}
}
+int JSRegExp::MaxRegisterCount() const {
+ CHECK_EQ(TypeTag(), IRREGEXP);
+ return Smi::ToInt(DataAt(kIrregexpMaxRegisterCountIndex));
+}
+
JSRegExp::Flags JSRegExp::GetFlags() {
DCHECK(this->data().IsFixedArray());
Object data = this->data();
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index ea44aaae27..b0d8e4c5ec 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -17,9 +17,9 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, DoneBit::kShift)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, GlobalBit::kShift)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, UnicodeBit::kShift)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index e54aedbc2b..1fdd503072 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
#include "src/objects/js-objects.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,9 +29,7 @@ class JSRegExpStringIterator
DECL_PRINTER(JSRegExpStringIterator)
- static const int kDoneBit = 0;
- static const int kGlobalBit = 1;
- static const int kUnicodeBit = 2;
+ DEFINE_TORQUE_GENERATED_JS_REG_EXP_STRING_ITERATOR_FLAGS()
TQ_OBJECT_CONSTRUCTORS(JSRegExpStringIterator)
};
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.tq b/deps/v8/src/objects/js-regexp-string-iterator.tq
index 3ab1679699..4daed7af2d 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.tq
+++ b/deps/v8/src/objects/js-regexp-string-iterator.tq
@@ -2,11 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct JSRegExpStringIteratorFlags extends uint31 {
+ done: bool: 1 bit;
+ global: bool: 1 bit;
+ unicode: bool: 1 bit;
+}
+
@generateCppClass
extern class JSRegExpStringIterator extends JSObject {
// The [[IteratingRegExp]] internal property.
iterating_reg_exp: JSReceiver;
// The [[IteratedString]] internal property.
iterated_string: String;
- flags: Smi;
+ flags: SmiTagged<JSRegExpStringIteratorFlags>;
}
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 07236e5ba8..3d584b9f1a 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -113,7 +113,11 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kMaxCaptures = 1 << 16;
// Number of captures (without the match itself).
- inline int CaptureCount();
+ inline int CaptureCount() const;
+ // Each capture (including the match itself) needs two registers.
+ static int RegistersForCaptureCount(int count) { return (count + 1) * 2; }
+
+ inline int MaxRegisterCount() const;
inline Flags GetFlags();
inline String Pattern();
inline Object CaptureNameMap();
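
The register arithmetic introduced above is easy to sanity-check: every capture, including the implicit capture 0 for the whole match, needs one start/end register pair. A tiny compile-time sketch mirroring the helper (standalone copy for illustration):

    constexpr int RegistersForCaptureCount(int count) { return (count + 1) * 2; }

    // /abc/ has no explicit captures but still records the full match.
    static_assert(RegistersForCaptureCount(0) == 2, "capture 0 only");
    // /(a)(b)/ has two explicit captures plus the full match.
    static_assert(RegistersForCaptureCount(2) == 6, "three start/end pairs");
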
@@ -131,9 +135,10 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
}
// This could be a Smi kUninitializedValue or Code.
- Object Code(bool is_latin1) const;
+ V8_EXPORT_PRIVATE Object Code(bool is_latin1) const;
// This could be a Smi kUninitializedValue or ByteArray.
- Object Bytecode(bool is_latin1) const;
+ V8_EXPORT_PRIVATE Object Bytecode(bool is_latin1) const;
+
bool ShouldProduceBytecode();
inline bool HasCompiledCode() const;
inline void DiscardCompiledCodeForSerialization();
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 74da187246..52d9d12261 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -18,14 +18,11 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat)
// Base relative time format accessors.
-ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSRelativeTimeFormat, numberingSystem, String, kNumberingSystemOffset)
ACCESSORS(JSRelativeTimeFormat, icu_formatter,
Managed<icu::RelativeDateTimeFormatter>, kIcuFormatterOffset)
-SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
DCHECK_GE(NumericBit::kMax, numeric);
@@ -38,8 +35,6 @@ inline JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::numeric() const {
return NumericBit::decode(flags());
}
-CAST_ACCESSOR(JSRelativeTimeFormat)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 87aac9f060..53f44b2b40 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -29,7 +29,9 @@ class RelativeDateTimeFormatter;
namespace v8 {
namespace internal {
-class JSRelativeTimeFormat : public JSObject {
+class JSRelativeTimeFormat
+ : public TorqueGeneratedJSRelativeTimeFormat<JSRelativeTimeFormat,
+ JSObject> {
public:
// Creates relative time format object with properties derived from input
// locales and options.
@@ -54,12 +56,7 @@ class JSRelativeTimeFormat : public JSObject {
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
- DECL_CAST(JSRelativeTimeFormat)
-
// RelativeTimeFormat accessors.
- DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(numberingSystem, String)
-
DECL_ACCESSORS(icu_formatter, Managed<icu::RelativeDateTimeFormatter>)
// Numeric: identifying whether numerical descriptions are always used, or
@@ -81,18 +78,9 @@ class JSRelativeTimeFormat : public JSObject {
STATIC_ASSERT(Numeric::AUTO <= NumericBit::kMax);
STATIC_ASSERT(Numeric::ALWAYS <= NumericBit::kMax);
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
DECL_PRINTER(JSRelativeTimeFormat)
- DECL_VERIFIER(JSRelativeTimeFormat)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_RELATIVE_TIME_FORMAT_FIELDS)
- private:
- OBJECT_CONSTRUCTORS(JSRelativeTimeFormat, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSRelativeTimeFormat)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index f312f7c91a..2e32cc5d87 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -18,7 +18,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator)
// Base segment iterator accessors.
ACCESSORS(JSSegmentIterator, icu_break_iterator, Managed<icu::BreakIterator>,
@@ -29,10 +29,6 @@ ACCESSORS(JSSegmentIterator, unicode_string, Managed<icu::UnicodeString>,
BIT_FIELD_ACCESSORS(JSSegmentIterator, flags, is_break_type_set,
JSSegmentIterator::BreakTypeSetBit)
-SMI_ACCESSORS(JSSegmentIterator, flags, kFlagsOffset)
-
-CAST_ACCESSOR(JSSegmentIterator)
-
inline void JSSegmentIterator::set_granularity(
JSSegmenter::Granularity granularity) {
DCHECK_GE(GranularityBits::kMax, granularity);
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 81ebc4dbd2..f1310233e1 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -28,7 +28,8 @@ class UnicodeString;
namespace v8 {
namespace internal {
-class JSSegmentIterator : public JSObject {
+class JSSegmentIterator
+ : public TorqueGeneratedJSSegmentIterator<JSSegmentIterator, JSObject> {
public:
// ecma402 #sec-CreateSegmentIterator
V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmentIterator> Create(
@@ -64,14 +65,11 @@ class JSSegmentIterator : public JSObject {
int32_t start,
int32_t end) const;
- DECL_CAST(JSSegmentIterator)
-
// SegmentIterator accessors.
DECL_ACCESSORS(icu_break_iterator, Managed<icu::BreakIterator>)
DECL_ACCESSORS(unicode_string, Managed<icu::UnicodeString>)
DECL_PRINTER(JSSegmentIterator)
- DECL_VERIFIER(JSSegmentIterator)
inline void set_granularity(JSSegmenter::Granularity granularity);
inline JSSegmenter::Granularity granularity() const;
@@ -83,14 +81,7 @@ class JSSegmentIterator : public JSObject {
STATIC_ASSERT(JSSegmenter::Granularity::WORD <= GranularityBits::kMax);
STATIC_ASSERT(JSSegmenter::Granularity::SENTENCE <= GranularityBits::kMax);
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
-// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_SEGMENT_ITERATOR_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSSegmentIterator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index a31de29c25..ebf4002e70 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -18,13 +18,11 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSSegmenter, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmenter)
// Base segmenter accessors.
-ACCESSORS(JSSegmenter, locale, String, kLocaleOffset)
ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
kIcuBreakIteratorOffset)
-SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
inline void JSSegmenter::set_granularity(Granularity granularity) {
DCHECK_GE(GranularityBits::kMax, granularity);
@@ -37,8 +35,6 @@ inline JSSegmenter::Granularity JSSegmenter::granularity() const {
return GranularityBits::decode(flags());
}
-CAST_ACCESSOR(JSSegmenter)
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 39cf15f628..f12bac2031 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -163,9 +163,7 @@ Handle<String> JSSegmenter::GranularityAsString() const {
}
const std::set<std::string>& JSSegmenter::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::BreakIterator>>::type
- available_locales = LAZY_INSTANCE_INITIALIZER;
- return available_locales.Pointer()->Get();
+ return Intl::GetAvailableLocales();
}
} // namespace internal
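The GetAvailableLocales() hunk above replaces a per-class base::LazyInstance with a shared Intl::GetAvailableLocales() helper. A minimal sketch of the underlying lazy-initialization idea, using a C++11 function-local static; this only illustrates the intent and is not the actual Intl implementation.

#include <iostream>
#include <set>
#include <string>

// A function-local static is initialized once, on first use, and that
// initialization is thread-safe since C++11 -- the property a shared
// "GetAvailableLocales()"-style helper relies on.
const std::set<std::string>& GetAvailableLocalesSketch() {
  static const std::set<std::string> locales = {"en", "de", "fr"};  // placeholder data
  return locales;
}

int main() {
  for (const auto& l : GetAvailableLocalesSketch()) std::cout << l << "\n";
}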
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index b2cd1cac1b..39b32480ba 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -29,7 +29,7 @@ class BreakIterator;
namespace v8 {
namespace internal {
-class JSSegmenter : public JSObject {
+class JSSegmenter : public TorqueGeneratedJSSegmenter<JSSegmenter, JSObject> {
public:
// Creates segmenter object with properties derived from input locales and
// options.
@@ -44,11 +44,7 @@ class JSSegmenter : public JSObject {
Handle<String> GranularityAsString() const;
- DECL_CAST(JSSegmenter)
-
// Segmenter accessors.
- DECL_ACCESSORS(locale, String)
-
DECL_ACCESSORS(icu_break_iterator, Managed<icu::BreakIterator>)
// Granularity: identifying the segmenter used.
@@ -69,18 +65,9 @@ class JSSegmenter : public JSObject {
STATIC_ASSERT(Granularity::WORD <= GranularityBits::kMax);
STATIC_ASSERT(Granularity::SENTENCE <= GranularityBits::kMax);
- // [flags] Bit field containing various flags about the function.
- DECL_INT_ACCESSORS(flags)
-
DECL_PRINTER(JSSegmenter)
- DECL_VERIFIER(JSSegmenter)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_SEGMENTER_FIELDS)
- private:
- OBJECT_CONSTRUCTORS(JSSegmenter, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSSegmenter)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 939964b47e..8b9eb19d6c 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -20,7 +20,6 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistryCleanupIterator)
ACCESSORS(JSFinalizationRegistry, native_context, NativeContext,
kNativeContextOffset)
@@ -33,6 +32,9 @@ SMI_ACCESSORS(JSFinalizationRegistry, flags, kFlagsOffset)
ACCESSORS(JSFinalizationRegistry, next_dirty, Object, kNextDirtyOffset)
CAST_ACCESSOR(JSFinalizationRegistry)
+BIT_FIELD_ACCESSORS(JSFinalizationRegistry, flags, scheduled_for_cleanup,
+ JSFinalizationRegistry::ScheduledForCleanupBit)
+
void JSFinalizationRegistry::Register(
Handle<JSFinalizationRegistry> finalization_registry,
Handle<JSReceiver> target, Handle<Object> holdings,
@@ -175,72 +177,8 @@ bool JSFinalizationRegistry::NeedsCleanup() const {
return cleared_cells().IsWeakCell();
}
-bool JSFinalizationRegistry::scheduled_for_cleanup() const {
- return ScheduledForCleanupField::decode(flags());
-}
-
-void JSFinalizationRegistry::set_scheduled_for_cleanup(
- bool scheduled_for_cleanup) {
- set_flags(ScheduledForCleanupField::update(flags(), scheduled_for_cleanup));
-}
-
-Object JSFinalizationRegistry::PopClearedCellHoldings(
- Handle<JSFinalizationRegistry> finalization_registry, Isolate* isolate) {
- Handle<WeakCell> weak_cell =
- handle(WeakCell::cast(finalization_registry->cleared_cells()), isolate);
- DCHECK(weak_cell->prev().IsUndefined(isolate));
- finalization_registry->set_cleared_cells(weak_cell->next());
- weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
-
- if (finalization_registry->cleared_cells().IsWeakCell()) {
- WeakCell cleared_cells_head =
- WeakCell::cast(finalization_registry->cleared_cells());
- DCHECK_EQ(cleared_cells_head.prev(), *weak_cell);
- cleared_cells_head.set_prev(ReadOnlyRoots(isolate).undefined_value());
- } else {
- DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
- }
-
- // Also remove the WeakCell from the key_map (if it's there).
- if (!weak_cell->unregister_token().IsUndefined(isolate)) {
- if (weak_cell->key_list_prev().IsUndefined(isolate)) {
- Handle<SimpleNumberDictionary> key_map =
- handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
- isolate);
- Handle<Object> unregister_token =
- handle(weak_cell->unregister_token(), isolate);
- uint32_t key = Smi::ToInt(unregister_token->GetHash());
- InternalIndex entry = key_map->FindEntry(isolate, key);
-
- if (weak_cell->key_list_next().IsUndefined(isolate)) {
- // weak_cell is the only one associated with its key; remove the key
- // from the hash table.
- DCHECK(entry.is_found());
- key_map = SimpleNumberDictionary::DeleteEntry(isolate, key_map, entry);
- finalization_registry->set_key_map(*key_map);
- } else {
- // weak_cell is the list head for its key; we need to change the value
- // of the key in the hash table.
- Handle<WeakCell> next =
- handle(WeakCell::cast(weak_cell->key_list_next()), isolate);
- DCHECK_EQ(next->key_list_prev(), *weak_cell);
- next->set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
- weak_cell->set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
- key_map = SimpleNumberDictionary::Set(isolate, key_map, key, next);
- finalization_registry->set_key_map(*key_map);
- }
- } else {
- // weak_cell is somewhere in the middle of its key list.
- WeakCell prev = WeakCell::cast(weak_cell->key_list_prev());
- prev.set_key_list_next(weak_cell->key_list_next());
- if (!weak_cell->key_list_next().IsUndefined()) {
- WeakCell next = WeakCell::cast(weak_cell->key_list_next());
- next.set_key_list_prev(weak_cell->key_list_prev());
- }
- }
- }
-
- return weak_cell->holdings();
+HeapObject WeakCell::relaxed_target() const {
+ return TaggedField<HeapObject>::Relaxed_Load(*this, kTargetOffset);
}
template <typename GCNotifyUpdatedSlotCallback>
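WeakCell::relaxed_target() above reads the target slot with a relaxed load so the word can be inspected without any ordering guarantees. A standalone sketch of a relaxed load using std::atomic; illustrative only, since V8 uses its own TaggedField/Relaxed_Load machinery rather than std::atomic.

#include <atomic>
#include <cstdint>
#include <iostream>

struct CellSketch {
  // Stand-in for a tagged pointer slot that another thread may clear.
  std::atomic<uintptr_t> target{0x1234};

  // Relaxed load: we only need an atomic, tear-free read of the word,
  // not ordering with respect to other memory operations.
  uintptr_t relaxed_target() const {
    return target.load(std::memory_order_relaxed);
  }
};

int main() {
  CellSketch cell;
  std::cout << std::hex << cell.relaxed_target() << "\n";
}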
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index ff5cad7ee3..e0f109c353 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_WEAK_REFS_H_
#include "src/objects/js-objects.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -35,6 +36,8 @@ class JSFinalizationRegistry : public JSObject {
DECL_INT_ACCESSORS(flags)
+ DECL_BOOLEAN_ACCESSORS(scheduled_for_cleanup)
+
class BodyDescriptor;
inline static void Register(
@@ -58,28 +61,22 @@ class JSFinalizationRegistry : public JSObject {
// Returns true if the cleared_cells list is non-empty.
inline bool NeedsCleanup() const;
- inline bool scheduled_for_cleanup() const;
- inline void set_scheduled_for_cleanup(bool scheduled_for_cleanup);
-
- // Remove the first cleared WeakCell from the cleared_cells
- // list (assumes there is one) and return its holdings.
- inline static Object PopClearedCellHoldings(
- Handle<JSFinalizationRegistry> finalization_registry, Isolate* isolate);
-
- // Constructs an iterator for the WeakCells in the cleared_cells list and
- // calls the user's cleanup function.
+ // Remove the already-popped weak_cell from its unregister token linked list,
+ // as well as removing the entry from the key map if it is the only WeakCell
+ // with its unregister token. This method cannot GC and does not shrink the
+ // key map. Asserts that weak_cell has a non-undefined unregister token.
//
- // Returns Nothing<bool> if exception occurs, otherwise returns Just(true).
- static V8_WARN_UNUSED_RESULT Maybe<bool> Cleanup(
- Isolate* isolate, Handle<JSFinalizationRegistry> finalization_registry,
- Handle<Object> callback);
+ // It takes raw Addresses because it is called from CSA and Torque.
+ V8_EXPORT_PRIVATE static void RemoveCellFromUnregisterTokenMap(
+ Isolate* isolate, Address raw_finalization_registry,
+ Address raw_weak_cell);
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
// Bitfields in flags.
- using ScheduledForCleanupField = base::BitField<bool, 0, 1>;
+ DEFINE_TORQUE_GENERATED_FINALIZATION_REGISTRY_FLAGS()
OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
};
@@ -92,6 +89,9 @@ class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
class BodyDescriptor;
+ // Provide relaxed load access to target field.
+ inline HeapObject relaxed_target() const;
+
// Nullify is called during GC and it modifies the pointers in WeakCell and
// JSFinalizationRegistry. Thus we need to tell the GC about the modified
// slots via the gc_notify_updated_slot function. The normal write barrier is
@@ -115,16 +115,6 @@ class JSWeakRef : public TorqueGeneratedJSWeakRef<JSWeakRef, JSObject> {
TQ_OBJECT_CONSTRUCTORS(JSWeakRef)
};
-class JSFinalizationRegistryCleanupIterator
- : public TorqueGeneratedJSFinalizationRegistryCleanupIterator<
- JSFinalizationRegistryCleanupIterator, JSObject> {
- public:
- DECL_PRINTER(JSFinalizationRegistryCleanupIterator)
- DECL_VERIFIER(JSFinalizationRegistryCleanupIterator)
-
- TQ_OBJECT_CONSTRUCTORS(JSFinalizationRegistryCleanupIterator)
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-weak-refs.tq b/deps/v8/src/objects/js-weak-refs.tq
index 7adcb93d70..9008f64290 100644
--- a/deps/v8/src/objects/js-weak-refs.tq
+++ b/deps/v8/src/objects/js-weak-refs.tq
@@ -2,29 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct FinalizationRegistryFlags extends uint31 {
+ scheduled_for_cleanup: bool: 1 bit;
+}
+
extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
- cleanup: Object;
+ cleanup: Callable;
active_cells: Undefined|WeakCell;
cleared_cells: Undefined|WeakCell;
key_map: Object;
// For the linked list of FinalizationRegistries that need cleanup. This
// link is weak.
next_dirty: Undefined|JSFinalizationRegistry;
- flags: Smi;
-}
-
-@generateCppClass
-extern class JSFinalizationRegistryCleanupIterator extends JSObject {
- finalization_registry: JSFinalizationRegistry;
+ flags: SmiTagged<FinalizationRegistryFlags>;
}
@generateCppClass
extern class WeakCell extends HeapObject {
finalization_registry: Undefined|JSFinalizationRegistry;
target: Undefined|JSReceiver;
- unregister_token: Object;
- holdings: Object;
+ unregister_token: JSAny;
+ holdings: JSAny;
// For storing doubly linked lists of WeakCells in JSFinalizationRegistry's
// "active_cells" and "cleared_cells" lists.
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 30fe132129..561e79505e 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -9,6 +9,7 @@
#include "src/handles/handles-inl.h"
#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index f0f3a21baf..d5fbf7c894 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -4,7 +4,6 @@
#include "src/objects/lookup.h"
-#include "include/v8config.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
@@ -193,21 +192,11 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
if (!receiver_generic->IsHeapObject()) return;
Handle<HeapObject> receiver = Handle<HeapObject>::cast(receiver_generic);
- // Getting the native_context from the isolate as a fallback. If possible, we
- // use the receiver's creation context instead.
- Handle<NativeContext> native_context = isolate->native_context();
-
ReadOnlyRoots roots(isolate);
if (*name == roots.constructor_string()) {
- // Fetching the context in here since the operation is rather expensive.
- if (receiver->IsJSReceiver()) {
- native_context = Handle<JSReceiver>::cast(receiver)->GetCreationContext();
- }
-
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate) &&
!Protectors::IsPromiseSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- native_context) &&
+ !Protectors::IsRegExpSpeciesLookupChainIntact(isolate) &&
!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) {
return;
}
@@ -223,12 +212,8 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Protectors::InvalidatePromiseSpeciesLookupChain(isolate);
return;
} else if (receiver->IsJSRegExp(isolate)) {
- if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- native_context)) {
- return;
- }
- Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate,
- native_context);
+ if (!Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) return;
+ Protectors::InvalidateRegExpSpeciesLookupChain(isolate);
return;
} else if (receiver->IsJSTypedArray(isolate)) {
if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) return;
@@ -254,12 +239,8 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Protectors::InvalidatePromiseSpeciesLookupChain(isolate);
} else if (isolate->IsInAnyContext(*receiver,
Context::REGEXP_PROTOTYPE_INDEX)) {
- if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- native_context)) {
- return;
- }
- Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate,
- native_context);
+ if (!Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) return;
+ Protectors::InvalidateRegExpSpeciesLookupChain(isolate);
} else if (isolate->IsInAnyContext(
receiver->map(isolate).prototype(isolate),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
@@ -295,15 +276,9 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Protectors::InvalidateStringIteratorLookupChain(isolate);
}
} else if (*name == roots.species_symbol()) {
- // Fetching the context in here since the operation is rather expensive.
- if (receiver->IsJSReceiver()) {
- native_context = Handle<JSReceiver>::cast(receiver)->GetCreationContext();
- }
-
if (!Protectors::IsArraySpeciesLookupChainIntact(isolate) &&
!Protectors::IsPromiseSpeciesLookupChainIntact(isolate) &&
- !Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- native_context) &&
+ !Protectors::IsRegExpSpeciesLookupChainIntact(isolate) &&
!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) {
return;
}
@@ -320,12 +295,8 @@ void LookupIterator::InternalUpdateProtector(Isolate* isolate,
Protectors::InvalidatePromiseSpeciesLookupChain(isolate);
} else if (isolate->IsInAnyContext(*receiver,
Context::REGEXP_FUNCTION_INDEX)) {
- if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- native_context)) {
- return;
- }
- Protectors::InvalidateRegExpSpeciesLookupChainProtector(isolate,
- native_context);
+ if (!Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) return;
+ Protectors::InvalidateRegExpSpeciesLookupChain(isolate);
} else if (IsTypedArrayFunctionInAnyContext(isolate, *receiver)) {
if (!Protectors::IsTypedArraySpeciesLookupChainIntact(isolate)) return;
Protectors::InvalidateTypedArraySpeciesLookupChain(isolate);
@@ -943,12 +914,7 @@ Handle<Map> LookupIterator::GetFieldOwnerMap() const {
isolate_);
}
-#if defined(__clang__) && defined(V8_OS_WIN)
-// Force function alignment to work around CPU bug: https://crbug.com/968683
-__attribute__((__aligned__(32)))
-#endif
-FieldIndex
-LookupIterator::GetFieldIndex() const {
+FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index c3203df8a5..8d56a13aef 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -92,11 +92,12 @@ class Managed : public Foreign {
// Create a {Managed<CppType>} from an existing {std::shared_ptr<CppType>}.
static Handle<Managed<CppType>> FromSharedPtr(
Isolate* isolate, size_t estimated_size,
- const std::shared_ptr<CppType>& shared_ptr) {
+ std::shared_ptr<CppType> shared_ptr) {
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(estimated_size);
auto destructor = new ManagedPtrDestructor(
- estimated_size, new std::shared_ptr<CppType>{shared_ptr}, Destructor);
+ estimated_size, new std::shared_ptr<CppType>{std::move(shared_ptr)},
+ Destructor);
Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
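FromSharedPtr now takes the std::shared_ptr by value and moves it into the heap-allocated copy, so a caller passing an rvalue avoids one atomic reference-count increment. A small standalone illustration of that signature change in standard C++; the helper name is made up for the example.

#include <iostream>
#include <memory>
#include <utility>

// Pass-by-value + std::move: callers with an rvalue transfer ownership with no
// extra refcount bump; callers with an lvalue pay exactly one copy.
void TakeSharedPtr(std::shared_ptr<int> p) {
  auto* owned = new std::shared_ptr<int>(std::move(p));  // analogous to the ManagedPtrDestructor copy
  std::cout << "use_count=" << owned->use_count() << "\n";
  delete owned;
}

int main() {
  TakeSharedPtr(std::make_shared<int>(42));  // rvalue: moved all the way, prints 1
  auto keep = std::make_shared<int>(7);
  TakeSharedPtr(keep);                       // lvalue: one copy then a move, prints 2
}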
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 9b17805c6a..d529a8bbc9 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -756,6 +756,9 @@ ACCESSORS_CHECKED2(Map, constructor_or_backpointer, Object,
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
+ACCESSORS_CHECKED(Map, wasm_type_info, Foreign,
+ kConstructorOrBackPointerOrNativeContextOffset,
+ IsWasmStructMap() || IsWasmArrayMap())
bool Map::IsPrototypeValidityCellValid() const {
Object validity_cell = prototype_validity_cell();
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 74b2fea2fe..bb13ace4bb 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -13,6 +13,7 @@
#include "src/logging/counters-inl.h"
#include "src/logging/log.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/elements-kind.h"
#include "src/objects/field-type.h"
#include "src/objects/js-objects.h"
#include "src/objects/layout-descriptor.h"
@@ -84,11 +85,11 @@ Map Map::GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type) {
break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
-#define MAKE_CASE(_, TYPE, Name, name) \
- case TYPE: \
- map = roots.name##_map(); \
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
+ map = roots.name##_map(); \
break;
- TORQUE_INTERNAL_CLASS_LIST_GENERATOR(MAKE_CASE, _)
+ TORQUE_INTERNAL_INSTANCE_TYPE_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
UNREACHABLE();
@@ -142,7 +143,6 @@ VisitorId Map::GetVisitorId(Map map) {
case EMBEDDER_DATA_ARRAY_TYPE:
return kVisitEmbedderDataArray;
- case FIXED_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
case HASH_TABLE_TYPE:
@@ -175,10 +175,6 @@ VisitorId Map::GetVisitorId(Map map) {
case EPHEMERON_HASH_TABLE_TYPE:
return kVisitEphemeronHashTable;
- case WEAK_FIXED_ARRAY_TYPE:
- case WEAK_ARRAY_LIST_TYPE:
- return kVisitWeakArray;
-
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
@@ -272,6 +268,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
+ case JS_AGGREGATE_ERROR_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -295,7 +292,6 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_PROMISE_TYPE:
case JS_REG_EXP_TYPE:
case JS_REG_EXP_STRING_ITERATOR_TYPE:
- case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
case JS_FINALIZATION_REGISTRY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
@@ -367,10 +363,15 @@ VisitorId Map::GetVisitorId(Map map) {
case SYNTHETIC_MODULE_TYPE:
return kVisitSyntheticModule;
+ case WASM_ARRAY_TYPE:
+ return kVisitWasmArray;
+ case WASM_STRUCT_TYPE:
+ return kVisitWasmStruct;
+
#define MAKE_TQ_CASE(TYPE, Name) \
case TYPE: \
return kVisit##Name;
- TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TQ_CASE)
+ TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(MAKE_TQ_CASE)
#undef MAKE_TQ_CASE
default:
@@ -794,8 +795,21 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldOwnerGroup);
+
+ if (new_constness != old_constness) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldConstGroup);
+ }
+
+ if (!new_field_type->Equals(*old_field_type)) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldTypeGroup);
+ }
+
+ if (!new_representation.Equals(old_representation)) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldRepresentationGroup);
+ }
if (FLAG_trace_generalization) {
map->PrintGeneralization(
@@ -1405,27 +1419,26 @@ bool Map::OnlyHasSimpleProperties() const {
!IsSpecialReceiverMap() && !is_dictionary_map();
}
-bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
- if (IsDictionaryElementsKind(elements_kind())) {
- return false;
- }
-
+bool Map::MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate) {
for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
iter.Advance()) {
- // Be conservative, don't walk into proxies.
- if (iter.GetCurrent().IsJSProxy()) return true;
- // String wrappers have non-configurable, non-writable elements.
- if (iter.GetCurrent().IsStringWrapper()) return true;
+ // Be conservative, don't look into any JSReceivers that may have custom
+ // elements. For example, into JSProxies, String wrappers (which have
+ // non-configurable, non-writable elements), API objects, etc.
+ if (iter.GetCurrent().map().IsCustomElementsReceiverMap()) return true;
+
JSObject current = iter.GetCurrent<JSObject>();
+ ElementsKind elements_kind = current.GetElementsKind(isolate);
+ if (IsFrozenElementsKind(elements_kind)) return true;
- if (current.HasDictionaryElements() &&
- current.element_dictionary().requires_slow_elements()) {
+ if (IsDictionaryElementsKind(elements_kind) &&
+ current.element_dictionary(isolate).requires_slow_elements()) {
return true;
}
- if (current.HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(current.elements());
- Object arguments = parameter_map.get(1);
+ if (IsSlowArgumentsElementsKind(elements_kind)) {
+ FixedArray parameter_map = FixedArray::cast(current.elements(isolate));
+ Object arguments = parameter_map.get(isolate, 1);
if (NumberDictionary::cast(arguments).requires_slow_elements()) {
return true;
}
@@ -1443,7 +1456,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
Handle<HeapObject> prototype(map->prototype(), isolate);
Map::SetPrototype(isolate, result, prototype);
result->set_constructor_or_backpointer(map->GetConstructor());
- result->set_bit_field(map->bit_field());
+ result->set_relaxed_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index a84173188e..9876d85d3e 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -28,9 +28,7 @@ enum InstanceType : uint16_t;
V(CoverageInfo) \
V(DataObject) \
V(FeedbackMetadata) \
- V(FixedDoubleArray) \
- V(SeqOneByteString) \
- V(SeqTwoByteString)
+ V(FixedDoubleArray)
#define POINTER_VISITOR_ID_LIST(V) \
V(AllocationSite) \
@@ -38,7 +36,6 @@ enum InstanceType : uint16_t;
V(Cell) \
V(Code) \
V(CodeDataContainer) \
- V(ConsString) \
V(Context) \
V(DataHandler) \
V(DescriptorArray) \
@@ -46,7 +43,6 @@ enum InstanceType : uint16_t;
V(EphemeronHashTable) \
V(FeedbackCell) \
V(FeedbackVector) \
- V(FixedArray) \
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
@@ -66,7 +62,6 @@ enum InstanceType : uint16_t;
V(PrototypeInfo) \
V(SharedFunctionInfo) \
V(ShortcutCandidate) \
- V(SlicedString) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
@@ -74,22 +69,19 @@ enum InstanceType : uint16_t;
V(Struct) \
V(Symbol) \
V(SyntheticModule) \
- V(ThinString) \
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
- V(WeakArray) \
+ V(WasmArray) \
+ V(WasmStruct) \
V(WeakCell)
-#define TORQUE_OBJECT_BODY_TO_VISITOR_ID_LIST_ADAPTER(V, TYPE, TypeName) \
- V(TypeName)
-
-#define TORQUE_VISITOR_ID_LIST(V) \
- TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR( \
- TORQUE_OBJECT_BODY_TO_VISITOR_ID_LIST_ADAPTER, V)
+#define TORQUE_VISITOR_ID_LIST(V) \
+ TORQUE_DATA_ONLY_VISITOR_ID_LIST(V) \
+ TORQUE_POINTER_VISITOR_ID_LIST(V)
// Objects with the same visitor id are processed in the same way by
// the heap visitors. The visitor ids for data only objects must precede
@@ -97,9 +89,11 @@ enum InstanceType : uint16_t;
// of whether an object contains only data or may contain pointers.
enum VisitorId {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- DATA_ONLY_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL) kDataOnlyVisitorIdCount,
+ DATA_ONLY_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ TORQUE_DATA_ONLY_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ kDataOnlyVisitorIdCount,
POINTER_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
- TORQUE_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ TORQUE_POINTER_VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
kVisitorIdCount
};
@@ -426,9 +420,11 @@ class Map : public HeapObject {
// there is no guarantee it is attached.
inline bool IsDetached(Isolate* isolate) const;
- // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but if a
- // map with DICTIONARY_ELEMENTS was found in the prototype chain.
- bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
+ // Returns true if there is an object with potentially read-only elements
+ // in the prototype chain. It could be a Proxy, a string wrapper,
+ // an object with DICTIONARY_ELEMENTS potentially containing read-only
+ // elements or an object with any frozen elements, or a slow arguments object.
+ bool MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate);
inline Map ElementsTransitionMap(Isolate* isolate);
@@ -578,9 +574,11 @@ class Map : public HeapObject {
// back pointer chain until they find the map holding their constructor.
// Returns null_value if there's neither a constructor function nor a
// FunctionTemplateInfo available.
- // The field also overlaps with the native context pointer for context maps.
+ // The field also overlaps with the native context pointer for context maps,
+ // and with the Wasm type info for WebAssembly object maps.
DECL_ACCESSORS(constructor_or_backpointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
+ DECL_ACCESSORS(wasm_type_info, Foreign)
DECL_GETTER(GetConstructor, Object)
DECL_GETTER(GetFunctionTemplateInfo, FunctionTemplateInfo)
inline void SetConstructor(Object constructor,
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 02f5b485ce..7c236a8ff9 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -5,11 +5,8 @@
#ifndef V8_OBJECTS_MAYBE_OBJECT_INL_H_
#define V8_OBJECTS_MAYBE_OBJECT_INL_H_
+#include "src/common/ptr-compr-inl.h"
#include "src/objects/maybe-object.h"
-
-#ifdef V8_COMPRESS_POINTERS
-#include "src/execution/isolate.h"
-#endif
#include "src/objects/smi-inl.h"
#include "src/objects/tagged-impl-inl.h"
@@ -59,16 +56,32 @@ HeapObjectReference HeapObjectReference::Weak(Object object) {
}
// static
-HeapObjectReference HeapObjectReference::ClearedValue(Isolate* isolate) {
+HeapObjectReference HeapObjectReference::ClearedValue(const Isolate* isolate) {
// Construct cleared weak ref value.
+#ifdef V8_COMPRESS_POINTERS
+ // This is necessary to make pointer decompression computation also
+ // suitable for cleared weak references.
+ Address raw_value =
+ DecompressTaggedPointer(isolate, kClearedWeakHeapObjectLower32);
+#else
Address raw_value = kClearedWeakHeapObjectLower32;
+#endif
+ // The rest of the code will check only the lower 32-bits.
+ DCHECK_EQ(kClearedWeakHeapObjectLower32, static_cast<uint32_t>(raw_value));
+ return HeapObjectReference(raw_value);
+}
+
+// static
+HeapObjectReference HeapObjectReference::ClearedValue(
+ const OffThreadIsolate* isolate) {
+ // Construct cleared weak ref value.
#ifdef V8_COMPRESS_POINTERS
// This is necessary to make pointer decompression computation also
// suitable for cleared weak references.
- Address isolate_root = isolate->isolate_root();
- raw_value |= isolate_root;
- DCHECK_EQ(raw_value & (~static_cast<Address>(kClearedWeakHeapObjectLower32)),
- isolate_root);
+ Address raw_value =
+ DecompressTaggedPointer(isolate, kClearedWeakHeapObjectLower32);
+#else
+ Address raw_value = kClearedWeakHeapObjectLower32;
#endif
// The rest of the code will check only the lower 32-bits.
DCHECK_EQ(kClearedWeakHeapObjectLower32, static_cast<uint32_t>(raw_value));
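Both ClearedValue overloads above now decompress the 32-bit cleared-weak-reference pattern against the isolate, so the resulting full pointer survives the normal decompression path while its low 32 bits stay equal to the cleared pattern. A hedged sketch of that idea: a full address rebuilt from an aligned base plus a 32-bit value; the constant and the arithmetic here are illustrative, the real V8 helpers are more involved.

#include <cstdint>
#include <iostream>

constexpr uint32_t kClearedWeakLower32 = 3;  // illustrative constant

// Sketch of decompression: aligned base (the isolate/cage root) plus the
// 32-bit compressed value.
uint64_t DecompressTagged(uint64_t base, uint32_t compressed) {
  return base + compressed;
}

int main() {
  uint64_t isolate_root = uint64_t{2} << 32;  // pretend 4GB-aligned cage base
  uint64_t cleared = DecompressTagged(isolate_root, kClearedWeakLower32);
  std::cout << std::hex << cleared << "\n";
  // The low 32 bits are unchanged, which is what the DCHECK above verifies.
  std::cout << (static_cast<uint32_t>(cleared) == kClearedWeakLower32) << "\n";
}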
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index 304cf90d28..0bb312692a 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -47,7 +47,10 @@ class HeapObjectReference : public MaybeObject {
V8_INLINE static HeapObjectReference Weak(Object object);
- V8_INLINE static HeapObjectReference ClearedValue(Isolate* isolate);
+ V8_INLINE static HeapObjectReference ClearedValue(const Isolate* isolate);
+
+ V8_INLINE static HeapObjectReference ClearedValue(
+ const OffThreadIsolate* isolate);
template <typename THeapObjectSlot>
V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 11b5c034c9..34b3ae26ef 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -7,8 +7,6 @@
#include "torque-generated/instance-types-tq.h"
-#define TORQUE_INTERNAL_CLASS_NAMES_ADAPTER(V, NAME, Name, name) V(Name)
-
namespace v8 {
namespace internal {
@@ -128,6 +126,7 @@ class ZoneForwardList;
V(HandlerTable) \
V(HeapNumber) \
V(InternalizedString) \
+ V(JSAggregateError) \
V(JSArgumentsObject) \
V(JSArray) \
V(JSArrayBuffer) \
@@ -145,7 +144,6 @@ class ZoneForwardList;
V(JSDate) \
V(JSError) \
V(JSFinalizationRegistry) \
- V(JSFinalizationRegistryCleanupIterator) \
V(JSFunction) \
V(JSFunctionOrBoundFunction) \
V(JSGeneratorObject) \
@@ -228,17 +226,19 @@ class ZoneForwardList;
V(UncompiledDataWithoutPreparseData) \
V(Undetectable) \
V(UniqueName) \
+ V(WasmArray) \
V(WasmExceptionObject) \
V(WasmExceptionPackage) \
V(WasmGlobalObject) \
V(WasmInstanceObject) \
V(WasmMemoryObject) \
V(WasmModuleObject) \
+ V(WasmStruct) \
V(WasmTableObject) \
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
- TORQUE_INTERNAL_CLASS_LIST_GENERATOR(TORQUE_INTERNAL_CLASS_NAMES_ADAPTER, V)
+ TORQUE_INTERNAL_CLASS_LIST(V)
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 60c508e336..58b4106e88 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
#define V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
-#include "src/objects/objects-body-descriptors.h"
-
#include <algorithm>
#include "src/codegen/reloc-info.h"
@@ -17,12 +15,16 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
+#include "src/objects/objects-body-descriptors.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/source-text-module.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -681,34 +683,6 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
};
-class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {}
-
- static inline int SizeOf(Map map, HeapObject obj) {
- SeqOneByteString string = SeqOneByteString::cast(obj);
- return SeqOneByteString::SizeFor(string.synchronized_length());
- }
-};
-
-class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {}
-
- static inline int SizeOf(Map map, HeapObject obj) {
- SeqTwoByteString string = SeqTwoByteString::cast(obj);
- return SeqTwoByteString::SizeFor(string.synchronized_length());
- }
-};
-
class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -825,6 +799,54 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Fields in WasmArrays never change their types in place, so
+ // there should never be a need to call this function.
+ UNREACHABLE();
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ if (!WasmArray::type(map)->element_type().IsReferenceType()) return;
+ IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return WasmArray::SizeFor(map, WasmArray::cast(object).length());
+ }
+};
+
+class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ // Fields in WasmStructs never change their types in place, so
+ // there should never be a need to call this function.
+ UNREACHABLE();
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ WasmStruct wasm_struct = WasmStruct::cast(obj);
+ wasm::StructType* type = WasmStruct::GcSafeType(map);
+ for (uint32_t i = 0; i < type->field_count(); i++) {
+ if (!type->field(i).IsReferenceType()) continue;
+ int offset =
+ WasmStruct::kHeaderSize + static_cast<int>(type->field_offset(i));
+ v->VisitPointer(wasm_struct, wasm_struct.RawField(offset));
+ }
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -896,7 +918,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case EMBEDDER_DATA_ARRAY_TYPE:
return Op::template apply<EmbedderDataArray::BodyDescriptor>(p1, p2, p3,
p4);
- case FIXED_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
case HASH_TABLE_TYPE:
@@ -926,10 +947,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<Context::BodyDescriptor>(p1, p2, p3, p4);
case NATIVE_CONTEXT_TYPE:
return Op::template apply<NativeContext::BodyDescriptor>(p1, p2, p3, p4);
- case WEAK_FIXED_ARRAY_TYPE:
- return Op::template apply<WeakFixedArray::BodyDescriptor>(p1, p2, p3, p4);
- case WEAK_ARRAY_LIST_TYPE:
- return Op::template apply<WeakArrayList::BodyDescriptor>(p1, p2, p3, p4);
case FIXED_DOUBLE_ARRAY_TYPE:
return ReturnType();
case FEEDBACK_METADATA_TYPE:
@@ -949,6 +966,10 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4);
case COVERAGE_INFO_TYPE:
return Op::template apply<CoverageInfo::BodyDescriptor>(p1, p2, p3, p4);
+ case WASM_ARRAY_TYPE:
+ return Op::template apply<WasmArray::BodyDescriptor>(p1, p2, p3, p4);
+ case WASM_STRUCT_TYPE:
+ return Op::template apply<WasmStruct::BodyDescriptor>(p1, p2, p3, p4);
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
@@ -960,6 +981,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
+ case JS_AGGREGATE_ERROR_TYPE:
case JS_ARRAY_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
@@ -979,7 +1001,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
- case JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE:
case JS_FINALIZATION_REGISTRY_TYPE:
#ifdef V8_INTL_SUPPORT
case JS_V8_BREAK_ITERATOR_TYPE:
@@ -1100,10 +1121,12 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case SYNTHETIC_MODULE_TYPE:
return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
p4);
+// TODO(tebbi): Avoid duplicated cases when the body descriptors are identical.
#define MAKE_TORQUE_BODY_DESCRIPTOR_APPLY(TYPE, TypeName) \
case TYPE: \
return Op::template apply<TypeName::BodyDescriptor>(p1, p2, p3, p4);
- TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_BODY_DESCRIPTOR_APPLY)
+ TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(
+ MAKE_TORQUE_BODY_DESCRIPTOR_APPLY)
#undef MAKE_TORQUE_BODY_DESCRIPTOR_APPLY
default:
@@ -1164,6 +1187,8 @@ class EphemeronHashTable::BodyDescriptor final : public BodyDescriptorBase {
}
};
+#include "torque-generated/objects-body-descriptors-tq-inl.inc"
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/objects-body-descriptors.h b/deps/v8/src/objects/objects-body-descriptors.h
index 13adf4c3db..8135e1f170 100644
--- a/deps/v8/src/objects/objects-body-descriptors.h
+++ b/deps/v8/src/objects/objects-body-descriptors.h
@@ -74,15 +74,14 @@ class BodyDescriptorBase {
ObjectVisitor* v);
};
-// This class describes a body of an object of a fixed size
-// in which all pointer fields are located in the [start_offset, end_offset)
-// interval.
-template <int start_offset, int end_offset, int size>
-class FixedBodyDescriptor final : public BodyDescriptorBase {
+// This class describes a body of an object in which all pointer fields are
+// located in the [start_offset, end_offset) interval.
+// All pointers have to be strong.
+template <int start_offset, int end_offset>
+class FixedRangeBodyDescriptor : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
static const int kEndOffset = end_offset;
- static const int kSize = size;
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset >= kStartOffset && offset < kEndOffset;
@@ -99,14 +98,30 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
IterateBody(map, obj, v);
}
- static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+ private:
+ static inline int SizeOf(Map map, HeapObject object) {
+ // Has to be implemented by the subclass.
+ UNREACHABLE();
+ }
};
-// This class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
+// This class describes a body of an object of a fixed size
+// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
+// All pointers have to be strong.
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor
+ : public FixedRangeBodyDescriptor<start_offset, end_offset> {
+ public:
+ static const int kSize = size;
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
+// This class describes a body of an object in which all pointer fields are
+// located in the [start_offset, object_size) interval.
+// All pointers have to be strong.
template <int start_offset>
-class FlexibleBodyDescriptor final : public BodyDescriptorBase {
+class SuffixRangeBodyDescriptor : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
@@ -120,13 +135,30 @@ class FlexibleBodyDescriptor final : public BodyDescriptorBase {
IteratePointers(obj, start_offset, object_size, v);
}
+ private:
+ static inline int SizeOf(Map map, HeapObject object) {
+ // Has to be implemented by the subclass.
+ UNREACHABLE();
+ }
+};
+
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval.
+// All pointers have to be strong.
+template <int start_offset>
+class FlexibleBodyDescriptor : public SuffixRangeBodyDescriptor<start_offset> {
+ public:
static inline int SizeOf(Map map, HeapObject object);
};
using StructBodyDescriptor = FlexibleBodyDescriptor<HeapObject::kHeaderSize>;
+// This class describes a body of an object in which all pointer fields are
+// located in the [start_offset, object_size) interval.
+// Pointers may be strong or may be MaybeObject-style weak pointers.
template <int start_offset>
-class FlexibleWeakBodyDescriptor final : public BodyDescriptorBase {
+class SuffixRangeWeakBodyDescriptor : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
@@ -140,9 +172,40 @@ class FlexibleWeakBodyDescriptor final : public BodyDescriptorBase {
IterateMaybeWeakPointers(obj, start_offset, object_size, v);
}
+ private:
+ static inline int SizeOf(Map map, HeapObject object) {
+ // Has to be implemented by the subclass.
+ UNREACHABLE();
+ }
+};
+
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval.
+// Pointers may be strong or may be MaybeObject-style weak pointers.
+template <int start_offset>
+class FlexibleWeakBodyDescriptor
+ : public SuffixRangeWeakBodyDescriptor<start_offset> {
+ public:
static inline int SizeOf(Map map, HeapObject object);
};
+// This class describes a body of an object without any pointers.
+class DataOnlyBodyDescriptor : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ private:
+ static inline int SizeOf(Map map, HeapObject object) {
+ // Has to be implemented by the subclass.
+ UNREACHABLE();
+ }
+};
+
// This class describes a body of an object which has a parent class that also
// has a body descriptor. This represents a union of the parent's body
// descriptor, and a new descriptor for the child -- so, both parent and child's
@@ -180,10 +243,6 @@ class SubclassBodyDescriptor final : public BodyDescriptorBase {
}
};
-#define TORQUE_BODY_DESCRIPTOR_LIST_ADAPTER(V, TYPE, TypeName) V(TYPE, TypeName)
-#define TORQUE_BODY_DESCRIPTOR_LIST(V) \
- TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR(TORQUE_BODY_DESCRIPTOR_LIST_ADAPTER, V)
-
} // namespace internal
} // namespace v8
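The refactoring above splits each Flexible/Fixed body descriptor into a range-only base (which slots to visit) and a derived class that supplies the object size. A compact standalone sketch of that base/derived split; the visitor and class names are hypothetical stand-ins, not the V8 classes.

#include <cstdio>

struct Object {};

// Range-only base: knows which slot range to visit and deliberately has no
// usable SizeOf, mirroring FixedRangeBodyDescriptor / SuffixRangeBodyDescriptor.
template <int kStart, int kEnd>
struct RangeBodyDescriptor {
  template <typename Visitor>
  static void IterateBody(const Object& obj, Visitor&& v) {
    for (int slot = kStart; slot < kEnd; ++slot) v(slot);
  }
};

// Derived descriptor adds the size, mirroring FixedBodyDescriptor<start, end, size>.
template <int kStart, int kEnd, int kSize>
struct SizedBodyDescriptor : RangeBodyDescriptor<kStart, kEnd> {
  static int SizeOf(const Object&) { return kSize; }
};

int main() {
  Object o{};
  SizedBodyDescriptor<1, 3, 4>::IterateBody(o, [](int slot) {
    std::printf("visit slot %d\n", slot);
  });
  std::printf("size=%d\n", SizedBodyDescriptor<1, 3, 4>::SizeOf(o));
}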
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index a830e13ed1..8a990cbc63 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -157,7 +157,8 @@ namespace internal {
wasm_exported_function_data) \
V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
wasm_indirect_function_table) \
- V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data)
+ V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
+ V(_, WASM_VALUE_TYPE, WasmValue, wasm_value)
#define STRUCT_LIST_GENERATOR(V, _) STRUCT_LIST_GENERATOR_BASE(V, _)
@@ -174,15 +175,6 @@ namespace internal {
// Produces (Map, struct_name_map, StructNameMap) entries
#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
-// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
-#define TORQUE_INTERNAL_CLASS_LIST_MAPS_ADAPTER(V, NAME, Name, name) \
- V(Map, name##_map, Name##Map)
-
-// Produces (NAME, Name, name) entries.
-#define TORQUE_INTERNAL_CLASS_MAPS_LIST(V) \
- TORQUE_INTERNAL_CLASS_LIST_GENERATOR( \
- TORQUE_INTERNAL_CLASS_LIST_MAPS_ADAPTER, V)
-
//
// The following macros define list of allocation size objects and list of
// their maps.
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index e457d62706..53693149e1 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -123,10 +123,9 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
-
#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
#include "torque-generated/internal-class-definitions-tq-inl.h"
-#include "torque-generated/objects-body-descriptors-tq-inl.h"
namespace v8 {
namespace internal {
@@ -2278,12 +2277,11 @@ int HeapObject::SizeFromMap(Map map) const {
PreparseData data = PreparseData::unchecked_cast(*this);
return PreparseData::SizeFor(data.data_length(), data.children_length());
}
-#define MAKE_TORQUE_SIZE_FOR(TYPE, TypeName) \
- if (instance_type == TYPE) { \
- TypeName instance = TypeName::unchecked_cast(*this); \
- return TypeName::SizeFor(instance); \
+#define MAKE_TORQUE_SIZE_FOR(TYPE, TypeName) \
+ if (instance_type == TYPE) { \
+ return TypeName::unchecked_cast(*this).AllocatedSize(); \
}
- TORQUE_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_SIZE_FOR)
+ TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(MAKE_TORQUE_SIZE_FOR)
#undef MAKE_TORQUE_SIZE_FOR
if (instance_type == CODE_TYPE) {
@@ -2293,6 +2291,9 @@ int HeapObject::SizeFromMap(Map map) const {
return CoverageInfo::SizeFor(
CoverageInfo::unchecked_cast(*this).slot_count());
}
+ if (instance_type == WASM_ARRAY_TYPE) {
+ return WasmArray::SizeFor(map, WasmArray::cast(*this).length());
+ }
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
return EmbedderDataArray::SizeFor(
EmbedderDataArray::unchecked_cast(*this).length());
@@ -2305,8 +2306,9 @@ bool HeapObject::NeedsRehashing() const {
case TRANSITION_ARRAY_TYPE:
return TransitionArray::cast(*this).number_of_entries() > 1;
case ORDERED_HASH_MAP_TYPE:
+ return OrderedHashMap::cast(*this).NumberOfElements() > 0;
case ORDERED_HASH_SET_TYPE:
- return false; // We'll rehash from the JSMap or JSSet referencing them.
+ return OrderedHashSet::cast(*this).NumberOfElements() > 0;
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -2316,8 +2318,6 @@ bool HeapObject::NeedsRehashing() const {
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_TYPE:
return true;
default:
return false;
@@ -2327,13 +2327,10 @@ bool HeapObject::NeedsRehashing() const {
bool HeapObject::CanBeRehashed() const {
DCHECK(NeedsRehashing());
switch (map().instance_type()) {
- case JS_MAP_TYPE:
- case JS_SET_TYPE:
- return true;
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
- UNREACHABLE(); // We'll rehash from the JSMap or JSSet referencing them.
case ORDERED_NAME_DICTIONARY_TYPE:
+ // TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return false;
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
@@ -2357,8 +2354,7 @@ bool HeapObject::CanBeRehashed() const {
return false;
}
-void HeapObject::RehashBasedOnMap(Isolate* isolate) {
- ReadOnlyRoots roots = ReadOnlyRoots(isolate);
+void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
switch (map().instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
@@ -2390,17 +2386,6 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
case SMALL_ORDERED_HASH_SET_TYPE:
DCHECK_EQ(0, SmallOrderedHashSet::cast(*this).NumberOfElements());
break;
- case ORDERED_HASH_MAP_TYPE:
- case ORDERED_HASH_SET_TYPE:
- UNREACHABLE(); // We'll rehash from the JSMap or JSSet referencing them.
- case JS_MAP_TYPE: {
- JSMap::cast(*this).Rehash(isolate);
- break;
- }
- case JS_SET_TYPE: {
- JSSet::cast(*this).Rehash(isolate);
- break;
- }
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this).NumberOfElements());
break;
@@ -3122,20 +3107,10 @@ MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
JSProxy);
}
- if (target->IsJSProxy() && JSProxy::cast(*target).IsRevoked()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
- JSProxy);
- }
if (!handler->IsJSReceiver()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
JSProxy);
}
- if (handler->IsJSProxy() && JSProxy::cast(*handler).IsRevoked()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
- JSProxy);
- }
return isolate->factory()->NewJSProxy(Handle<JSReceiver>::cast(target),
Handle<JSReceiver>::cast(handler));
}
@@ -4228,7 +4203,8 @@ Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture_count) {
DCHECK_GE(match_info->length(), kLastMatchOverhead);
- int capture_register_count = (capture_count + 1) * 2;
+ int capture_register_count =
+ JSRegExp::RegistersForCaptureCount(capture_count);
const int required_length = kFirstCaptureIndex + capture_register_count;
Handle<RegExpMatchInfo> result = Handle<RegExpMatchInfo>::cast(
EnsureSpaceInFixedArray(isolate, match_info, required_length));
@@ -4263,9 +4239,8 @@ Handle<FrameArray> FrameArray::AppendWasmFrame(
int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
// This must be either a compiled or interpreted wasm frame, or an asm.js
// frame (which is always compiled).
- DCHECK_EQ(1, ((flags & kIsWasmInterpretedFrame) != 0) +
- ((flags & kIsWasmCompiledFrame) != 0) +
- ((flags & kIsAsmJsWasmFrame) != 0));
+ DCHECK_EQ(1,
+ ((flags & kIsWasmFrame) != 0) + ((flags & kIsAsmJsWasmFrame) != 0));
Isolate* isolate = wasm_instance->GetIsolate();
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
@@ -4935,14 +4910,14 @@ Object Script::GetNameOrSourceURL() {
template <typename LocalIsolate>
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- LocalIsolate* isolate, const FunctionLiteral* fun) {
- CHECK_NE(fun->function_literal_id(), kFunctionLiteralIdInvalid);
+ LocalIsolate* isolate, int function_literal_id) {
+ CHECK_NE(function_literal_id, kFunctionLiteralIdInvalid);
// If this check fails, the problem is most probably the function id
// renumbering done by AstFunctionLiteralIdReindexer; in particular, that
// AstTraversalVisitor doesn't recurse properly in the construct which
// triggers the mismatch.
- CHECK_LT(fun->function_literal_id(), shared_function_infos().length());
- MaybeObject shared = shared_function_infos().Get(fun->function_literal_id());
+ CHECK_LT(function_literal_id, shared_function_infos().length());
+ MaybeObject shared = shared_function_infos().Get(function_literal_id);
HeapObject heap_object;
if (!shared->GetHeapObject(&heap_object) ||
heap_object.IsUndefined(isolate)) {
@@ -4951,9 +4926,9 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
return handle(SharedFunctionInfo::cast(heap_object), isolate);
}
template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- Isolate* isolate, const FunctionLiteral* fun);
+ Isolate* isolate, int function_literal_id);
template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- OffThreadIsolate* isolate, const FunctionLiteral* fun);
+ OffThreadIsolate* isolate, int function_literal_id);
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
@@ -5195,9 +5170,9 @@ bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
}
bool SharedFunctionInfo::HasSourceCode() const {
- Isolate* isolate = GetIsolate();
- return !script().IsUndefined(isolate) &&
- !Script::cast(script()).source().IsUndefined(isolate);
+ ReadOnlyRoots roots = GetReadOnlyRoots();
+ return !script().IsUndefined(roots) &&
+ !Script::cast(script()).source().IsUndefined(roots);
}
void SharedFunctionInfo::DiscardCompiledMetadata(
@@ -5891,13 +5866,13 @@ class StringSharedKey : public HashTableKey {
};
v8::Promise::PromiseState JSPromise::status() const {
- int value = flags() & kStatusMask;
+ int value = flags() & StatusBits::kMask;
DCHECK(value == 0 || value == 1 || value == 2);
return static_cast<v8::Promise::PromiseState>(value);
}
void JSPromise::set_status(Promise::PromiseState status) {
- int value = flags() & ~kStatusMask;
+ int value = flags() & ~StatusBits::kMask;
set_flags(value | status);
}
@@ -5905,7 +5880,7 @@ void JSPromise::set_status(Promise::PromiseState status) {
const char* JSPromise::Status(v8::Promise::PromiseState status) {
switch (status) {
case v8::Promise::kFulfilled:
- return "resolved";
+ return "fulfilled";
case v8::Promise::kPending:
return "pending";
case v8::Promise::kRejected:
@@ -5915,11 +5890,11 @@ const char* JSPromise::Status(v8::Promise::PromiseState status) {
}
int JSPromise::async_task_id() const {
- return AsyncTaskIdField::decode(flags());
+ return AsyncTaskIdBits::decode(flags());
}
void JSPromise::set_async_task_id(int id) {
- set_flags(AsyncTaskIdField::update(flags(), id));
+ set_flags(AsyncTaskIdBits::update(flags(), id));
}
// static
@@ -5999,6 +5974,7 @@ Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
PromiseReaction::kReject);
}
+// https://tc39.es/ecma262/#sec-promise-resolve-functions
// static
MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
Handle<Object> resolution) {
@@ -6007,7 +5983,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
isolate->factory()->undefined_value());
- // 6. If SameValue(resolution, promise) is true, then
+ // 7. If SameValue(resolution, promise) is true, then
if (promise.is_identical_to(resolution)) {
// a. Let selfResolutionError be a newly created TypeError object.
Handle<Object> self_resolution_error = isolate->factory()->NewTypeError(
@@ -6016,13 +5992,13 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
return Reject(promise, self_resolution_error);
}
- // 7. If Type(resolution) is not Object, then
+ // 8. If Type(resolution) is not Object, then
if (!resolution->IsJSReceiver()) {
// a. Return FulfillPromise(promise, resolution).
return Fulfill(promise, resolution);
}
- // 8. Let then be Get(resolution, "then").
+ // 9. Let then be Get(resolution, "then").
MaybeHandle<Object> then;
Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(resolution));
@@ -6045,7 +6021,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
isolate->factory()->then_string());
}
- // 9. If then is an abrupt completion, then
+ // 10. If then is an abrupt completion, then
Handle<Object> then_action;
if (!then.ToHandle(&then_action)) {
// a. Return RejectPromise(promise, then.[[Value]]).
@@ -6054,19 +6030,15 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
return Reject(promise, reason, false);
}
- // 10. Let thenAction be then.[[Value]].
- // 11. If IsCallable(thenAction) is false, then
+ // 11. Let thenAction be then.[[Value]].
+ // 12. If IsCallable(thenAction) is false, then
if (!then_action->IsCallable()) {
// a. Return FulfillPromise(promise, resolution).
return Fulfill(promise, resolution);
}
- // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
- // «promise, resolution, thenAction».
-
- // According to HTML, we use the context of the then function (|thenAction|)
- // as the context of the microtask. See step 3 of HTML's EnqueueJob:
- // https://html.spec.whatwg.org/C/#enqueuejob(queuename,-job,-arguments)
+ // 13. Let job be NewPromiseResolveThenableJob(promise, resolution,
+ // thenAction).
Handle<NativeContext> then_context;
if (!JSReceiver::GetContextForMicrotask(Handle<JSReceiver>::cast(then_action))
.ToHandle(&then_context)) {
@@ -6075,8 +6047,8 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
Handle<PromiseResolveThenableJobTask> task =
isolate->factory()->NewPromiseResolveThenableJobTask(
- promise, Handle<JSReceiver>::cast(then_action),
- Handle<JSReceiver>::cast(resolution), then_context);
+ promise, Handle<JSReceiver>::cast(resolution),
+ Handle<JSReceiver>::cast(then_action), then_context);
if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
// Mark the dependency of the new {promise} on the {resolution}.
Object::SetProperty(isolate, resolution,
@@ -6087,7 +6059,7 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
MicrotaskQueue* microtask_queue = then_context->microtask_queue();
if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
- // 13. Return undefined.
+ // 15. Return undefined.
return isolate->factory()->undefined_value();
}
@@ -7172,7 +7144,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
Handle<Context> native_context, LanguageMode language_mode) {
// We use the empty function SFI as part of the key. Although the
// empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
// reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
native_context->GetIsolate());
@@ -7230,7 +7202,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Isolate* isolate = native_context->GetIsolate();
// We use the empty function SFI as part of the key. Although the
// empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
// reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
isolate);
@@ -7880,13 +7852,6 @@ void JSSet::Clear(Isolate* isolate, Handle<JSSet> set) {
set->set_table(*table);
}
-void JSSet::Rehash(Isolate* isolate) {
- Handle<OrderedHashSet> table_handle(OrderedHashSet::cast(table()), isolate);
- Handle<OrderedHashSet> new_table =
- OrderedHashSet::Rehash(isolate, table_handle).ToHandleChecked();
- set_table(*new_table);
-}
-
void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
map->set_table(*table);
@@ -7898,13 +7863,6 @@ void JSMap::Clear(Isolate* isolate, Handle<JSMap> map) {
map->set_table(*table);
}
-void JSMap::Rehash(Isolate* isolate) {
- Handle<OrderedHashMap> table_handle(OrderedHashMap::cast(table()), isolate);
- Handle<OrderedHashMap> new_table =
- OrderedHashMap::Rehash(isolate, table_handle).ToHandleChecked();
- set_table(*new_table);
-}
-
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
Handle<EphemeronHashTable> table = EphemeronHashTable::New(isolate, 0);
@@ -8339,48 +8297,50 @@ EXTERN_DEFINE_BASE_NAME_DICTIONARY(GlobalDictionary, GlobalDictionaryShape)
#undef EXTERN_DEFINE_DICTIONARY
#undef EXTERN_DEFINE_BASE_NAME_DICTIONARY
-Maybe<bool> JSFinalizationRegistry::Cleanup(
- Isolate* isolate, Handle<JSFinalizationRegistry> finalization_registry,
- Handle<Object> cleanup) {
- DCHECK(cleanup->IsCallable());
- // Attempt to shrink key_map now, as unregister tokens are held weakly and the
- // map is not shrinkable when sweeping dead tokens during GC itself.
- if (!finalization_registry->key_map().IsUndefined(isolate)) {
- Handle<SimpleNumberDictionary> key_map =
- handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
- isolate);
- key_map = SimpleNumberDictionary::Shrink(isolate, key_map);
- finalization_registry->set_key_map(*key_map);
- }
-
- // It's possible that the cleared_cells list is empty, since
- // FinalizationRegistry.unregister() removed all its elements before this task
- // ran. In that case, don't call the cleanup function.
- if (!finalization_registry->cleared_cells().IsUndefined(isolate)) {
- // Construct the iterator.
- Handle<JSFinalizationRegistryCleanupIterator> iterator;
- {
- Handle<Map> cleanup_iterator_map(
- isolate->native_context()
- ->js_finalization_registry_cleanup_iterator_map(),
- isolate);
- iterator = Handle<JSFinalizationRegistryCleanupIterator>::cast(
- isolate->factory()->NewJSObjectFromMap(
- cleanup_iterator_map, AllocationType::kYoung,
- Handle<AllocationSite>::null()));
- iterator->set_finalization_registry(*finalization_registry);
- }
- Handle<Object> args[] = {iterator};
- if (Execution::Call(
- isolate, cleanup,
- handle(ReadOnlyRoots(isolate).undefined_value(), isolate), 1, args)
- .is_null()) {
- return Nothing<bool>();
+void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
+ Isolate* isolate, Address raw_finalization_registry,
+ Address raw_weak_cell) {
+ DisallowHeapAllocation no_gc;
+ JSFinalizationRegistry finalization_registry =
+ JSFinalizationRegistry::cast(Object(raw_finalization_registry));
+ WeakCell weak_cell = WeakCell::cast(Object(raw_weak_cell));
+ DCHECK(!weak_cell.unregister_token().IsUndefined(isolate));
+
+ // Remove weak_cell from the linked list of other WeakCells with the same
+ // unregister token and remove its unregister token from key_map if necessary
+ // without shrinking it. Since shrinking may allocate, it is performed by the
+ // caller after looping, or on exception.
+ if (weak_cell.key_list_prev().IsUndefined(isolate)) {
+ SimpleNumberDictionary key_map =
+ SimpleNumberDictionary::cast(finalization_registry.key_map());
+ Object unregister_token = weak_cell.unregister_token();
+ uint32_t key = Smi::ToInt(unregister_token.GetHash());
+ InternalIndex entry = key_map.FindEntry(isolate, key);
+ DCHECK(entry.is_found());
+
+ if (weak_cell.key_list_next().IsUndefined(isolate)) {
+ // weak_cell is the only one associated with its key; remove the key
+ // from the hash table.
+ key_map.ClearEntry(entry);
+ key_map.ElementRemoved();
+ } else {
+ // weak_cell is the list head for its key; we need to change the value
+ // of the key in the hash table.
+ WeakCell next = WeakCell::cast(weak_cell.key_list_next());
+ DCHECK_EQ(next.key_list_prev(), weak_cell);
+ next.set_key_list_prev(ReadOnlyRoots(isolate).undefined_value());
+ weak_cell.set_key_list_next(ReadOnlyRoots(isolate).undefined_value());
+ key_map.ValueAtPut(entry, next);
+ }
+ } else {
+ // weak_cell is somewhere in the middle of its key list.
+ WeakCell prev = WeakCell::cast(weak_cell.key_list_prev());
+ prev.set_key_list_next(weak_cell.key_list_next());
+ if (!weak_cell.key_list_next().IsUndefined()) {
+ WeakCell next = WeakCell::cast(weak_cell.key_list_next());
+ next.set_key_list_prev(weak_cell.key_list_prev());
}
- // TODO(marja): (spec): Should the iterator be invalidated after the
- // function returns?
}
- return Just(true);
}
} // namespace internal
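The JSPromise accessors above decode and update StatusBits and AsyncTaskIdBits out of a single flags word. As a rough illustration of that bit-field style, here is a standalone sketch with assumed field widths; it is not V8's actual base::BitField helper or JSPromise layout.

#include <cassert>
#include <cstdint>

// Each field exposes a kMask plus decode()/update() helpers, so a status()
// getter can mask out its own bits and a set_async_task_id() setter can
// rewrite only its own range of the packed word.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
};

// Assumed widths for illustration: status in the low 2 bits, task id above it.
using StatusBitsSketch = BitFieldSketch<int, 0, 2>;
using AsyncTaskIdBitsSketch = BitFieldSketch<int, 2, 22>;

int main() {
  uint32_t flags = 0;
  flags = StatusBitsSketch::update(flags, 1);        // e.g. a "fulfilled" state
  flags = AsyncTaskIdBitsSketch::update(flags, 42);  // async task id
  assert(StatusBitsSketch::decode(flags) == 1);
  assert(AsyncTaskIdBitsSketch::decode(flags) == 42);
  return 0;
}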
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index ef437446fd..a52865c23b 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -181,6 +181,7 @@
// - SourceTextModule
// - SyntheticModule
// - SourceTextModuleInfoEntry
+// - WasmValue
// - FeedbackCell
// - FeedbackVector
// - PreparseData
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index d3250bd92d..cbf3ba373b 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -196,13 +196,6 @@ HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
template <class Derived, int entrysize>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- Isolate* isolate, Handle<Derived> table) {
- return OrderedHashTable<Derived, entrysize>::Rehash(isolate, table,
- table->Capacity());
-}
-
-template <class Derived, int entrysize>
-MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
Isolate* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
@@ -257,20 +250,6 @@ MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
new_capacity);
}
-MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(
- Isolate* isolate, Handle<OrderedHashSet> table) {
- return OrderedHashTable<
- OrderedHashSet, OrderedHashSet::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
-}
-
-MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(
- Isolate* isolate, Handle<OrderedHashMap> table) {
- return OrderedHashTable<
- OrderedHashMap, OrderedHashMap::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
-}
-
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity) {
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 5f3c45a110..b587960432 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -138,7 +138,6 @@ class OrderedHashTable : public FixedArray {
// The extra +1 is for linking the bucket chains together.
static const int kEntrySize = entrysize + 1;
- static const int kEntrySizeWithoutChain = entrysize;
static const int kChainOffset = entrysize;
static const int kNotFound = -1;
@@ -201,8 +200,6 @@ class OrderedHashTable : public FixedArray {
static MaybeHandle<Derived> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
-
- static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table);
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
@@ -247,8 +244,6 @@ class V8_EXPORT_PRIVATE OrderedHashSet
static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
Handle<OrderedHashSet> table,
int new_capacity);
- static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
- Handle<OrderedHashSet> table);
static MaybeHandle<OrderedHashSet> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
@@ -278,8 +273,6 @@ class V8_EXPORT_PRIVATE OrderedHashMap
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity);
- static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
- Handle<OrderedHashMap> table);
Object ValueAt(int entry);
// This takes and returns raw Address values containing tagged Object
diff --git a/deps/v8/src/objects/promise.tq b/deps/v8/src/objects/promise.tq
index 391f742e44..90ef565cad 100644
--- a/deps/v8/src/objects/promise.tq
+++ b/deps/v8/src/objects/promise.tq
@@ -72,6 +72,6 @@ extern class PromiseRejectReactionJobTask extends PromiseReactionJobTask {
extern class PromiseResolveThenableJobTask extends Microtask {
context: Context;
promise_to_resolve: JSPromise;
- then: JSReceiver;
thenable: JSReceiver;
+ then: JSReceiver;
}
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index b9fb7ea4f9..d5876de2e0 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -22,6 +22,10 @@ namespace internal {
// Script describes a script which has been added to the VM.
class Script : public Struct {
public:
+ // Script ID used for temporary scripts, which shouldn't be added to the
+ // script list.
+ static constexpr int kTemporaryScriptId = -2;
+
NEVER_READ_ONLY_SPACE
// Script types.
enum Type {
@@ -197,7 +201,7 @@ class Script : public Struct {
// that matches the function literal. Return empty handle if not found.
template <typename LocalIsolate>
MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
- LocalIsolate* isolate, const FunctionLiteral* fun);
+ LocalIsolate* isolate, int function_literal_id);
// Iterate over all script objects on the heap.
class V8_EXPORT_PRIVATE Iterator {
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index af1b685ca8..169e3c0c15 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
#define V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
+#include "src/base/macros.h"
#include "src/objects/shared-function-info.h"
#include "src/handles/handles-inl.h"
@@ -250,6 +251,7 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
}
FunctionKind SharedFunctionInfo::kind() const {
+ STATIC_ASSERT(FunctionKindBits::kSize == kFunctionKindBitSize);
return FunctionKindBits::decode(flags());
}
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index f54df2b7ca..f1b0c9c2c4 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -583,6 +583,16 @@ Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace(
return Module::GetModuleNamespace(isolate, requested_module);
}
+Handle<JSObject> SourceTextModule::GetImportMeta(
+ Isolate* isolate, Handle<SourceTextModule> module) {
+ Handle<HeapObject> import_meta(module->import_meta(), isolate);
+ if (import_meta->IsTheHole(isolate)) {
+ import_meta = isolate->RunHostInitializeImportMetaObjectCallback(module);
+ module->set_import_meta(*import_meta);
+ }
+ return Handle<JSObject>::cast(import_meta);
+}
+
MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
Isolate* isolate, Handle<SourceTextModule> module) {
// In the event of errored evaluation, return a rejected promise.
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 57c84d833d..7e64668a7e 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -59,6 +59,11 @@ class SourceTextModule
static Handle<JSModuleNamespace> GetModuleNamespace(
Isolate* isolate, Handle<SourceTextModule> module, int module_request);
+ // Get the import.meta object of [module]. If it doesn't exist yet, it is
+ // created and passed to the embedder callback for initialization.
+ V8_EXPORT_PRIVATE static Handle<JSObject> GetImportMeta(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
using BodyDescriptor =
SubclassBodyDescriptor<Module::BodyDescriptor,
FixedBodyDescriptor<kCodeOffset, kSize, kSize>>;
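GetImportMeta above creates the import.meta object on first access: the field starts out as the hole, and the host callback fills it in exactly once before it is cached on the module. A standalone sketch of that lazy-initialization shape, using ordinary C++ types as stand-ins rather than V8 handles and the real embedder callback:

#include <cstdio>
#include <functional>
#include <memory>

struct MetaObject { int dummy = 0; };

class ModuleSketch {
 public:
  explicit ModuleSketch(std::function<std::shared_ptr<MetaObject>()> init)
      : init_(std::move(init)) {}

  std::shared_ptr<MetaObject> GetImportMeta() {
    if (!import_meta_) {       // plays the role of the "IsTheHole" check
      import_meta_ = init_();  // embedder-style callback initializes the object
    }
    return import_meta_;
  }

 private:
  std::function<std::shared_ptr<MetaObject>()> init_;
  std::shared_ptr<MetaObject> import_meta_;  // null plays the role of the hole
};

int main() {
  int calls = 0;
  ModuleSketch module([&] {
    ++calls;
    return std::make_shared<MetaObject>();
  });
  module.GetImportMeta();
  module.GetImportMeta();
  std::printf("callback ran %d time(s)\n", calls);  // prints 1
  return 0;
}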
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 5dd460a959..78f859b78c 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -5,8 +5,9 @@
#ifndef V8_OBJECTS_STRING_INL_H_
#define V8_OBJECTS_STRING_INL_H_
-#include "src/objects/string.h"
-
+#include "src/common/external-pointer-inl.h"
+#include "src/common/external-pointer.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/numbers/conversions-inl.h"
@@ -14,6 +15,7 @@
#include "src/objects/name-inl.h"
#include "src/objects/smi-inl.h"
#include "src/objects/string-table-inl.h"
+#include "src/objects/string.h"
#include "src/strings/string-hasher-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -569,12 +571,13 @@ void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
WriteField<uint16_t>(kHeaderSize + index * kShortSize, value);
}
-int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
+// Due to ThinString rewriting, concurrent visitors need to read the length with
+// acquire semantics.
+inline int SeqOneByteString::AllocatedSize() {
+ return SizeFor(synchronized_length());
}
-
-int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
+inline int SeqTwoByteString::AllocatedSize() {
+ return SizeFor(synchronized_length());
}
void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
@@ -599,54 +602,71 @@ bool ExternalString::is_uncached() const {
return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
-Address ExternalString::resource_as_address() {
- return ReadField<Address>(kResourceOffset);
+DEF_GETTER(ExternalString, resource_as_address, Address) {
+ ExternalPointer_t encoded_address =
+ ReadField<ExternalPointer_t>(kResourceOffset);
+ return DecodeExternalPointer(isolate, encoded_address);
}
-void ExternalString::set_address_as_resource(Address address) {
- WriteField<Address>(kResourceOffset, address);
+void ExternalString::set_address_as_resource(Isolate* isolate,
+ Address address) {
+ const ExternalPointer_t encoded_address =
+ EncodeExternalPointer(isolate, address);
+ WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
if (IsExternalOneByteString()) {
- ExternalOneByteString::cast(*this).update_data_cache();
+ ExternalOneByteString::cast(*this).update_data_cache(isolate);
} else {
- ExternalTwoByteString::cast(*this).update_data_cache();
+ ExternalTwoByteString::cast(*this).update_data_cache(isolate);
}
}
uint32_t ExternalString::resource_as_uint32() {
- return static_cast<uint32_t>(ReadField<Address>(kResourceOffset));
+ ExternalPointer_t encoded_address =
+ ReadField<ExternalPointer_t>(kResourceOffset);
+ return static_cast<uint32_t>(encoded_address);
}
-void ExternalString::set_uint32_as_resource(uint32_t value) {
- WriteField<Address>(kResourceOffset, value);
+void ExternalString::set_uint32_as_resource(Isolate* isolate, uint32_t value) {
+ WriteField<ExternalPointer_t>(kResourceOffset, value);
if (is_uncached()) return;
- WriteField<Address>(kResourceDataOffset, kNullAddress);
+ WriteField<ExternalPointer_t>(kResourceDataOffset,
+ EncodeExternalPointer(isolate, kNullAddress));
}
-void ExternalString::DisposeResource() {
+void ExternalString::DisposeResource(Isolate* isolate) {
+ const ExternalPointer_t encoded_address =
+ ReadField<ExternalPointer_t>(kResourceOffset);
v8::String::ExternalStringResourceBase* resource =
reinterpret_cast<v8::String::ExternalStringResourceBase*>(
- ReadField<Address>(ExternalString::kResourceOffset));
+ DecodeExternalPointer(isolate, encoded_address));
// Dispose of the C++ object if it has not already been disposed.
if (resource != nullptr) {
resource->Dispose();
- WriteField<Address>(ExternalString::kResourceOffset, kNullAddress);
+ const ExternalPointer_t encoded_address =
+ EncodeExternalPointer(isolate, kNullAddress);
+ WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
}
}
-const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
- return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
+DEF_GETTER(ExternalOneByteString, resource,
+ const ExternalOneByteString::Resource*) {
+ const ExternalPointer_t encoded_address =
+ ReadField<ExternalPointer_t>(kResourceOffset);
+ return reinterpret_cast<Resource*>(
+ DecodeExternalPointer(isolate, encoded_address));
}
-void ExternalOneByteString::update_data_cache() {
+void ExternalOneByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- WriteField<Address>(kResourceDataOffset,
- reinterpret_cast<Address>(resource()->data()));
+ const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
+ isolate, reinterpret_cast<Address>(resource()->data()));
+ WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
}
void ExternalOneByteString::SetResource(
Isolate* isolate, const ExternalOneByteString::Resource* resource) {
- set_resource(resource);
+ set_resource(isolate, resource);
size_t new_payload = resource == nullptr ? 0 : resource->length();
if (new_payload > 0) {
isolate->heap()->UpdateExternalString(*this, 0, new_payload);
@@ -654,9 +674,11 @@ void ExternalOneByteString::SetResource(
}
void ExternalOneByteString::set_resource(
- const ExternalOneByteString::Resource* resource) {
- WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
- if (resource != nullptr) update_data_cache();
+ Isolate* isolate, const ExternalOneByteString::Resource* resource) {
+ const ExternalPointer_t encoded_address =
+ EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
+ WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ if (resource != nullptr) update_data_cache(isolate);
}
const uint8_t* ExternalOneByteString::GetChars() {
@@ -668,19 +690,24 @@ uint8_t ExternalOneByteString::Get(int index) {
return GetChars()[index];
}
-const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
+DEF_GETTER(ExternalTwoByteString, resource,
+ const ExternalTwoByteString::Resource*) {
+ const ExternalPointer_t encoded_address =
+ ReadField<ExternalPointer_t>(kResourceOffset);
+ return reinterpret_cast<Resource*>(
+ DecodeExternalPointer(isolate, encoded_address));
}
-void ExternalTwoByteString::update_data_cache() {
+void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- WriteField<Address>(kResourceDataOffset,
- reinterpret_cast<Address>(resource()->data()));
+ const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
+ isolate, reinterpret_cast<Address>(resource()->data()));
+ WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
}
void ExternalTwoByteString::SetResource(
Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
- set_resource(resource);
+ set_resource(isolate, resource);
size_t new_payload = resource == nullptr ? 0 : resource->length() * 2;
if (new_payload > 0) {
isolate->heap()->UpdateExternalString(*this, 0, new_payload);
@@ -688,9 +715,11 @@ void ExternalTwoByteString::SetResource(
}
void ExternalTwoByteString::set_resource(
- const ExternalTwoByteString::Resource* resource) {
- WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
- if (resource != nullptr) update_data_cache();
+ Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
+ const ExternalPointer_t encoded_address =
+ EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
+ WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ if (resource != nullptr) update_data_cache(isolate);
}
const uint16_t* ExternalTwoByteString::GetChars() { return resource()->data(); }
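The new AllocatedSize() helpers above read the length via synchronized_length() because a ThinString transition can rewrite a string while concurrent marking visitors are sizing it. A standalone sketch of the underlying acquire/release idiom, with a hypothetical struct instead of V8's object layout:

#include <atomic>
#include <cstdio>
#include <thread>

// Writer publishes a string's payload first, then its length with release;
// a concurrent visitor that loads the length with acquire is then guaranteed
// to also observe the payload bytes it is sizing.
struct SketchString {
  char payload[64];
  std::atomic<int> length{0};
};

void Publish(SketchString* s, const char* data, int len) {
  for (int i = 0; i < len; i++) s->payload[i] = data[i];
  s->length.store(len, std::memory_order_release);  // publish after payload
}

int VisitAllocatedSize(const SketchString* s) {
  // Acquire pairs with the release store: if a non-zero length is visible,
  // the payload written before it is visible too.
  int len = s->length.load(std::memory_order_acquire);
  return len;  // a real visitor would compute SizeFor(len) here
}

int main() {
  SketchString s;
  std::thread writer(Publish, &s, "hello", 5);
  writer.join();
  std::printf("visible length: %d\n", VisitAllocatedSize(&s));
  return 0;
}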
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 54af9a4452..76f29a01e3 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -16,7 +16,7 @@ namespace internal {
class StringTableKey {
public:
- virtual ~StringTableKey() {}
+ virtual ~StringTableKey() = default;
inline StringTableKey(uint32_t hash_field, int length);
virtual Handle<String> AsHandle(Isolate* isolate) = 0;
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 8c96900bbe..90abd00eba 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -6,7 +6,8 @@
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
-#include "src/heap/heap-inl.h" // For LooksValid implementation.
+#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/numbers/conversions.h"
#include "src/objects/map.h"
@@ -1113,13 +1114,8 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
isolate, capture,
match->GetNamedCapture(capture_name, &capture_state), String);
- switch (capture_state) {
- case CaptureState::INVALID:
- case CaptureState::UNMATCHED:
- break;
- case CaptureState::MATCHED:
- builder.AppendString(capture);
- break;
+ if (capture_state == CaptureState::MATCHED) {
+ builder.AppendString(capture);
}
continue_from_ix = closing_bracket_ix + 1;
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 616a4b967b..f9f92a53dd 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -259,9 +259,9 @@ class String : public TorqueGeneratedString<String, Name> {
virtual Handle<String> GetPrefix() = 0;
virtual Handle<String> GetSuffix() = 0;
- // A named capture can be invalid (if it is not specified in the pattern),
- // unmatched (specified but not matched in the current string), and matched.
- enum CaptureState { INVALID, UNMATCHED, MATCHED };
+ // A named capture can be unmatched (either not specified in the pattern,
+ // or specified but unmatched in the current string), or matched.
+ enum CaptureState { UNMATCHED, MATCHED };
virtual int CaptureCount() = 0;
virtual bool HasNamedCaptures() = 0;
@@ -558,16 +558,13 @@ class SeqOneByteString
// instance.
inline int SeqOneByteStringSize(InstanceType instance_type);
- // Computes the size for an OneByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
- }
-
// Maximal memory usage for a single sequential one-byte string.
static const int kMaxCharsSize = kMaxLength;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
+ int AllocatedSize();
+
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(SeqOneByteString)
@@ -599,17 +596,14 @@ class SeqTwoByteString
// instance.
inline int SeqTwoByteStringSize(InstanceType instance_type);
- // Computes the size for a TwoByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
- }
-
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxCharsSize = kMaxLength * 2;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize) / sizeof(uint16_t)) >=
String::kMaxLength);
+ int AllocatedSize();
+
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(SeqTwoByteString)
@@ -639,7 +633,7 @@ class ConsString : public TorqueGeneratedConsString<ConsString, String> {
// Minimum length for a cons string.
static const int kMinLength = 13;
- using BodyDescriptor = FixedBodyDescriptor<kFirstOffset, kSize, kSize>;
+ class BodyDescriptor;
DECL_VERIFIER(ConsString)
@@ -661,7 +655,7 @@ class ThinString : public TorqueGeneratedThinString<ThinString, String> {
DECL_VERIFIER(ThinString)
- using BodyDescriptor = FixedBodyDescriptor<kActualOffset, kSize, kSize>;
+ class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(ThinString)
};
@@ -688,7 +682,7 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
// Minimum length for a sliced string.
static const int kMinLength = 13;
- using BodyDescriptor = FixedBodyDescriptor<kParentOffset, kSize, kSize>;
+ class BodyDescriptor;
DECL_VERIFIER(SlicedString)
@@ -722,13 +716,13 @@ class ExternalString : public String {
int ExternalPayloadSize() const;
// Used in the serializer/deserializer.
- inline Address resource_as_address();
- inline void set_address_as_resource(Address address);
+ DECL_GETTER(resource_as_address, Address)
+ inline void set_address_as_resource(Isolate* isolate, Address address);
inline uint32_t resource_as_uint32();
- inline void set_uint32_as_resource(uint32_t value);
+ inline void set_uint32_as_resource(Isolate* isolate, uint32_t value);
// Disposes string's resource object if it has not already been disposed.
- inline void DisposeResource();
+ inline void DisposeResource(Isolate* isolate);
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
static const int kSizeOfAllExternalStrings = kHeaderSize;
@@ -745,19 +739,19 @@ class ExternalOneByteString : public ExternalString {
using Resource = v8::String::ExternalOneByteStringResource;
// The underlying resource.
- inline const Resource* resource();
+ DECL_GETTER(resource, const Resource*)
// It is assumed that the previous resource is null. If it is not null, then
// it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
// Used only during serialization.
- inline void set_resource(const Resource* buffer);
+ inline void set_resource(Isolate* isolate, const Resource* buffer);
// Update the pointer cache to the external character array.
// The cached pointer is always valid, as the external character array does
// not move during lifetime. Deserialization is the only exception, after
// which the pointer cache has to be refreshed.
- inline void update_data_cache();
+ inline void update_data_cache(Isolate* isolate);
inline const uint8_t* GetChars();
@@ -786,19 +780,19 @@ class ExternalTwoByteString : public ExternalString {
using Resource = v8::String::ExternalStringResource;
// The underlying string resource.
- inline const Resource* resource();
+ DECL_GETTER(resource, const Resource*)
// It is assumed that the previous resource is null. If it is not null, then
// it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
// Used only during serialization.
- inline void set_resource(const Resource* buffer);
+ inline void set_resource(Isolate* isolate, const Resource* buffer);
// Update the pointer cache to the external character array.
// The cached pointer is always valid, as the external character array does
// not move during lifetime. Deserialization is the only exception, after
// which the pointer cache has to be refreshed.
- inline void update_data_cache();
+ inline void update_data_cache(Isolate* isolate);
inline const uint16_t* GetChars();
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index b8d9714b36..7d3f250964 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -10,15 +10,17 @@ extern class String extends Name {
}
@generateCppClass
+@generateBodyDescriptor
extern class ConsString extends String {
first: String;
second: String;
}
@abstract
+@generateBodyDescriptor
extern class ExternalString extends String {
- resource: RawPtr;
- resource_data: RawPtr;
+ resource: ExternalPointer;
+ resource_data: ExternalPointer;
}
extern class ExternalOneByteString extends ExternalString {}
@@ -28,27 +30,30 @@ extern class ExternalTwoByteString extends ExternalString {}
extern class InternalizedString extends String {
}
-// TODO(v8:8983): Add declaration for variable-sized region.
@abstract
@generateCppClass
extern class SeqString extends String {
}
@generateCppClass
+@generateBodyDescriptor
extern class SeqOneByteString extends SeqString {
chars[length]: char8;
}
@generateCppClass
+@generateBodyDescriptor
extern class SeqTwoByteString extends SeqString {
chars[length]: char16;
}
@generateCppClass
+@generateBodyDescriptor
extern class SlicedString extends String {
parent: String;
offset: Smi;
}
@generateCppClass
+@generateBodyDescriptor
extern class ThinString extends String {
actual: String;
}
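In the Torque definitions above, the resource and resource_data fields change from RawPtr to ExternalPointer, matching the EncodeExternalPointer/DecodeExternalPointer calls in string-inl.h: off-heap addresses are stored in encoded form and decoded per isolate on access. The sketch below only illustrates the encode-on-write / decode-on-read shape with an assumed XOR scheme; it is not V8's actual encoding.

#include <cstdint>
#include <cstdio>

using ExternalPointerSketch = uint64_t;

struct IsolateSketch {
  uint64_t external_pointer_salt;  // stand-in for per-isolate encoding state
};

ExternalPointerSketch Encode(const IsolateSketch& isolate, uint64_t address) {
  return address ^ isolate.external_pointer_salt;
}

uint64_t Decode(const IsolateSketch& isolate, ExternalPointerSketch encoded) {
  return encoded ^ isolate.external_pointer_salt;
}

int main() {
  IsolateSketch isolate{0x5a5a5a5a5a5a5a5aULL};
  uint64_t resource_address = 0x00007f1234560000ULL;
  ExternalPointerSketch stored = Encode(isolate, resource_address);
  std::printf("round-trips: %d\n", Decode(isolate, stored) == resource_address);
  return 0;
}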
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index c302fdb610..20bbb1ff1c 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -16,29 +16,29 @@ namespace internal {
class CodeDataContainer;
-#define ROOT_ID_LIST(V) \
- V(kStringTable, "(Internalized strings)") \
- V(kExternalStringsTable, "(External strings)") \
- V(kReadOnlyRootList, "(Read-only roots)") \
- V(kStrongRootList, "(Strong roots)") \
- V(kSmiRootList, "(Smi roots)") \
- V(kBootstrapper, "(Bootstrapper)") \
- V(kTop, "(Isolate)") \
- V(kRelocatable, "(Relocatable)") \
- V(kDebug, "(Debugger)") \
- V(kCompilationCache, "(Compilation cache)") \
- V(kHandleScope, "(Handle scope)") \
- V(kBuiltins, "(Builtins)") \
- V(kGlobalHandles, "(Global handles)") \
- V(kEternalHandles, "(Eternal handles)") \
- V(kThreadManager, "(Thread manager)") \
- V(kStrongRoots, "(Strong roots)") \
- V(kExtensions, "(Extensions)") \
- V(kCodeFlusher, "(Code flusher)") \
- V(kPartialSnapshotCache, "(Partial snapshot cache)") \
- V(kReadOnlyObjectCache, "(Read-only object cache)") \
- V(kWeakCollections, "(Weak collections)") \
- V(kWrapperTracing, "(Wrapper tracing)") \
+#define ROOT_ID_LIST(V) \
+ V(kStringTable, "(Internalized strings)") \
+ V(kExternalStringsTable, "(External strings)") \
+ V(kReadOnlyRootList, "(Read-only roots)") \
+ V(kStrongRootList, "(Strong roots)") \
+ V(kSmiRootList, "(Smi roots)") \
+ V(kBootstrapper, "(Bootstrapper)") \
+ V(kTop, "(Isolate)") \
+ V(kRelocatable, "(Relocatable)") \
+ V(kDebug, "(Debugger)") \
+ V(kCompilationCache, "(Compilation cache)") \
+ V(kHandleScope, "(Handle scope)") \
+ V(kBuiltins, "(Builtins)") \
+ V(kGlobalHandles, "(Global handles)") \
+ V(kEternalHandles, "(Eternal handles)") \
+ V(kThreadManager, "(Thread manager)") \
+ V(kStrongRoots, "(Strong roots)") \
+ V(kExtensions, "(Extensions)") \
+ V(kCodeFlusher, "(Code flusher)") \
+ V(kStartupObjectCache, "(Startup object cache)") \
+ V(kReadOnlyObjectCache, "(Read-only object cache)") \
+ V(kWeakCollections, "(Weak collections)") \
+ V(kWrapperTracing, "(Wrapper tracing)") \
V(kUnknown, "(Unknown)")
class VisitorSynchronization : public AllStatic {
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 6dd55a25c2..9cb04003ab 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -53,15 +53,36 @@ class ExpressionScope {
AsExpressionParsingScope()->TrackVariable(result);
} else {
Variable* var = Declare(name, pos);
- if (IsVarDeclaration() && !parser()->scope()->is_declaration_scope()) {
- // Make sure we'll properly resolve the variable since we might be in a
- // with or catch scope. In those cases the proxy isn't guaranteed to
- // refer to the declared variable, so consider it unresolved.
- parser()->scope()->AddUnresolved(result);
- } else {
- DCHECK_NOT_NULL(var);
- result->BindTo(var);
+ if (IsVarDeclaration()) {
+ bool passed_through_with = false;
+ for (Scope* scope = parser()->scope(); !scope->is_declaration_scope();
+ scope = scope->outer_scope()) {
+ if (scope->is_with_scope()) {
+ passed_through_with = true;
+ } else if (scope->is_catch_scope()) {
+ Variable* var = scope->LookupLocal(name);
+ // If a variable is declared in a catch scope with a masking
+ // catch-declared variable, the initializing assignment is an
+ // assignment to the catch-declared variable instead.
+ // https://tc39.es/ecma262/#sec-variablestatements-in-catch-blocks
+ if (var != nullptr) {
+ result->set_is_assigned();
+ if (passed_through_with) break;
+ result->BindTo(var);
+ var->SetMaybeAssigned();
+ return result;
+ }
+ }
+ }
+ if (passed_through_with) {
+ // If a variable is declared in a with scope, the initializing
+ // assignment might target a with-declared variable instead.
+ parser()->scope()->AddUnresolved(result);
+ return result;
+ }
}
+ DCHECK_NOT_NULL(var);
+ result->BindTo(var);
}
return result;
}
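The new branch above walks the scope chain for a var declaration: for code like `try { } catch (e) { var e = 1; }` the initializer must assign the catch-bound e, while an intervening with scope forces the reference to stay unresolved. A simplified stand-in model of that decision, with hypothetical Scope/Binding types rather than V8's classes:

#include <cstdio>
#include <string>
#include <vector>

enum class Kind { kDeclaration, kWith, kCatch, kBlock };

struct Scope {
  Kind kind;
  std::string catch_name;  // only meaningful for kCatch
};

enum class Binding { kBindToVarScope, kBindToCatchVariable, kLeaveUnresolved };

// Walk from the current scope out to the declaration scope, noting
// with-scopes and catch-bound names, mirroring the logic sketched above.
Binding ResolveVarDeclaration(const std::vector<Scope>& inner_to_outer,
                              const std::string& name) {
  bool passed_through_with = false;
  for (const Scope& scope : inner_to_outer) {
    if (scope.kind == Kind::kDeclaration) break;
    if (scope.kind == Kind::kWith) {
      passed_through_with = true;
    } else if (scope.kind == Kind::kCatch && scope.catch_name == name) {
      // The initializing assignment targets the catch-declared variable,
      // unless a with-scope in between makes the target ambiguous.
      return passed_through_with ? Binding::kLeaveUnresolved
                                 : Binding::kBindToCatchVariable;
    }
  }
  return passed_through_with ? Binding::kLeaveUnresolved
                             : Binding::kBindToVarScope;
}

int main() {
  // Models: function f() { try {} catch (e) { var e = 1; } }
  std::vector<Scope> scopes = {{Kind::kCatch, "e"}, {Kind::kDeclaration, ""}};
  std::printf("binds to catch variable: %d\n",
              ResolveVarDeclaration(scopes, "e") ==
                  Binding::kBindToCatchVariable);
  return 0;
}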
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 3723636804..37432e05b7 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -20,64 +20,108 @@
namespace v8 {
namespace internal {
-ParseInfo::ParseInfo(AccountingAllocator* zone_allocator, int script_id)
- : zone_(std::make_unique<Zone>(zone_allocator, ZONE_NAME)),
- flags_(0),
- extension_(nullptr),
- script_scope_(nullptr),
- stack_limit_(0),
- hash_seed_(0),
- function_kind_(FunctionKind::kNormalFunction),
- function_syntax_kind_(FunctionSyntaxKind::kDeclaration),
+UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
+ int script_id)
+ : flags_(0),
script_id_(script_id),
- start_position_(0),
- end_position_(0),
- parameters_end_pos_(kNoSourcePosition),
- function_literal_id_(kFunctionLiteralIdInvalid),
- max_function_literal_id_(kFunctionLiteralIdInvalid),
- character_stream_(nullptr),
- ast_value_factory_(nullptr),
- ast_string_constants_(nullptr),
- function_name_(nullptr),
- runtime_call_stats_(nullptr),
- source_range_map_(nullptr),
- literal_(nullptr) {}
-
-ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator,
- int script_id)
- : ParseInfo(zone_allocator, script_id) {
- set_hash_seed(HashSeed(isolate));
- set_stack_limit(isolate->stack_guard()->real_climit());
- set_runtime_call_stats(isolate->counters()->runtime_call_stats());
- set_logger(isolate->logger());
- set_ast_string_constants(isolate->ast_string_constants());
- set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
- isolate->NeedsDetailedOptimizedCodeLineInfo());
- if (!isolate->is_best_effort_code_coverage()) set_coverage_enabled();
- if (isolate->is_block_code_coverage()) set_block_coverage_enabled();
- if (isolate->is_collecting_type_profile()) set_collect_type_profile();
- if (isolate->compiler_dispatcher()->IsEnabled()) {
- parallel_tasks_.reset(new ParallelTasks(isolate->compiler_dispatcher()));
- }
+ function_kind_(FunctionKind::kNormalFunction),
+ function_syntax_kind_(FunctionSyntaxKind::kDeclaration) {
+ set_collect_type_profile(isolate->is_collecting_type_profile());
+ set_coverage_enabled(!isolate->is_best_effort_code_coverage());
+ set_block_coverage_enabled(isolate->is_block_code_coverage());
set_might_always_opt(FLAG_always_opt || FLAG_prepare_always_opt);
- set_allow_lazy_compile(FLAG_lazy);
set_allow_natives_syntax(FLAG_allow_natives_syntax);
+ set_allow_lazy_compile(FLAG_lazy);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
- set_allow_harmony_optional_chaining(FLAG_harmony_optional_chaining);
- set_allow_harmony_nullish(FLAG_harmony_nullish);
set_allow_harmony_private_methods(FLAG_harmony_private_methods);
+ set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
+ isolate->NeedsDetailedOptimizedCodeLineInfo());
set_allow_harmony_top_level_await(FLAG_harmony_top_level_await);
+ set_allow_harmony_logical_assignment(FLAG_harmony_logical_assignment);
+}
+
+// static
+UnoptimizedCompileFlags UnoptimizedCompileFlags::ForFunctionCompile(
+ Isolate* isolate, SharedFunctionInfo shared) {
+ Script script = Script::cast(shared.script());
+
+ UnoptimizedCompileFlags flags(isolate, script.id());
+
+ flags.SetFlagsFromFunction(&shared);
+ flags.SetFlagsForFunctionFromScript(script);
+
+ flags.set_allow_lazy_parsing(true);
+ flags.set_is_asm_wasm_broken(shared.is_asm_wasm_broken());
+ flags.set_is_repl_mode(shared.is_repl_mode());
+
+ // CollectTypeProfile uses its own feedback slots. If we have existing
+ // FeedbackMetadata, we can only collect type profile if the feedback vector
+ // has the appropriate slots.
+ flags.set_collect_type_profile(
+ isolate->is_collecting_type_profile() &&
+ (shared.HasFeedbackMetadata()
+ ? shared.feedback_metadata().HasTypeProfileSlot()
+ : script.IsUserJavaScript()));
+
+ // Do not support re-parsing top-level function of a wrapped script.
+ DCHECK_IMPLIES(flags.is_toplevel(), !script.is_wrapped());
+
+ return flags;
+}
+
+// static
+UnoptimizedCompileFlags UnoptimizedCompileFlags::ForScriptCompile(
+ Isolate* isolate, Script script) {
+ UnoptimizedCompileFlags flags(isolate, script.id());
+
+ flags.SetFlagsForFunctionFromScript(script);
+ flags.SetFlagsForToplevelCompile(
+ isolate->is_collecting_type_profile(), script.IsUserJavaScript(),
+ flags.outer_language_mode(), construct_repl_mode(script.is_repl_mode()));
+ if (script.is_wrapped()) {
+ flags.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
+ }
+
+ return flags;
}
-ParseInfo::ParseInfo(Isolate* isolate)
- : ParseInfo(isolate, isolate->allocator(), isolate->GetNextScriptId()) {
- LOG(isolate, ScriptEvent(Logger::ScriptEventType::kReserveId, script_id()));
+// static
+UnoptimizedCompileFlags UnoptimizedCompileFlags::ForToplevelCompile(
+ Isolate* isolate, bool is_user_javascript, LanguageMode language_mode,
+ REPLMode repl_mode) {
+ UnoptimizedCompileFlags flags(isolate, isolate->GetNextScriptId());
+ flags.SetFlagsForToplevelCompile(isolate->is_collecting_type_profile(),
+ is_user_javascript, language_mode,
+ repl_mode);
+
+ LOG(isolate,
+ ScriptEvent(Logger::ScriptEventType::kReserveId, flags.script_id()));
+ return flags;
+}
+
+// static
+UnoptimizedCompileFlags UnoptimizedCompileFlags::ForToplevelFunction(
+ const UnoptimizedCompileFlags toplevel_flags,
+ const FunctionLiteral* literal) {
+ DCHECK(toplevel_flags.is_toplevel());
+ DCHECK(!literal->is_toplevel());
+
+ // Replicate the toplevel flags, then setup the function-specific flags.
+ UnoptimizedCompileFlags flags = toplevel_flags;
+ flags.SetFlagsFromFunction(literal);
+
+ return flags;
+}
+
+// static
+UnoptimizedCompileFlags UnoptimizedCompileFlags::ForTest(Isolate* isolate) {
+ return UnoptimizedCompileFlags(isolate, Script::kTemporaryScriptId);
}
template <typename T>
-void ParseInfo::SetFunctionInfo(T function) {
- set_language_mode(function->language_mode());
+void UnoptimizedCompileFlags::SetFlagsFromFunction(T function) {
+ set_outer_language_mode(function->language_mode());
set_function_kind(function->kind());
set_function_syntax_kind(function->syntax_kind());
set_requires_instance_members_initializer(
@@ -85,63 +129,88 @@ void ParseInfo::SetFunctionInfo(T function) {
set_class_scope_has_private_brand(function->class_scope_has_private_brand());
set_has_static_private_methods_or_accessors(
function->has_static_private_methods_or_accessors());
- set_toplevel(function->is_toplevel());
+ set_is_toplevel(function->is_toplevel());
set_is_oneshot_iife(function->is_oneshot_iife());
}
-ParseInfo::ParseInfo(Isolate* isolate, SharedFunctionInfo shared)
- : ParseInfo(isolate, isolate->allocator(),
- Script::cast(shared.script()).id()) {
- // Do not support re-parsing top-level function of a wrapped script.
- // TODO(yangguo): consider whether we need a top-level function in a
- // wrapped script at all.
- DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared.script()).is_wrapped());
-
+void UnoptimizedCompileFlags::SetFlagsForToplevelCompile(
+ bool is_collecting_type_profile, bool is_user_javascript,
+ LanguageMode language_mode, REPLMode repl_mode) {
set_allow_lazy_parsing(true);
- set_asm_wasm_broken(shared.is_asm_wasm_broken());
+ set_is_toplevel(true);
+ set_collect_type_profile(is_user_javascript && is_collecting_type_profile);
+ set_outer_language_mode(
+ stricter_language_mode(outer_language_mode(), language_mode));
+ set_is_repl_mode((repl_mode == REPLMode::kYes));
- set_start_position(shared.StartPosition());
- set_end_position(shared.EndPosition());
- function_literal_id_ = shared.function_literal_id();
- SetFunctionInfo(&shared);
+ set_block_coverage_enabled(block_coverage_enabled() && is_user_javascript);
+}
- Script script = Script::cast(shared.script());
- SetFlagsForFunctionFromScript(script);
+void UnoptimizedCompileFlags::SetFlagsForFunctionFromScript(Script script) {
+ DCHECK_EQ(script_id(), script.id());
- set_repl_mode(shared.is_repl_mode());
+ set_is_eval(script.compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ set_is_module(script.origin_options().IsModule());
+ DCHECK(!(is_eval() && is_module()));
- // CollectTypeProfile uses its own feedback slots. If we have existing
- // FeedbackMetadata, we can only collect type profile if the feedback vector
- // has the appropriate slots.
- set_collect_type_profile(
- isolate->is_collecting_type_profile() &&
- (shared.HasFeedbackMetadata()
- ? shared.feedback_metadata().HasTypeProfileSlot()
- : script.IsUserJavaScript()));
+ set_block_coverage_enabled(block_coverage_enabled() &&
+ script.IsUserJavaScript());
}
-ParseInfo::ParseInfo(Isolate* isolate, Script script)
- : ParseInfo(isolate, isolate->allocator(), script.id()) {
- SetFlagsForToplevelCompileFromScript(isolate, script,
- isolate->is_collecting_type_profile());
+UnoptimizedCompileState::UnoptimizedCompileState(Isolate* isolate)
+ : hash_seed_(HashSeed(isolate)),
+ allocator_(isolate->allocator()),
+ ast_string_constants_(isolate->ast_string_constants()),
+ logger_(isolate->logger()),
+ parallel_tasks_(isolate->compiler_dispatcher()->IsEnabled()
+ ? new ParallelTasks(isolate->compiler_dispatcher())
+ : nullptr) {}
+
+UnoptimizedCompileState::UnoptimizedCompileState(
+ const UnoptimizedCompileState& other) V8_NOEXCEPT
+ : hash_seed_(other.hash_seed()),
+ allocator_(other.allocator()),
+ ast_string_constants_(other.ast_string_constants()),
+ logger_(other.logger()),
+ // TODO(leszeks): Should this create a new ParallelTasks instance?
+ parallel_tasks_(nullptr) {}
+
+ParseInfo::ParseInfo(const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state)
+ : flags_(flags),
+ state_(state),
+ zone_(std::make_unique<Zone>(state->allocator(), ZONE_NAME)),
+ extension_(nullptr),
+ script_scope_(nullptr),
+ stack_limit_(0),
+ parameters_end_pos_(kNoSourcePosition),
+ max_function_literal_id_(kFunctionLiteralIdInvalid),
+ character_stream_(nullptr),
+ ast_value_factory_(nullptr),
+ function_name_(nullptr),
+ runtime_call_stats_(nullptr),
+ source_range_map_(nullptr),
+ literal_(nullptr),
+ allow_eval_cache_(false),
+ contains_asm_module_(false),
+ language_mode_(flags.outer_language_mode()) {
+ if (flags.block_coverage_enabled()) {
+ AllocateSourceRangeMap();
+ }
+}
+
+ParseInfo::ParseInfo(Isolate* isolate, const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state)
+ : ParseInfo(flags, state) {
+ SetPerThreadState(isolate->stack_guard()->real_climit(),
+ isolate->counters()->runtime_call_stats());
}
// static
-std::unique_ptr<ParseInfo> ParseInfo::FromParent(
- const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
+std::unique_ptr<ParseInfo> ParseInfo::ForToplevelFunction(
+ const UnoptimizedCompileFlags flags, UnoptimizedCompileState* compile_state,
const FunctionLiteral* literal, const AstRawString* function_name) {
- // Can't use make_unique because the constructor is private.
- std::unique_ptr<ParseInfo> result(
- new ParseInfo(zone_allocator, outer_parse_info->script_id_));
-
- // Replicate shared state of the outer_parse_info.
- result->flags_ = outer_parse_info->flags_;
- result->set_logger(outer_parse_info->logger());
- result->set_ast_string_constants(outer_parse_info->ast_string_constants());
- result->set_hash_seed(outer_parse_info->hash_seed());
-
- DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
- DCHECK_NULL(outer_parse_info->extension());
+ std::unique_ptr<ParseInfo> result(new ParseInfo(flags, compile_state));
// Clone the function_name AstRawString into the ParseInfo's own
// AstValueFactory.
@@ -152,10 +221,6 @@ std::unique_ptr<ParseInfo> ParseInfo::FromParent(
// Setup function specific details.
DCHECK(!literal->is_toplevel());
result->set_function_name(cloned_function_name);
- result->set_start_position(literal->start_position());
- result->set_end_position(literal->end_position());
- result->set_function_literal_id(literal->function_literal_id());
- result->SetFunctionInfo(literal);
return result;
}
@@ -165,17 +230,15 @@ ParseInfo::~ParseInfo() = default;
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
template <typename LocalIsolate>
-Handle<Script> ParseInfo::CreateScript(LocalIsolate* isolate,
- Handle<String> source,
- ScriptOriginOptions origin_options,
- NativesFlag natives) {
+Handle<Script> ParseInfo::CreateScript(
+ LocalIsolate* isolate, Handle<String> source,
+ MaybeHandle<FixedArray> maybe_wrapped_arguments,
+ ScriptOriginOptions origin_options, NativesFlag natives) {
// Create a script object describing the script to be compiled.
- DCHECK_GE(script_id_, 0);
+ DCHECK(flags().script_id() >= 0 ||
+ flags().script_id() == Script::kTemporaryScriptId);
Handle<Script> script =
- isolate->factory()->NewScriptWithId(source, script_id_);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(isolate, script);
- }
+ isolate->factory()->NewScriptWithId(source, flags().script_id());
switch (natives) {
case EXTENSION_CODE:
script->set_type(Script::TYPE_EXTENSION);
@@ -187,8 +250,12 @@ Handle<Script> ParseInfo::CreateScript(LocalIsolate* isolate,
break;
}
script->set_origin_options(origin_options);
- script->set_is_repl_mode(is_repl_mode());
- if (is_eval() && !is_wrapped_as_function()) {
+ script->set_is_repl_mode(flags().is_repl_mode());
+
+ DCHECK_EQ(is_wrapped_as_function(), !maybe_wrapped_arguments.is_null());
+ if (is_wrapped_as_function()) {
+ script->set_wrapped_arguments(*maybe_wrapped_arguments.ToHandleChecked());
+ } else if (flags().is_eval()) {
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
}
@@ -198,15 +265,15 @@ Handle<Script> ParseInfo::CreateScript(LocalIsolate* isolate,
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<Script> ParseInfo::CreateScript(Isolate* isolate,
- Handle<String> source,
- ScriptOriginOptions origin_options,
- NativesFlag natives);
+ Handle<Script> ParseInfo::CreateScript(
+ Isolate* isolate, Handle<String> source,
+ MaybeHandle<FixedArray> maybe_wrapped_arguments,
+ ScriptOriginOptions origin_options, NativesFlag natives);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Handle<Script> ParseInfo::CreateScript(OffThreadIsolate* isolate,
- Handle<String> source,
- ScriptOriginOptions origin_options,
- NativesFlag natives);
+ Handle<Script> ParseInfo::CreateScript(
+ OffThreadIsolate* isolate, Handle<String> source,
+ MaybeHandle<FixedArray> maybe_wrapped_arguments,
+ ScriptOriginOptions origin_options, NativesFlag natives);
AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
if (!ast_value_factory_.get()) {
@@ -217,7 +284,7 @@ AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
}
void ParseInfo::AllocateSourceRangeMap() {
- DCHECK(block_coverage_enabled());
+ DCHECK(flags().block_coverage_enabled());
DCHECK_NULL(source_range_map());
set_source_range_map(new (zone()) SourceRangeMap(zone()));
}
@@ -230,75 +297,34 @@ void ParseInfo::set_character_stream(
character_stream_.swap(character_stream);
}
-void ParseInfo::SetFlagsForToplevelCompile(bool is_collecting_type_profile,
- bool is_user_javascript,
- LanguageMode language_mode,
- REPLMode repl_mode) {
- set_allow_lazy_parsing();
- set_toplevel();
- set_collect_type_profile(is_user_javascript && is_collecting_type_profile);
- set_language_mode(
- stricter_language_mode(this->language_mode(), language_mode));
- set_repl_mode(repl_mode == REPLMode::kYes);
-
- if (V8_UNLIKELY(is_user_javascript && block_coverage_enabled())) {
- AllocateSourceRangeMap();
- }
-}
-
-template <typename LocalIsolate>
-void ParseInfo::SetFlagsForToplevelCompileFromScript(
- LocalIsolate* isolate, Script script, bool is_collecting_type_profile) {
- SetFlagsForFunctionFromScript(script);
- SetFlagsForToplevelCompile(is_collecting_type_profile,
- script.IsUserJavaScript(), language_mode(),
- construct_repl_mode(script.is_repl_mode()));
-
- if (script.is_wrapped()) {
- set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
- }
-}
-
void ParseInfo::CheckFlagsForToplevelCompileFromScript(
Script script, bool is_collecting_type_profile) {
CheckFlagsForFunctionFromScript(script);
- DCHECK(allow_lazy_parsing());
- DCHECK(is_toplevel());
- DCHECK_EQ(collect_type_profile(),
+ DCHECK(flags().allow_lazy_parsing());
+ DCHECK(flags().is_toplevel());
+ DCHECK_EQ(flags().collect_type_profile(),
is_collecting_type_profile && script.IsUserJavaScript());
- DCHECK_EQ(is_repl_mode(), script.is_repl_mode());
+ DCHECK_EQ(flags().is_repl_mode(), script.is_repl_mode());
if (script.is_wrapped()) {
- DCHECK_EQ(function_syntax_kind(), FunctionSyntaxKind::kWrapped);
- }
-}
-
-void ParseInfo::SetFlagsForFunctionFromScript(Script script) {
- DCHECK_EQ(script_id_, script.id());
-
- set_eval(script.compilation_type() == Script::COMPILATION_TYPE_EVAL);
- set_module(script.origin_options().IsModule());
- DCHECK(!(is_eval() && is_module()));
-
- if (block_coverage_enabled() && script.IsUserJavaScript()) {
- AllocateSourceRangeMap();
+ DCHECK_EQ(flags().function_syntax_kind(), FunctionSyntaxKind::kWrapped);
}
}
void ParseInfo::CheckFlagsForFunctionFromScript(Script script) {
- DCHECK_EQ(script_id_, script.id());
- // We set "is_eval" for wrapped functions to get an outer declaration scope.
+ DCHECK_EQ(flags().script_id(), script.id());
+ // We set "is_eval" for wrapped scripts to get an outer declaration scope.
// This is a bit hacky, but ok since we can't be both eval and wrapped.
- DCHECK_EQ(is_eval() && !is_wrapped_as_function(),
+ DCHECK_EQ(flags().is_eval() && !script.is_wrapped(),
script.compilation_type() == Script::COMPILATION_TYPE_EVAL);
- DCHECK_EQ(is_module(), script.origin_options().IsModule());
- DCHECK_IMPLIES(block_coverage_enabled() && script.IsUserJavaScript(),
+ DCHECK_EQ(flags().is_module(), script.origin_options().IsModule());
+ DCHECK_IMPLIES(flags().block_coverage_enabled() && script.IsUserJavaScript(),
source_range_map() != nullptr);
}
-void ParseInfo::ParallelTasks::Enqueue(ParseInfo* outer_parse_info,
- const AstRawString* function_name,
- FunctionLiteral* literal) {
+void UnoptimizedCompileState::ParallelTasks::Enqueue(
+ ParseInfo* outer_parse_info, const AstRawString* function_name,
+ FunctionLiteral* literal) {
base::Optional<CompilerDispatcher::JobId> job_id =
dispatcher_->Enqueue(outer_parse_info, function_name, literal);
if (job_id) {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 4430424eb9..c774f0ae94 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -10,7 +10,9 @@
#include <vector>
#include "include/v8.h"
+#include "src/base/bit-field.h"
#include "src/base/export-template.h"
+#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/function-kind.h"
@@ -38,24 +40,187 @@ class SourceRangeMap;
class Utf16CharacterStream;
class Zone;
+// The flags for a parse + unoptimized compile operation.
+#define FLAG_FIELDS(V, _) \
+ V(is_toplevel, bool, 1, _) \
+ V(is_eager, bool, 1, _) \
+ V(is_eval, bool, 1, _) \
+ V(outer_language_mode, LanguageMode, 1, _) \
+ V(parse_restriction, ParseRestriction, 1, _) \
+ V(is_module, bool, 1, _) \
+ V(allow_lazy_parsing, bool, 1, _) \
+ V(is_lazy_compile, bool, 1, _) \
+ V(collect_type_profile, bool, 1, _) \
+ V(coverage_enabled, bool, 1, _) \
+ V(block_coverage_enabled, bool, 1, _) \
+ V(is_asm_wasm_broken, bool, 1, _) \
+ V(class_scope_has_private_brand, bool, 1, _) \
+ V(requires_instance_members_initializer, bool, 1, _) \
+ V(has_static_private_methods_or_accessors, bool, 1, _) \
+ V(might_always_opt, bool, 1, _) \
+ V(allow_natives_syntax, bool, 1, _) \
+ V(allow_lazy_compile, bool, 1, _) \
+ V(allow_harmony_dynamic_import, bool, 1, _) \
+ V(allow_harmony_import_meta, bool, 1, _) \
+ V(allow_harmony_private_methods, bool, 1, _) \
+ V(is_oneshot_iife, bool, 1, _) \
+ V(collect_source_positions, bool, 1, _) \
+ V(allow_harmony_top_level_await, bool, 1, _) \
+ V(is_repl_mode, bool, 1, _) \
+ V(allow_harmony_logical_assignment, bool, 1, _)
+
+class V8_EXPORT_PRIVATE UnoptimizedCompileFlags {
+ public:
+ // Set-up flags for a toplevel compilation.
+ static UnoptimizedCompileFlags ForToplevelCompile(Isolate* isolate,
+ bool is_user_javascript,
+ LanguageMode language_mode,
+ REPLMode repl_mode);
+
+ // Set-up flags for compiling a particular function (either a lazy compile
+ // or a recompile).
+ static UnoptimizedCompileFlags ForFunctionCompile(Isolate* isolate,
+ SharedFunctionInfo shared);
+
+ // Set up flags for a full compilation of a given script.
+ static UnoptimizedCompileFlags ForScriptCompile(Isolate* isolate,
+ Script script);
+
+ // Set up flags for a parallel toplevel function compilation, based on the
+ // flags of an existing toplevel compilation.
+ static UnoptimizedCompileFlags ForToplevelFunction(
+ const UnoptimizedCompileFlags toplevel_flags,
+ const FunctionLiteral* literal);
+
+ // Create flags for a test.
+ static UnoptimizedCompileFlags ForTest(Isolate* isolate);
+
+#define FLAG_GET_SET(NAME, TYPE, SIZE, _) \
+ TYPE NAME() const { return BitFields::NAME::decode(flags_); } \
+ UnoptimizedCompileFlags& set_##NAME(TYPE value) { \
+ flags_ = BitFields::NAME::update(flags_, value); \
+ return *this; \
+ }
+
+ FLAG_FIELDS(FLAG_GET_SET, _)
+
+ int script_id() const { return script_id_; }
+ UnoptimizedCompileFlags& set_script_id(int value) {
+ script_id_ = value;
+ return *this;
+ }
+
+ FunctionKind function_kind() const { return function_kind_; }
+ UnoptimizedCompileFlags& set_function_kind(FunctionKind value) {
+ function_kind_ = value;
+ return *this;
+ }
+
+ FunctionSyntaxKind function_syntax_kind() const {
+ return function_syntax_kind_;
+ }
+ UnoptimizedCompileFlags& set_function_syntax_kind(FunctionSyntaxKind value) {
+ function_syntax_kind_ = value;
+ return *this;
+ }
+
+ private:
+ struct BitFields {
+ DEFINE_BIT_FIELDS(FLAG_FIELDS)
+ };
+
+ UnoptimizedCompileFlags(Isolate* isolate, int script_id);
+
+ // Set function info flags based on those in either FunctionLiteral or
+ // SharedFunctionInfo |function|
+ template <typename T>
+ void SetFlagsFromFunction(T function);
+ void SetFlagsForToplevelCompile(bool is_collecting_type_profile,
+ bool is_user_javascript,
+ LanguageMode language_mode,
+ REPLMode repl_mode);
+ void SetFlagsForFunctionFromScript(Script script);
+
+ uint32_t flags_;
+ int script_id_;
+ FunctionKind function_kind_;
+ FunctionSyntaxKind function_syntax_kind_;
+};
+
+#undef FLAG_FIELDS
+class ParseInfo;
+
+// The mutable state for a parse + unoptimized compile operation.
+class V8_EXPORT_PRIVATE UnoptimizedCompileState {
+ public:
+ explicit UnoptimizedCompileState(Isolate*);
+ UnoptimizedCompileState(const UnoptimizedCompileState& other) V8_NOEXCEPT;
+
+ class ParallelTasks {
+ public:
+ explicit ParallelTasks(CompilerDispatcher* compiler_dispatcher)
+ : dispatcher_(compiler_dispatcher) {
+ DCHECK_NOT_NULL(dispatcher_);
+ }
+
+ void Enqueue(ParseInfo* outer_parse_info, const AstRawString* function_name,
+ FunctionLiteral* literal);
+
+ using EnqueuedJobsIterator =
+ std::forward_list<std::pair<FunctionLiteral*, uintptr_t>>::iterator;
+
+ EnqueuedJobsIterator begin() { return enqueued_jobs_.begin(); }
+ EnqueuedJobsIterator end() { return enqueued_jobs_.end(); }
+
+ CompilerDispatcher* dispatcher() { return dispatcher_; }
+
+ private:
+ CompilerDispatcher* dispatcher_;
+ std::forward_list<std::pair<FunctionLiteral*, uintptr_t>> enqueued_jobs_;
+ };
+
+ uint64_t hash_seed() const { return hash_seed_; }
+ AccountingAllocator* allocator() const { return allocator_; }
+ const AstStringConstants* ast_string_constants() const {
+ return ast_string_constants_;
+ }
+ Logger* logger() const { return logger_; }
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return &pending_error_handler_;
+ }
+ const PendingCompilationErrorHandler* pending_error_handler() const {
+ return &pending_error_handler_;
+ }
+ ParallelTasks* parallel_tasks() const { return parallel_tasks_.get(); }
+
+ private:
+ uint64_t hash_seed_;
+ AccountingAllocator* allocator_;
+ const AstStringConstants* ast_string_constants_;
+ PendingCompilationErrorHandler pending_error_handler_;
+ Logger* logger_;
+ std::unique_ptr<ParallelTasks> parallel_tasks_;
+};
+
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
- explicit ParseInfo(Isolate*);
- ParseInfo(Isolate* isolate, Script script);
- ParseInfo(Isolate* isolate, SharedFunctionInfo shared);
+ ParseInfo(Isolate* isolate, const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state);
// Creates a new parse info based on parent top-level |outer_parse_info| for
// function |literal|.
- static std::unique_ptr<ParseInfo> FromParent(
- const ParseInfo* outer_parse_info, AccountingAllocator* zone_allocator,
- const FunctionLiteral* literal, const AstRawString* function_name);
+ static std::unique_ptr<ParseInfo> ForToplevelFunction(
+ const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* compile_state, const FunctionLiteral* literal,
+ const AstRawString* function_name);
~ParseInfo();
template <typename LocalIsolate>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Handle<Script> CreateScript(LocalIsolate* isolate, Handle<String> source,
+ MaybeHandle<FixedArray> maybe_wrapped_arguments,
ScriptOriginOptions origin_options,
NativesFlag natives = NOT_NATIVES_CODE);
@@ -65,70 +230,40 @@ class V8_EXPORT_PRIVATE ParseInfo {
Zone* zone() const { return zone_.get(); }
-// Convenience accessor methods for flags.
-#define FLAG_ACCESSOR(flag, getter, setter) \
- bool getter() const { return GetFlag(flag); } \
- void setter() { SetFlag(flag); } \
- void setter(bool val) { SetFlag(flag, val); }
-
- FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
- FLAG_ACCESSOR(kEager, is_eager, set_eager)
- FLAG_ACCESSOR(kEval, is_eval, set_eval)
- FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
- FLAG_ACCESSOR(kModule, is_module, set_module)
- FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
- FLAG_ACCESSOR(kLazyCompile, lazy_compile, set_lazy_compile)
- FLAG_ACCESSOR(kCollectTypeProfile, collect_type_profile,
- set_collect_type_profile)
- FLAG_ACCESSOR(kIsAsmWasmBroken, is_asm_wasm_broken, set_asm_wasm_broken)
- FLAG_ACCESSOR(kContainsAsmModule, contains_asm_module,
- set_contains_asm_module)
- FLAG_ACCESSOR(kCoverageEnabled, coverage_enabled, set_coverage_enabled)
- FLAG_ACCESSOR(kBlockCoverageEnabled, block_coverage_enabled,
- set_block_coverage_enabled)
- FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
- FLAG_ACCESSOR(kRequiresInstanceMembersInitializer,
- requires_instance_members_initializer,
- set_requires_instance_members_initializer)
- FLAG_ACCESSOR(kClassScopeHasPrivateBrand, class_scope_has_private_brand,
- set_class_scope_has_private_brand)
- FLAG_ACCESSOR(kHasStaticPrivateMethodsOrAccessors,
- has_static_private_methods_or_accessors,
- set_has_static_private_methods_or_accessors)
- FLAG_ACCESSOR(kMightAlwaysOpt, might_always_opt, set_might_always_opt)
- FLAG_ACCESSOR(kAllowNativeSyntax, allow_natives_syntax,
- set_allow_natives_syntax)
- FLAG_ACCESSOR(kAllowLazyCompile, allow_lazy_compile, set_allow_lazy_compile)
- FLAG_ACCESSOR(kAllowNativeSyntax, allow_native_syntax,
- set_allow_native_syntax)
- FLAG_ACCESSOR(kAllowHarmonyDynamicImport, allow_harmony_dynamic_import,
- set_allow_harmony_dynamic_import)
- FLAG_ACCESSOR(kAllowHarmonyImportMeta, allow_harmony_import_meta,
- set_allow_harmony_import_meta)
- FLAG_ACCESSOR(kAllowHarmonyOptionalChaining, allow_harmony_optional_chaining,
- set_allow_harmony_optional_chaining)
- FLAG_ACCESSOR(kAllowHarmonyPrivateMethods, allow_harmony_private_methods,
- set_allow_harmony_private_methods)
- FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife)
- FLAG_ACCESSOR(kCollectSourcePositions, collect_source_positions,
- set_collect_source_positions)
- FLAG_ACCESSOR(kAllowHarmonyNullish, allow_harmony_nullish,
- set_allow_harmony_nullish)
- FLAG_ACCESSOR(kAllowHarmonyTopLevelAwait, allow_harmony_top_level_await,
- set_allow_harmony_top_level_await)
- FLAG_ACCESSOR(kREPLMode, is_repl_mode, set_repl_mode)
-
-#undef FLAG_ACCESSOR
-
- void set_parse_restriction(ParseRestriction restriction) {
- SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
+ const UnoptimizedCompileFlags& flags() const { return flags_; }
+
+ // Getters for state.
+ uint64_t hash_seed() const { return state_->hash_seed(); }
+ AccountingAllocator* allocator() const { return state_->allocator(); }
+ const AstStringConstants* ast_string_constants() const {
+ return state_->ast_string_constants();
}
+ Logger* logger() const { return state_->logger(); }
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return state_->pending_error_handler();
+ }
+ UnoptimizedCompileState::ParallelTasks* parallel_tasks() const {
+ return state_->parallel_tasks();
+ }
+ const UnoptimizedCompileState* state() const { return state_; }
- ParseRestriction parse_restriction() const {
- return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
- : NO_PARSE_RESTRICTION;
+ // Accessors for per-thread state.
+ uintptr_t stack_limit() const { return stack_limit_; }
+ RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
+ void SetPerThreadState(uintptr_t stack_limit,
+ RuntimeCallStats* runtime_call_stats) {
+ stack_limit_ = stack_limit;
+ runtime_call_stats_ = runtime_call_stats;
}
+ // Accessor methods for output flags.
+ bool allow_eval_cache() const { return allow_eval_cache_; }
+ void set_allow_eval_cache(bool value) { allow_eval_cache_ = value; }
+ bool contains_asm_module() const { return contains_asm_module_; }
+ void set_contains_asm_module(bool value) { contains_asm_module_ = value; }
+ LanguageMode language_mode() const { return language_mode_; }
+ void set_language_mode(LanguageMode value) { language_mode_ = value; }
+
Utf16CharacterStream* character_stream() const {
return character_stream_.get();
}
@@ -166,44 +301,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
DeclarationScope* scope() const;
- uintptr_t stack_limit() const { return stack_limit_; }
- void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
-
- uint64_t hash_seed() const { return hash_seed_; }
- void set_hash_seed(uint64_t hash_seed) { hash_seed_ = hash_seed; }
-
- int start_position() const { return start_position_; }
- void set_start_position(int start_position) {
- start_position_ = start_position;
- }
-
- int end_position() const { return end_position_; }
- void set_end_position(int end_position) { end_position_ = end_position; }
-
int parameters_end_pos() const { return parameters_end_pos_; }
void set_parameters_end_pos(int parameters_end_pos) {
parameters_end_pos_ = parameters_end_pos;
}
- int function_literal_id() const { return function_literal_id_; }
- void set_function_literal_id(int function_literal_id) {
- function_literal_id_ = function_literal_id;
- }
-
- FunctionKind function_kind() const { return function_kind_; }
- void set_function_kind(FunctionKind function_kind) {
- function_kind_ = function_kind;
- }
-
- FunctionSyntaxKind function_syntax_kind() const {
- return function_syntax_kind_;
- }
- void set_function_syntax_kind(FunctionSyntaxKind function_syntax_kind) {
- function_syntax_kind_ = function_syntax_kind;
- }
-
bool is_wrapped_as_function() const {
- return function_syntax_kind() == FunctionSyntaxKind::kWrapped;
+ return flags().function_syntax_kind() == FunctionSyntaxKind::kWrapped;
}
int max_function_literal_id() const { return max_function_literal_id_; }
@@ -211,163 +315,45 @@ class V8_EXPORT_PRIVATE ParseInfo {
max_function_literal_id_ = max_function_literal_id;
}
- const AstStringConstants* ast_string_constants() const {
- return ast_string_constants_;
- }
- void set_ast_string_constants(
- const AstStringConstants* ast_string_constants) {
- ast_string_constants_ = ast_string_constants;
- }
-
- RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
- void set_runtime_call_stats(RuntimeCallStats* runtime_call_stats) {
- runtime_call_stats_ = runtime_call_stats;
- }
- Logger* logger() const { return logger_; }
- void set_logger(Logger* logger) { logger_ = logger; }
-
void AllocateSourceRangeMap();
SourceRangeMap* source_range_map() const { return source_range_map_; }
void set_source_range_map(SourceRangeMap* source_range_map) {
source_range_map_ = source_range_map;
}
- PendingCompilationErrorHandler* pending_error_handler() {
- return &pending_error_handler_;
- }
-
- class ParallelTasks {
- public:
- explicit ParallelTasks(CompilerDispatcher* compiler_dispatcher)
- : dispatcher_(compiler_dispatcher) {
- DCHECK(dispatcher_);
- }
-
- void Enqueue(ParseInfo* outer_parse_info, const AstRawString* function_name,
- FunctionLiteral* literal);
-
- using EnqueuedJobsIterator =
- std::forward_list<std::pair<FunctionLiteral*, uintptr_t>>::iterator;
-
- EnqueuedJobsIterator begin() { return enqueued_jobs_.begin(); }
- EnqueuedJobsIterator end() { return enqueued_jobs_.end(); }
-
- CompilerDispatcher* dispatcher() { return dispatcher_; }
-
- private:
- CompilerDispatcher* dispatcher_;
- std::forward_list<std::pair<FunctionLiteral*, uintptr_t>> enqueued_jobs_;
- };
-
- ParallelTasks* parallel_tasks() { return parallel_tasks_.get(); }
-
- void SetFlagsForToplevelCompile(bool is_collecting_type_profile,
- bool is_user_javascript,
- LanguageMode language_mode,
- REPLMode repl_mode);
-
void CheckFlagsForFunctionFromScript(Script script);
- int script_id() const { return script_id_; }
- //--------------------------------------------------------------------------
-
- LanguageMode language_mode() const {
- return construct_language_mode(is_strict_mode());
- }
- void set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LanguageModeSize == 2);
- set_strict_mode(is_strict(language_mode));
- }
-
private:
- ParseInfo(AccountingAllocator* zone_allocator, int script_id);
- ParseInfo(Isolate*, AccountingAllocator* zone_allocator, int script_id);
+ ParseInfo(const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state);
- void SetFlagsForFunctionFromScript(Script script);
-
- template <typename LocalIsolate>
- void SetFlagsForToplevelCompileFromScript(LocalIsolate* isolate,
- Script script,
- bool is_collecting_type_profile);
void CheckFlagsForToplevelCompileFromScript(Script script,
bool is_collecting_type_profile);
- // Set function info flags based on those in either FunctionLiteral or
- // SharedFunctionInfo |function|
- template <typename T>
- void SetFunctionInfo(T function);
-
- // Various configuration flags for parsing.
- enum Flag : uint32_t {
- // ---------- Input flags ---------------------------
- kToplevel = 1u << 0,
- kEager = 1u << 1,
- kEval = 1u << 2,
- kStrictMode = 1u << 3,
- kNative = 1u << 4,
- kParseRestriction = 1u << 5,
- kModule = 1u << 6,
- kAllowLazyParsing = 1u << 7,
- kLazyCompile = 1u << 8,
- kCollectTypeProfile = 1u << 9,
- kCoverageEnabled = 1u << 10,
- kBlockCoverageEnabled = 1u << 11,
- kIsAsmWasmBroken = 1u << 12,
- kAllowEvalCache = 1u << 13,
- kRequiresInstanceMembersInitializer = 1u << 14,
- kContainsAsmModule = 1u << 15,
- kMightAlwaysOpt = 1u << 16,
- kAllowLazyCompile = 1u << 17,
- kAllowNativeSyntax = 1u << 18,
- kAllowHarmonyPublicFields = 1u << 19,
- kAllowHarmonyStaticFields = 1u << 20,
- kAllowHarmonyDynamicImport = 1u << 21,
- kAllowHarmonyImportMeta = 1u << 22,
- kAllowHarmonyOptionalChaining = 1u << 23,
- kHasStaticPrivateMethodsOrAccessors = 1u << 24,
- kAllowHarmonyPrivateMethods = 1u << 25,
- kIsOneshotIIFE = 1u << 26,
- kCollectSourcePositions = 1u << 27,
- kAllowHarmonyNullish = 1u << 28,
- kAllowHarmonyTopLevelAwait = 1u << 29,
- kREPLMode = 1u << 30,
- kClassScopeHasPrivateBrand = 1u << 31,
- };
-
//------------- Inputs to parsing and scope analysis -----------------------
+ const UnoptimizedCompileFlags flags_;
+ UnoptimizedCompileState* state_;
+
std::unique_ptr<Zone> zone_;
- uint32_t flags_;
v8::Extension* extension_;
DeclarationScope* script_scope_;
uintptr_t stack_limit_;
- uint64_t hash_seed_;
- FunctionKind function_kind_;
- FunctionSyntaxKind function_syntax_kind_;
- int script_id_;
- int start_position_;
- int end_position_;
int parameters_end_pos_;
- int function_literal_id_;
int max_function_literal_id_;
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
std::unique_ptr<ConsumedPreparseData> consumed_preparse_data_;
std::unique_ptr<AstValueFactory> ast_value_factory_;
- const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
- Logger* logger_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
- std::unique_ptr<ParallelTasks> parallel_tasks_;
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
- PendingCompilationErrorHandler pending_error_handler_;
-
- void SetFlag(Flag f) { flags_ |= f; }
- void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
- bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
+ bool allow_eval_cache_ : 1;
+ bool contains_asm_module_ : 1;
+ LanguageMode language_mode_ : 1;
};
} // namespace internal
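
The new parse-info.h above packs all boolean and enum parse inputs into an immutable UnoptimizedCompileFlags object via the FLAG_FIELDS X-macro and base::BitField. Below is a compilable sketch of that pattern, reduced to three flags and a hand-rolled BitField stand-in; unlike DEFINE_BIT_FIELDS, which derives each field's shift, the shifts here are written out by hand:

    #include <cstdint>

    // Hypothetical minimal stand-in for v8::base::BitField.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
      static constexpr uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) |
               ((static_cast<uint32_t>(value) << kShift) & kMask);
      }
    };

    // Same X-macro shape as FLAG_FIELDS / FLAG_GET_SET, reduced to three flags.
    #define DEMO_FLAG_FIELDS(V)  \
      V(is_toplevel, bool, 0, 1) \
      V(is_eval, bool, 1, 1)     \
      V(is_module, bool, 2, 1)

    class DemoCompileFlags {
     public:
    #define FLAG_GET_SET(NAME, TYPE, SHIFT, SIZE)                               \
      TYPE NAME() const { return BitField<TYPE, SHIFT, SIZE>::decode(flags_); } \
      DemoCompileFlags& set_##NAME(TYPE value) {                                \
        flags_ = BitField<TYPE, SHIFT, SIZE>::update(flags_, value);            \
        return *this;                                                           \
      }
      DEMO_FLAG_FIELDS(FLAG_GET_SET)
    #undef FLAG_GET_SET

     private:
      uint32_t flags_ = 0;
    };

    int main() {
      DemoCompileFlags flags;
      flags.set_is_eval(true).set_is_module(false);  // chained setters, as in the patch
      return flags.is_eval() && !flags.is_module() ? 0 : 1;
    }
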
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 29e527ce2c..903ce2bb7f 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -23,6 +23,7 @@
#include "src/objects/function-kind.h"
#include "src/parsing/expression-scope.h"
#include "src/parsing/func-name-inferrer.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
#include "src/utils/pointer-with-payload.h"
@@ -241,7 +242,7 @@ class ParserBase {
v8::Extension* extension, AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
RuntimeCallStats* runtime_call_stats, Logger* logger,
- int script_id, bool parsing_module, bool parsing_on_main_thread)
+ UnoptimizedCompileFlags flags, bool parsing_on_main_thread)
: scope_(nullptr),
original_scope_(nullptr),
function_state_(nullptr),
@@ -252,56 +253,25 @@ class ParserBase {
runtime_call_stats_(runtime_call_stats),
logger_(logger),
parsing_on_main_thread_(parsing_on_main_thread),
- parsing_module_(parsing_module),
stack_limit_(stack_limit),
pending_error_handler_(pending_error_handler),
zone_(zone),
expression_scope_(nullptr),
scanner_(scanner),
+ flags_(flags),
function_literal_id_(0),
- script_id_(script_id),
- default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
- allow_natives_(false),
- allow_harmony_dynamic_import_(false),
- allow_harmony_import_meta_(false),
- allow_harmony_private_methods_(false),
- allow_harmony_top_level_await_(false),
- allow_eval_cache_(true) {
+ default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile) {
pointer_buffer_.reserve(32);
variable_buffer_.reserve(32);
}
-#define ALLOW_ACCESSORS(name) \
- bool allow_##name() const { return allow_##name##_; } \
- void set_allow_##name(bool allow) { allow_##name##_ = allow; }
+ const UnoptimizedCompileFlags& flags() const { return flags_; }
- ALLOW_ACCESSORS(natives)
- ALLOW_ACCESSORS(harmony_dynamic_import)
- ALLOW_ACCESSORS(harmony_import_meta)
- ALLOW_ACCESSORS(harmony_private_methods)
- ALLOW_ACCESSORS(harmony_top_level_await)
- ALLOW_ACCESSORS(eval_cache)
-
-#undef ALLOW_ACCESSORS
+ bool allow_eval_cache() const { return allow_eval_cache_; }
+ void set_allow_eval_cache(bool allow) { allow_eval_cache_ = allow; }
V8_INLINE bool has_error() const { return scanner()->has_parser_error(); }
- bool allow_harmony_optional_chaining() const {
- return scanner()->allow_harmony_optional_chaining();
- }
-
- void set_allow_harmony_optional_chaining(bool allow) {
- scanner()->set_allow_harmony_optional_chaining(allow);
- }
-
- bool allow_harmony_nullish() const {
- return scanner()->allow_harmony_nullish();
- }
-
- void set_allow_harmony_nullish(bool allow) {
- scanner()->set_allow_harmony_nullish(allow);
- }
-
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -885,8 +855,6 @@ class ParserBase {
// Any further calls to Next or peek will return the illegal token.
if (GetCurrentStackPosition() < stack_limit_) set_stack_overflow();
}
- int script_id() { return script_id_; }
- void set_script_id(int id) { script_id_ = id; }
V8_INLINE Token::Value peek() { return scanner()->peek(); }
@@ -1077,7 +1045,7 @@ class ParserBase {
return IsResumableFunction(function_state_->kind());
}
bool is_await_allowed() const {
- return is_async_function() || (allow_harmony_top_level_await() &&
+ return is_async_function() || (flags().allow_harmony_top_level_await() &&
IsModule(function_state_->kind()));
}
const PendingCompilationErrorHandler* pending_error_handler() const {
@@ -1279,7 +1247,12 @@ class ParserBase {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(DeclarationScope* scope) {
if (has_error()) return;
- Declaration* decl = scope->CheckConflictingVarDeclarations();
+ bool allowed_catch_binding_var_redeclaration = false;
+ Declaration* decl = scope->CheckConflictingVarDeclarations(
+ &allowed_catch_binding_var_redeclaration);
+ if (allowed_catch_binding_var_redeclaration) {
+ impl()->CountUsage(v8::Isolate::kVarRedeclaredCatchBinding);
+ }
if (decl != nullptr) {
// In ES6, conflicting variable bindings are early errors.
const AstRawString* name = decl->var()->raw_name();
@@ -1501,16 +1474,14 @@ class ParserBase {
FormalParametersT* parent_parameters_;
};
- class FunctionBodyParsingScope {
+ class FunctionParsingScope {
public:
- explicit FunctionBodyParsingScope(Impl* parser)
+ explicit FunctionParsingScope(Impl* parser)
: parser_(parser), expression_scope_(parser_->expression_scope_) {
parser_->expression_scope_ = nullptr;
}
- ~FunctionBodyParsingScope() {
- parser_->expression_scope_ = expression_scope_;
- }
+ ~FunctionParsingScope() { parser_->expression_scope_ = expression_scope_; }
private:
Impl* parser_;
@@ -1534,7 +1505,6 @@ class ParserBase {
RuntimeCallStats* runtime_call_stats_;
internal::Logger* logger_;
bool parsing_on_main_thread_;
- const bool parsing_module_;
uintptr_t stack_limit_;
PendingCompilationErrorHandler* pending_error_handler_;
@@ -1549,8 +1519,8 @@ class ParserBase {
Scanner* scanner_;
+ const UnoptimizedCompileFlags flags_;
int function_literal_id_;
- int script_id_;
FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
@@ -1589,12 +1559,7 @@ class ParserBase {
bool accept_IN_ = true;
- bool allow_natives_;
- bool allow_harmony_dynamic_import_;
- bool allow_harmony_import_meta_;
- bool allow_harmony_private_methods_;
- bool allow_harmony_top_level_await_;
- bool allow_eval_cache_;
+ bool allow_eval_cache_ = true;
};
template <typename Impl>
@@ -1644,7 +1609,7 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
}
if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
- parsing_module_ || is_async_function())) {
+ flags().is_module() || is_async_function())) {
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
}
@@ -1668,7 +1633,7 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
if (!Token::IsValidIdentifier(
next, language_mode(), IsGeneratorFunction(function_kind),
- parsing_module_ || IsAsyncFunction(function_kind))) {
+ flags().is_module() || IsAsyncFunction(function_kind))) {
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
}
@@ -1879,7 +1844,7 @@ ParserBase<Impl>::ParsePrimaryExpression() {
return ParseSuperExpression(is_new);
}
case Token::IMPORT:
- if (!allow_harmony_dynamic_import()) break;
+ if (!flags().allow_harmony_dynamic_import()) break;
return ParseImportExpressions();
case Token::LBRACK:
@@ -1942,7 +1907,7 @@ ParserBase<Impl>::ParsePrimaryExpression() {
return ParseTemplateLiteral(impl()->NullExpression(), beg_pos, false);
case Token::MOD:
- if (allow_natives() || extension_ != nullptr) {
+ if (flags().allow_natives_syntax() || extension_ != nullptr) {
return ParseV8Intrinsic();
}
break;
@@ -2188,7 +2153,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseProperty(
prop_info->kind = ParsePropertyKind::kNotSet;
return impl()->FailureExpression();
}
- if (V8_UNLIKELY(!allow_harmony_private_methods() &&
+ if (V8_UNLIKELY(!flags().allow_harmony_private_methods() &&
(IsAccessor(prop_info->kind) ||
prop_info->kind == ParsePropertyKind::kMethod))) {
ReportUnexpectedToken(Next());
@@ -2437,7 +2402,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
ClassInfo* class_info, int beg_pos, bool is_static) {
- FunctionBodyParsingScope body_parsing_scope(impl());
+ FunctionParsingScope body_parsing_scope(impl());
DeclarationScope* initializer_scope =
is_static ? class_info->static_fields_scope
: class_info->instance_members_scope;
@@ -2535,8 +2500,9 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
- parsing_module_ || is_async_function())) {
+ if (!Token::IsValidIdentifier(
+ name_token, language_mode(), is_generator(),
+ flags().is_module() || is_async_function())) {
ReportUnexpectedToken(Next());
return impl()->NullLiteralProperty();
}
@@ -2789,6 +2755,11 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
Token::Value op = peek();
if (!Token::IsArrowOrAssignmentOp(op)) return expression;
+ if ((op == Token::ASSIGN_NULLISH || op == Token::ASSIGN_OR ||
+ op == Token::ASSIGN_AND) &&
+ !flags().allow_harmony_logical_assignment()) {
+ return expression;
+ }
// Arrow functions.
if (V8_UNLIKELY(op == Token::ARROW)) {
@@ -3399,13 +3370,7 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
if (is_optional) {
DCHECK_EQ(scanner()->current_token(), Token::QUESTION_PERIOD);
int pos = position();
- Token::Value next = Next();
- if (V8_UNLIKELY(!Token::IsPropertyName(next))) {
- ReportUnexpectedToken(next);
- return impl()->FailureExpression();
- }
- IdentifierT name = impl()->GetSymbol();
- ExpressionT key = factory()->NewStringLiteral(name, position());
+ ExpressionT key = ParsePropertyOrPrivatePropertyName();
result = factory()->NewProperty(result, key, pos, is_optional);
break;
}
@@ -3456,8 +3421,10 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new);
- } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT &&
- (!allow_harmony_import_meta() || PeekAhead() == Token::LPAREN)) {
+ } else if (flags().allow_harmony_dynamic_import() &&
+ peek() == Token::IMPORT &&
+ (!flags().allow_harmony_import_meta() ||
+ PeekAhead() == Token::LPAREN)) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kImportCallNotNewExpression);
return impl()->FailureExpression();
@@ -3555,14 +3522,14 @@ ParserBase<Impl>::ParseMemberExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseImportExpressions() {
- DCHECK(allow_harmony_dynamic_import());
+ DCHECK(flags().allow_harmony_dynamic_import());
Consume(Token::IMPORT);
int pos = position();
- if (allow_harmony_import_meta() && Check(Token::PERIOD)) {
+ if (flags().allow_harmony_import_meta() && Check(Token::PERIOD)) {
ExpectContextualKeyword(ast_value_factory()->meta_string(), "import.meta",
pos);
- if (!parsing_module_) {
+ if (!flags().is_module()) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMetaOutsideModule);
return impl()->FailureExpression();
@@ -3572,7 +3539,7 @@ ParserBase<Impl>::ParseImportExpressions() {
}
if (V8_UNLIKELY(peek() != Token::LPAREN)) {
- if (!parsing_module_) {
+ if (!flags().is_module()) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportOutsideModule);
} else {
@@ -4157,8 +4124,6 @@ void ParserBase<Impl>::ParseFunctionBody(
StatementListT* body, IdentifierT function_name, int pos,
const FormalParametersT& parameters, FunctionKind kind,
FunctionSyntaxKind function_syntax_kind, FunctionBodyType body_type) {
- FunctionBodyParsingScope body_parsing_scope(impl());
-
if (IsResumableFunction(kind)) impl()->PrepareGeneratorVariables();
DeclarationScope* function_scope = parameters.scope;
@@ -4435,6 +4400,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
Consume(Token::LBRACE);
AcceptINScope scope(this, true);
+ FunctionParsingScope body_parsing_scope(impl());
ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
parameters, kind,
FunctionSyntaxKind::kAnonymousExpression,
@@ -4445,6 +4411,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
} else {
Consume(Token::LBRACE);
AcceptINScope scope(this, true);
+ FunctionParsingScope body_parsing_scope(impl());
ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
formal_parameters, kind,
FunctionSyntaxKind::kAnonymousExpression,
@@ -4454,6 +4421,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
} else {
// Single-expression body
has_braces = false;
+ FunctionParsingScope body_parsing_scope(impl());
ParseFunctionBody(&body, impl()->NullIdentifier(), kNoSourcePosition,
formal_parameters, kind,
FunctionSyntaxKind::kAnonymousExpression,
@@ -4493,8 +4461,9 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
const char* event_name =
is_lazy_top_level_function ? "preparse-no-resolution" : "parse";
const char* name = "arrow function";
- logger_->FunctionEvent(event_name, script_id(), ms, scope->start_position(),
- scope->end_position(), name, strlen(name));
+ logger_->FunctionEvent(event_name, flags().script_id(), ms,
+ scope->start_position(), scope->end_position(), name,
+ strlen(name));
}
return function_literal;
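
The parser-base.h changes above replace the per-parser allow_* booleans with reads from the immutable flags() object, and rename FunctionBodyParsingScope to FunctionParsingScope, moving its instantiation out of ParseFunctionBody and up to the call sites (arrow-function bodies, member initializers, Parser::ParseFunction). A minimal sketch of that RAII save/restore idiom, with hypothetical Parser and ExpressionScope stand-ins:

    #include <cassert>

    // Hypothetical stand-ins for the V8-internal types.
    struct ExpressionScope {};

    struct Parser {
      ExpressionScope* expression_scope_ = nullptr;
    };

    // Mirrors FunctionParsingScope: clear the active expression scope while a
    // function body is parsed, and restore the previous one on destruction.
    class FunctionParsingScope {
     public:
      explicit FunctionParsingScope(Parser* parser)
          : parser_(parser), saved_scope_(parser->expression_scope_) {
        parser_->expression_scope_ = nullptr;
      }
      ~FunctionParsingScope() { parser_->expression_scope_ = saved_scope_; }

     private:
      Parser* parser_;
      ExpressionScope* saved_scope_;
    };

    int main() {
      Parser parser;
      ExpressionScope outer;
      parser.expression_scope_ = &outer;
      {
        FunctionParsingScope body_scope(&parser);
        assert(parser.expression_scope_ == nullptr);  // body parses without an outer expression scope
      }
      assert(parser.expression_scope_ == &outer);  // restored after the body
      return 0;
    }
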
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 2a1ad0e98b..63b8b9c6f9 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -15,6 +15,7 @@
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/logging/counters.h"
@@ -416,13 +417,12 @@ Expression* Parser::NewV8RuntimeFunctionForFuzzing(
}
Parser::Parser(ParseInfo* info)
- : ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
- info->extension(), info->GetOrCreateAstValueFactory(),
- info->pending_error_handler(),
- info->runtime_call_stats(), info->logger(),
- info->script_id(), info->is_module(), true),
+ : ParserBase<Parser>(
+ info->zone(), &scanner_, info->stack_limit(), info->extension(),
+ info->GetOrCreateAstValueFactory(), info->pending_error_handler(),
+ info->runtime_call_stats(), info->logger(), info->flags(), true),
info_(info),
- scanner_(info->character_stream(), info->is_module()),
+ scanner_(info->character_stream(), flags()),
preparser_zone_(info->zone()->allocator(), ZONE_NAME),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
@@ -445,20 +445,13 @@ Parser::Parser(ParseInfo* info)
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript
// We also compile eagerly for kProduceExhaustiveCodeCache.
- bool can_compile_lazily = info->allow_lazy_compile() && !info->is_eager();
+ bool can_compile_lazily = flags().allow_lazy_compile() && !flags().is_eager();
set_default_eager_compile_hint(can_compile_lazily
? FunctionLiteral::kShouldLazyCompile
: FunctionLiteral::kShouldEagerCompile);
- allow_lazy_ = info->allow_lazy_compile() && info->allow_lazy_parsing() &&
+ allow_lazy_ = flags().allow_lazy_compile() && flags().allow_lazy_parsing() &&
info->extension() == nullptr && can_compile_lazily;
- set_allow_natives(info->allow_natives_syntax());
- set_allow_harmony_dynamic_import(info->allow_harmony_dynamic_import());
- set_allow_harmony_import_meta(info->allow_harmony_import_meta());
- set_allow_harmony_nullish(info->allow_harmony_nullish());
- set_allow_harmony_optional_chaining(info->allow_harmony_optional_chaining());
- set_allow_harmony_private_methods(info->allow_harmony_private_methods());
- set_allow_harmony_top_level_await(info->allow_harmony_top_level_await());
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -469,7 +462,7 @@ void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
DCHECK_NULL(original_scope_);
DCHECK_NULL(info->script_scope());
DeclarationScope* script_scope =
- NewScriptScope(info->is_repl_mode() ? REPLMode::kYes : REPLMode::kNo);
+ NewScriptScope(flags().is_repl_mode() ? REPLMode::kYes : REPLMode::kNo);
info->set_script_scope(script_scope);
original_scope_ = script_scope;
}
@@ -485,7 +478,7 @@ void Parser::DeserializeScopeChain(
original_scope_ = Scope::DeserializeScopeChain(
isolate, zone(), *outer_scope_info, info->script_scope(),
ast_value_factory(), mode);
- if (info->is_eval() || IsArrowFunction(info->function_kind())) {
+ if (flags().is_eval() || IsArrowFunction(flags().function_kind())) {
original_scope_->GetReceiverScope()->DeserializeReceiver(
ast_value_factory());
}
@@ -515,18 +508,18 @@ void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
} // namespace
-FunctionLiteral* Parser::ParseProgram(
- Isolate* isolate, Handle<Script> script, ParseInfo* info,
- MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
+void Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
+ ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info) {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
- DCHECK_EQ(script->id(), script_id());
+ DCHECK_EQ(script->id(), flags().script_id());
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_, info->is_eval()
+ runtime_call_stats_, flags().is_eval()
? RuntimeCallCounterId::kParseEval
: RuntimeCallCounterId::kParseProgram);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
@@ -546,6 +539,7 @@ FunctionLiteral* Parser::ParseProgram(
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
+ PostProcessParseResult(isolate, info, result);
HandleSourceURLComments(isolate, script);
@@ -554,14 +548,14 @@ FunctionLiteral* Parser::ParseProgram(
const char* event_name = "parse-eval";
int start = -1;
int end = -1;
- if (!info->is_eval()) {
+ if (!flags().is_eval()) {
event_name = "parse-script";
start = 0;
end = String::cast(script->source()).length();
}
- LOG(isolate, FunctionEvent(event_name, script_id(), ms, start, end, "", 0));
+ LOG(isolate,
+ FunctionEvent(event_name, flags().script_id(), ms, start, end, "", 0));
}
- return result;
}
FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
@@ -574,16 +568,14 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
ResetFunctionLiteralId();
- DCHECK(info->function_literal_id() == kFunctionLiteralIdTopLevel ||
- info->function_literal_id() == kFunctionLiteralIdInvalid);
FunctionLiteral* result = nullptr;
{
Scope* outer = original_scope_;
DCHECK_NOT_NULL(outer);
- if (info->is_eval()) {
+ if (flags().is_eval()) {
outer = NewEvalScope(outer);
- } else if (parsing_module_) {
+ } else if (flags().is_module()) {
DCHECK_EQ(outer, info->script_scope());
outer = NewModuleScope(info->script_scope());
}
@@ -594,15 +586,15 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
FunctionState function_state(&function_state_, &scope_, scope);
ScopedPtrList<Statement> body(pointer_buffer());
int beg_pos = scanner()->location().beg_pos;
- if (parsing_module_) {
- DCHECK(info->is_module());
+ if (flags().is_module()) {
+ DCHECK(flags().is_module());
PrepareGeneratorVariables();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body.Add(
factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
- if (allow_harmony_top_level_await()) {
+ if (flags().allow_harmony_top_level_await()) {
// First parse statements into a buffer. Then, if there was a
// top level await, create an inner block and rewrite the body of the
// module as an async function. Otherwise merge the statements back
@@ -636,7 +628,7 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
} else if (info->is_wrapped_as_function()) {
DCHECK(parsing_on_main_thread_);
ParseWrapped(isolate, info, &body, scope, zone());
- } else if (info->is_repl_mode()) {
+ } else if (flags().is_repl_mode()) {
ParseREPLProgram(info, &body, scope);
} else {
// Don't count the mode in the use counters--give the program a chance
@@ -661,13 +653,13 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
}
// Internalize the ast strings in the case of eval so we can check for
// conflicting var declarations with outer scope-info-backed scopes.
- if (info->is_eval()) {
+ if (flags().is_eval()) {
DCHECK(parsing_on_main_thread_);
info->ast_value_factory()->Internalize(isolate);
}
CheckConflictingVarDeclarations(scope);
- if (info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
+ if (flags().parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
if (body.length() != 1 || !body.at(0)->IsExpressionStatement() ||
!body.at(0)
->AsExpressionStatement()
@@ -692,6 +684,33 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
return result;
}
+void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
+ FunctionLiteral* literal) {
+ if (literal == nullptr) return;
+
+ info->set_literal(literal);
+ info->set_language_mode(literal->language_mode());
+ if (info->flags().is_eval()) {
+ info->set_allow_eval_cache(allow_eval_cache());
+ }
+
+ // We cannot internalize on a background thread; a foreground task will take
+ // care of calling AstValueFactory::Internalize just before compilation.
+ DCHECK_EQ(isolate != nullptr, parsing_on_main_thread_);
+ if (isolate) info->ast_value_factory()->Internalize(isolate);
+
+ {
+ RuntimeCallTimerScope runtimeTimer(info->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileAnalyse,
+ RuntimeCallStats::kThreadSpecific);
+ if (!Rewriter::Rewrite(info) || !DeclarationScope::Analyze(info)) {
+ // Null out the literal to indicate that something failed.
+ info->set_literal(nullptr);
+ return;
+ }
+ }
+}
+
ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
Isolate* isolate, ParseInfo* info, Zone* zone) {
DCHECK(parsing_on_main_thread_);
@@ -745,7 +764,7 @@ void Parser::ParseREPLProgram(ParseInfo* info, ScopedPtrList<Statement>* body,
// completion value of the script is obtained by manually invoking
// the {Rewriter} which will return a VariableProxy referencing the
// result.
- DCHECK(info->is_repl_mode());
+ DCHECK(flags().is_repl_mode());
this->scope()->SetLanguageMode(info->language_mode());
PrepareGeneratorVariables();
@@ -791,8 +810,8 @@ Expression* Parser::WrapREPLResult(Expression* value) {
false);
}
-FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
- Handle<SharedFunctionInfo> shared_info) {
+void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
+ Handle<SharedFunctionInfo> shared_info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
@@ -815,6 +834,10 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
Script::cast(shared_info->script()).wrapped_arguments(), isolate);
}
+ int start_position = shared_info->StartPosition();
+ int end_position = shared_info->EndPosition();
+ int function_literal_id = shared_info->function_literal_id();
+
// Initialize parser state.
Handle<String> name(shared_info->Name(), isolate);
info->set_function_name(ast_value_factory()->GetString(name));
@@ -827,9 +850,11 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// function is in heritage position. Otherwise the function scope's skip bit
// will be correctly inherited from the outer scope.
ClassScope::HeritageParsingScope heritage(original_scope_->AsClassScope());
- result = DoParseFunction(isolate, info, info->function_name());
+ result = DoParseFunction(isolate, info, start_position, end_position,
+ function_literal_id, info->function_name());
} else {
- result = DoParseFunction(isolate, info, info->function_name());
+ result = DoParseFunction(isolate, info, start_position, end_position,
+ function_literal_id, info->function_name());
}
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
@@ -837,23 +862,25 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<String> inferred_name(shared_info->inferred_name(), isolate);
result->set_inferred_name(inferred_name);
}
+ PostProcessParseResult(isolate, info, result);
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
- // We need to make sure that the debug-name is available.
- ast_value_factory()->Internalize(isolate);
+ // We should already be internalized by now, so the debug name will be
+ // available.
DeclarationScope* function_scope = result->scope();
std::unique_ptr<char[]> function_name = result->GetDebugName();
LOG(isolate,
- FunctionEvent("parse-function", script_id(), ms,
+ FunctionEvent("parse-function", flags().script_id(), ms,
function_scope->start_position(),
function_scope->end_position(), function_name.get(),
strlen(function_name.get())));
}
- return result;
}
FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
+ int start_position, int end_position,
+ int function_literal_id,
const AstRawString* raw_name) {
DCHECK_EQ(parsing_on_main_thread_, isolate != nullptr);
DCHECK_NOT_NULL(raw_name);
@@ -863,8 +890,8 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
fni_.PushEnclosingName(raw_name);
ResetFunctionLiteralId();
- DCHECK_LT(0, info->function_literal_id());
- SkipFunctionLiterals(info->function_literal_id() - 1);
+ DCHECK_LT(0, function_literal_id);
+ SkipFunctionLiterals(function_literal_id - 1);
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
@@ -880,10 +907,10 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
BlockState block_state(&scope_, outer);
DCHECK(is_sloppy(outer->language_mode()) ||
is_strict(info->language_mode()));
- FunctionKind kind = info->function_kind();
- DCHECK_IMPLIES(
- IsConciseMethod(kind) || IsAccessorFunction(kind),
- info->function_syntax_kind() == FunctionSyntaxKind::kAccessorOrMethod);
+ FunctionKind kind = flags().function_kind();
+ DCHECK_IMPLIES(IsConciseMethod(kind) || IsAccessorFunction(kind),
+ flags().function_syntax_kind() ==
+ FunctionSyntaxKind::kAccessorOrMethod);
if (IsArrowFunction(kind)) {
if (IsAsyncFunction(kind)) {
@@ -906,7 +933,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
// not passing the ScopeInfo to the Scope constructor.
SetLanguageMode(scope, info->language_mode());
- scope->set_start_position(info->start_position());
+ scope->set_start_position(start_position);
ParserFormalParameters formals(scope);
{
ParameterDeclarationParsingScope formals_scope(this);
@@ -927,14 +954,14 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
formals.duplicate_loc = formals_scope.duplicate_location();
}
- if (GetLastFunctionLiteralId() != info->function_literal_id() - 1) {
+ if (GetLastFunctionLiteralId() != function_literal_id - 1) {
if (has_error()) return nullptr;
// If there were FunctionLiterals in the parameters, we need to
// renumber them to shift down so the next function literal id for
// the arrow function is the one requested.
AstFunctionLiteralIdReindexer reindexer(
stack_limit_,
- (info->function_literal_id() - 1) - GetLastFunctionLiteralId());
+ (function_literal_id - 1) - GetLastFunctionLiteralId());
for (auto p : formals.params) {
if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
if (p->initializer() != nullptr) {
@@ -942,7 +969,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
}
}
ResetFunctionLiteralId();
- SkipFunctionLiterals(info->function_literal_id() - 1);
+ SkipFunctionLiterals(function_literal_id - 1);
}
Expression* expression = ParseArrowFunctionLiteral(formals);
@@ -952,7 +979,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
// concise body happens to be a valid expression. This is a problem
// only for arrow functions with single expression bodies, since there
// is no end token such as "}" for normal functions.
- if (scanner()->location().end_pos == info->end_position()) {
+ if (scanner()->location().end_pos == end_position) {
// The pre-parser saw an arrow function here, so the full parser
// must produce a FunctionLiteral.
DCHECK(expression->IsFunctionLiteral());
@@ -961,7 +988,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
} else if (IsDefaultConstructor(kind)) {
DCHECK_EQ(scope(), outer);
result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
- info->start_position(), info->end_position());
+ start_position, end_position);
} else {
ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
info->is_wrapped_as_function()
@@ -969,24 +996,23 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
: nullptr;
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
- kNoSourcePosition, info->function_syntax_kind(),
+ kNoSourcePosition, flags().function_syntax_kind(),
info->language_mode(), arguments_for_wrapped_function);
}
if (has_error()) return nullptr;
result->set_requires_instance_members_initializer(
- info->requires_instance_members_initializer());
+ flags().requires_instance_members_initializer());
result->set_class_scope_has_private_brand(
- info->class_scope_has_private_brand());
+ flags().class_scope_has_private_brand());
result->set_has_static_private_methods_or_accessors(
- info->has_static_private_methods_or_accessors());
- if (info->is_oneshot_iife()) {
+ flags().has_static_private_methods_or_accessors());
+ if (flags().is_oneshot_iife()) {
result->mark_as_oneshot_iife();
}
}
- DCHECK_IMPLIES(result,
- info->function_literal_id() == result->function_literal_id());
+ DCHECK_IMPLIES(result, function_literal_id == result->function_literal_id());
return result;
}
@@ -1007,8 +1033,9 @@ Statement* Parser::ParseModuleItem() {
// We must be careful not to parse a dynamic import expression as an import
// declaration. Same for import.meta expressions.
Token::Value peek_ahead = PeekAhead();
- if ((!allow_harmony_dynamic_import() || peek_ahead != Token::LPAREN) &&
- (!allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
+ if ((!flags().allow_harmony_dynamic_import() ||
+ peek_ahead != Token::LPAREN) &&
+ (!flags().allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
ParseImportDeclaration();
return factory()->EmptyStatement();
}
@@ -1068,7 +1095,7 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
// caller needs to report an error.
if (!reserved_loc->IsValid() &&
!Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
- parsing_module_)) {
+ flags().is_module())) {
*reserved_loc = scanner()->location();
}
const AstRawString* local_name = ParsePropertyName();
@@ -1124,7 +1151,7 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
}
if (!Token::IsValidIdentifier(scanner()->current_token(),
LanguageMode::kStrict, false,
- parsing_module_)) {
+ flags().is_module())) {
ReportMessage(MessageTemplate::kUnexpectedReserved);
return nullptr;
} else if (IsEvalOrArguments(local_name)) {
@@ -1562,7 +1589,7 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
bool was_added;
Declare(declaration, variable_name, kind, mode, kCreatedInitialized, scope(),
&was_added, beg_pos);
- if (info()->coverage_enabled()) {
+ if (info()->flags().coverage_enabled()) {
// Force the function to be allocated when collecting source coverage, so
// that even dead functions get source coverage data.
declaration->var()->set_is_used();
@@ -2378,7 +2405,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// parenthesis before the function means that it will be called
// immediately). bar can be parsed lazily, but we need to parse it in a mode
// that tracks unresolved variables.
- DCHECK_IMPLIES(parse_lazily(), info()->allow_lazy_compile());
+ DCHECK_IMPLIES(parse_lazily(), info()->flags().allow_lazy_compile());
DCHECK_IMPLIES(parse_lazily(), has_error() || allow_lazy_);
DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
@@ -2477,7 +2504,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
? (is_top_level ? "preparse-no-resolution" : "preparse-resolution")
: "full-parse";
logger_->FunctionEvent(
- event_name, script_id(), ms, scope->start_position(),
+ event_name, flags().script_id(), ms, scope->start_position(),
scope->end_position(),
reinterpret_cast<const char*>(function_name->raw_data()),
function_name->byte_length());
@@ -2584,7 +2611,7 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
PreParser::PreParseResult result = reusable_preparser()->PreParseFunction(
function_name, kind, function_syntax_kind, function_scope, use_counts_,
- produced_preparse_data, this->script_id());
+ produced_preparse_data);
if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
@@ -2733,6 +2760,7 @@ void Parser::ParseFunction(
bool* has_duplicate_parameters, int* expected_property_count,
int* suspend_count,
ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
+ FunctionParsingScope function_parsing_scope(this);
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionState function_state(&function_state_, &scope_, function_scope);
@@ -2880,7 +2908,7 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
ClassLiteralProperty::Kind kind,
bool is_static, ClassInfo* class_info) {
DCHECK_IMPLIES(kind != ClassLiteralProperty::Kind::FIELD,
- allow_harmony_private_methods());
+ flags().allow_harmony_private_methods());
if (kind == ClassLiteralProperty::Kind::FIELD) {
if (is_static) {
@@ -3085,11 +3113,11 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
total_preparse_skipped_);
}
-void Parser::ParseOnBackground(ParseInfo* info) {
+void Parser::ParseOnBackground(ParseInfo* info, int start_position,
+ int end_position, int function_literal_id) {
RuntimeCallTimerScope runtimeTimer(
runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
- set_script_id(info->script_id());
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
@@ -3104,19 +3132,19 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// don't). We work around this by storing all the scopes which need their end
// position set at the end of the script (the top scope and possible eval
// scopes) and set their end position after we know the script length.
- if (info->is_toplevel()) {
+ if (flags().is_toplevel()) {
+ DCHECK_EQ(start_position, 0);
+ DCHECK_EQ(end_position, 0);
+ DCHECK_EQ(function_literal_id, kFunctionLiteralIdTopLevel);
result = DoParseProgram(/* isolate = */ nullptr, info);
} else {
- result =
- DoParseFunction(/* isolate = */ nullptr, info, info->function_name());
+ result = DoParseFunction(/* isolate = */ nullptr, info, start_position,
+ end_position, function_literal_id,
+ info->function_name());
}
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
-
- info->set_literal(result);
-
- // We cannot internalize on a background thread; a foreground task will take
- // care of calling AstValueFactory::Internalize just before compilation.
+ PostProcessParseResult(/* isolate = */ nullptr, info, result);
}
Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
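
In parser.cc above, ParseProgram and ParseFunction no longer return the FunctionLiteral; the new PostProcessParseResult stores it on the ParseInfo, internalizes AST strings when running on the main thread, runs the rewriter and scope analysis, and nulls the literal out again if either phase fails. A simplified sketch of that flow, with hypothetical stand-ins for ParseInfo, Rewriter::Rewrite and DeclarationScope::Analyze:

    #include <cstdio>

    // Hypothetical, simplified stand-ins.
    struct FunctionLiteral {};

    struct ParseInfo {
      FunctionLiteral* literal = nullptr;
      void set_literal(FunctionLiteral* l) { literal = l; }
    };

    bool Rewrite(ParseInfo*) { return true; }        // stands in for Rewriter::Rewrite
    bool AnalyzeScopes(ParseInfo*) { return true; }  // stands in for DeclarationScope::Analyze

    // Shape of the new flow: parsing stores its result on the ParseInfo, and a
    // failure in any post-processing phase nulls the literal out again.
    void PostProcessParseResult(ParseInfo* info, FunctionLiteral* literal) {
      if (literal == nullptr) return;
      info->set_literal(literal);
      if (!Rewrite(info) || !AnalyzeScopes(info)) {
        info->set_literal(nullptr);  // signal failure to the caller
      }
    }

    int main() {
      ParseInfo info;
      FunctionLiteral result;
      PostProcessParseResult(&info, &result);
      std::printf("parse %s\n", info.literal != nullptr ? "succeeded" : "failed");
      return 0;
    }
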
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index fd24ffb3e8..472c9a71ab 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -14,6 +14,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/threaded-list.h"
#include "src/common/globals.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
@@ -134,7 +135,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
static bool IsPreParser() { return false; }
- void ParseOnBackground(ParseInfo* info);
+ // Sets the literal on |info| if parsing succeeded.
+ void ParseOnBackground(ParseInfo* info, int start_position, int end_position,
+ int function_literal_id);
// Initializes an empty scope chain for top-level scripts, or scopes which
// consist of only the native context.
@@ -207,14 +210,20 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void PrepareGeneratorVariables();
- // Returns nullptr if parsing failed.
- FunctionLiteral* ParseProgram(Isolate* isolate, Handle<Script> script,
- ParseInfo* info,
- MaybeHandle<ScopeInfo> maybe_outer_scope_info);
+ // Sets the literal on |info| if parsing succeeded.
+ void ParseProgram(Isolate* isolate, Handle<Script> script, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info);
+
+ // Sets the literal on |info| if parsing succeeded.
+ void ParseFunction(Isolate* isolate, ParseInfo* info,
+ Handle<SharedFunctionInfo> shared_info);
+
+ void PostProcessParseResult(Isolate* isolate, ParseInfo* info,
+ FunctionLiteral* literal);
- FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info,
- Handle<SharedFunctionInfo> shared_info);
FunctionLiteral* DoParseFunction(Isolate* isolate, ParseInfo* info,
+ int start_position, int end_position,
+ int function_literal_id,
const AstRawString* raw_name);
// Called by ParseProgram after setting up the scanner.
@@ -239,15 +248,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
if (reusable_preparser_ == nullptr) {
reusable_preparser_ = new PreParser(
&preparser_zone_, &scanner_, stack_limit_, ast_value_factory(),
- pending_error_handler(), runtime_call_stats_, logger_, -1,
- parsing_module_, parsing_on_main_thread_);
-#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
- SET_ALLOW(natives);
- SET_ALLOW(harmony_dynamic_import);
- SET_ALLOW(harmony_import_meta);
- SET_ALLOW(harmony_private_methods);
- SET_ALLOW(eval_cache);
-#undef SET_ALLOW
+ pending_error_handler(), runtime_call_stats_, logger_, flags(),
+ parsing_on_main_thread_);
+ reusable_preparser_->set_allow_eval_cache(allow_eval_cache());
preparse_data_buffer_.reserve(128);
}
return reusable_preparser_;
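
The parser.h hunk above now constructs the reusable preparser from the shared flags() object, so the former block of SET_ALLOW copies collapses to a single set_allow_eval_cache call. A small sketch of that lazily-constructed, flags-configured helper, with hypothetical Flags and PreParser stand-ins:

    #include <memory>

    // Hypothetical stand-ins: an immutable flags object handed down whole, and a
    // helper that is expensive to build, so it is created on first use and reused.
    struct Flags {
      bool allow_lazy_parsing = true;
    };

    class PreParser {
     public:
      explicit PreParser(const Flags& flags) : flags_(flags) {}

      bool allow_lazy_parsing() const { return flags_.allow_lazy_parsing; }
      bool allow_eval_cache() const { return allow_eval_cache_; }
      void set_allow_eval_cache(bool allow) { allow_eval_cache_ = allow; }

     private:
      const Flags flags_;
      bool allow_eval_cache_ = true;  // the only remaining per-parser toggle to copy
    };

    class Parser {
     public:
      explicit Parser(const Flags& flags) : flags_(flags) {}

      // Mirrors reusable_preparser() in the patch: the flags are passed through
      // in one piece, so only the mutable allow_eval_cache output is copied.
      PreParser* reusable_preparser() {
        if (!reusable_preparser_) {
          reusable_preparser_ = std::make_unique<PreParser>(flags_);
          reusable_preparser_->set_allow_eval_cache(allow_eval_cache_);
        }
        return reusable_preparser_.get();
      }

     private:
      const Flags flags_;
      bool allow_eval_cache_ = true;
      std::unique_ptr<PreParser> reusable_preparser_;
    };

    int main() {
      Parser parser(Flags{});
      return parser.reusable_preparser()->allow_eval_cache() ? 0 : 1;
    }
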
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index 8a960cdc29..e126874d7d 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -12,6 +12,7 @@
#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
@@ -19,10 +20,27 @@ namespace v8 {
namespace internal {
namespace parsing {
+namespace {
+
+void MaybeReportErrorsAndStatistics(ParseInfo* info, Handle<Script> script,
+ Isolate* isolate, Parser* parser,
+ ReportErrorsAndStatisticsMode mode) {
+ if (mode == ReportErrorsAndStatisticsMode::kYes) {
+ if (info->literal() == nullptr) {
+ info->pending_error_handler()->PrepareErrors(isolate,
+ info->ast_value_factory());
+ info->pending_error_handler()->ReportErrors(isolate, script);
+ }
+ parser->UpdateStatistics(isolate, script);
+ }
+}
+
+} // namespace
+
bool ParseProgram(ParseInfo* info, Handle<Script> script,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
- DCHECK(info->is_toplevel());
+ DCHECK(info->flags().is_toplevel());
DCHECK_NULL(info->literal());
VMState<PARSER> state(isolate);
@@ -36,27 +54,11 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script,
Parser parser(info);
- FunctionLiteral* result = nullptr;
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
-
- result = parser.ParseProgram(isolate, script, info, maybe_outer_scope_info);
- info->set_literal(result);
- if (result) {
- info->set_language_mode(info->literal()->language_mode());
- if (info->is_eval()) {
- info->set_allow_eval_cache(parser.allow_eval_cache());
- }
- }
-
- if (mode == ReportErrorsAndStatisticsMode::kYes) {
- if (result == nullptr) {
- info->pending_error_handler()->ReportErrors(isolate, script,
- info->ast_value_factory());
- }
- parser.UpdateStatistics(isolate, script);
- }
- return (result != nullptr);
+ parser.ParseProgram(isolate, script, info, maybe_outer_scope_info);
+ MaybeReportErrorsAndStatistics(info, script, isolate, &parser, mode);
+ return info->literal() != nullptr;
}
bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
@@ -66,10 +68,12 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
- DCHECK(!info->is_toplevel());
+ DCHECK(!info->flags().is_toplevel());
DCHECK(!shared_info.is_null());
DCHECK_NULL(info->literal());
+ VMState<PARSER> state(isolate);
+
// Create a character stream for the parser.
Handle<Script> script(Script::cast(shared_info->script()), isolate);
Handle<String> source(String::cast(script->source()), isolate);
@@ -79,37 +83,19 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
shared_info->EndPosition()));
info->set_character_stream(std::move(stream));
- VMState<PARSER> state(isolate);
-
Parser parser(info);
- FunctionLiteral* result = nullptr;
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
-
- result = parser.ParseFunction(isolate, info, shared_info);
- info->set_literal(result);
- if (result) {
- info->ast_value_factory()->Internalize(isolate);
- if (info->is_eval()) {
- info->set_allow_eval_cache(parser.allow_eval_cache());
- }
- }
-
- if (mode == ReportErrorsAndStatisticsMode::kYes) {
- if (result == nullptr) {
- info->pending_error_handler()->ReportErrors(isolate, script,
- info->ast_value_factory());
- }
- parser.UpdateStatistics(isolate, script);
- }
- return (result != nullptr);
+ parser.ParseFunction(isolate, info, shared_info);
+ MaybeReportErrorsAndStatistics(info, script, isolate, &parser, mode);
+ return info->literal() != nullptr;
}
bool ParseAny(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
DCHECK(!shared_info.is_null());
- if (info->is_toplevel()) {
+ if (info->flags().is_toplevel()) {
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (shared_info->HasOuterScopeInfo()) {
maybe_outer_scope_info =
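
Both ParseProgram and ParseFunction now funnel their error-reporting and statistics tail through the MaybeReportErrorsAndStatistics helper and signal success via info->literal() rather than a returned FunctionLiteral*. A minimal sketch of that shared-epilogue control flow, with placeholder types standing in for ParseInfo and the Parser entry points:

    // Sketch of the shared-epilogue control flow; Info and the parse callback
    // are placeholders, not V8's ParseInfo or Parser.
    #include <functional>

    enum class ReportMode { kYes, kNo };

    struct Info {
      bool has_literal = false;  // stands in for info->literal() != nullptr
    };

    void MaybeReportErrorsAndStatistics(Info* info, ReportMode mode) {
      if (mode == ReportMode::kYes && !info->has_literal) {
        // prepare and report the pending error here
      }
    }

    bool ParseEntryPoint(Info* info, ReportMode mode,
                         const std::function<void(Info*)>& do_parse) {
      do_parse(info);                              // sets has_literal on success
      MaybeReportErrorsAndStatistics(info, mode);  // one shared tail
      return info->has_literal;
    }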
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index 80d201d13f..f131b7ad8e 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -5,24 +5,68 @@
#include "src/parsing/pending-compilation-error-handler.h"
#include "src/ast/ast-value-factory.h"
+#include "src/base/logging.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
#include "src/execution/messages.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
+void PendingCompilationErrorHandler::MessageDetails::SetString(
+ Handle<String> string, Isolate* isolate) {
+ DCHECK_NE(type_, kMainThreadHandle);
+ DCHECK_NE(type_, kOffThreadTransferHandle);
+ type_ = kMainThreadHandle;
+ arg_handle_ = string;
+}
+
+void PendingCompilationErrorHandler::MessageDetails::SetString(
+ Handle<String> string, OffThreadIsolate* isolate) {
+ DCHECK_NE(type_, kMainThreadHandle);
+ DCHECK_NE(type_, kOffThreadTransferHandle);
+ type_ = kOffThreadTransferHandle;
+ arg_transfer_handle_ = isolate->TransferHandle(string);
+}
+
+template <typename LocalIsolate>
+void PendingCompilationErrorHandler::MessageDetails::Prepare(
+ LocalIsolate* isolate) {
+ switch (type_) {
+ case kAstRawString:
+ return SetString(arg_->string(), isolate);
+ case kNone:
+ case kConstCharString:
+ // We can delay allocation until ArgumentString(isolate).
+ // TODO(leszeks): We don't actually have to transfer this string, since
+ // it's a root.
+ return;
+ case kMainThreadHandle:
+ case kOffThreadTransferHandle:
+ UNREACHABLE();
+ }
+}
+
Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
Isolate* isolate) const {
- if (arg_ != nullptr) return arg_->string();
- if (char_arg_ != nullptr) {
- return isolate->factory()
- ->NewStringFromUtf8(CStrVector(char_arg_))
- .ToHandleChecked();
+ switch (type_) {
+ case kMainThreadHandle:
+ return arg_handle_;
+ case kOffThreadTransferHandle:
+ return arg_transfer_handle_.ToHandle();
+ case kNone:
+ return isolate->factory()->undefined_string();
+ case kConstCharString:
+ return isolate->factory()
+ ->NewStringFromUtf8(CStrVector(char_arg_), AllocationType::kOld)
+ .ToHandleChecked();
+ case kAstRawString:
+ UNREACHABLE();
}
- return isolate->factory()->undefined_string();
}
MessageLocation PendingCompilationErrorHandler::MessageDetails::GetLocation(
@@ -37,8 +81,7 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
if (has_pending_error_) return;
has_pending_error_ = true;
- error_details_ =
- MessageDetails(start_position, end_position, message, nullptr, arg);
+ error_details_ = MessageDetails(start_position, end_position, message, arg);
}
void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
@@ -48,8 +91,7 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
if (has_pending_error_) return;
has_pending_error_ = true;
- error_details_ =
- MessageDetails(start_position, end_position, message, arg, nullptr);
+ error_details_ = MessageDetails(start_position, end_position, message, arg);
}
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
@@ -57,11 +99,23 @@ void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
MessageTemplate message,
const char* arg) {
warning_messages_.emplace_front(
- MessageDetails(start_position, end_position, message, nullptr, arg));
+ MessageDetails(start_position, end_position, message, arg));
}
-void PendingCompilationErrorHandler::ReportWarnings(Isolate* isolate,
- Handle<Script> script) {
+template <typename LocalIsolate>
+void PendingCompilationErrorHandler::PrepareWarnings(LocalIsolate* isolate) {
+ DCHECK(!has_pending_error());
+
+ for (MessageDetails& warning : warning_messages_) {
+ warning.Prepare(isolate);
+ }
+}
+template void PendingCompilationErrorHandler::PrepareWarnings(Isolate* isolate);
+template void PendingCompilationErrorHandler::PrepareWarnings(
+ OffThreadIsolate* isolate);
+
+void PendingCompilationErrorHandler::ReportWarnings(
+ Isolate* isolate, Handle<Script> script) const {
DCHECK(!has_pending_error());
for (const MessageDetails& warning : warning_messages_) {
@@ -75,27 +129,33 @@ void PendingCompilationErrorHandler::ReportWarnings(Isolate* isolate,
}
}
-void PendingCompilationErrorHandler::ReportWarnings(OffThreadIsolate* isolate,
- Handle<Script> script) {
- // TODO(leszeks): Do nothing, re-report on the main thread.
- UNREACHABLE();
+template <typename LocalIsolate>
+void PendingCompilationErrorHandler::PrepareErrors(
+ LocalIsolate* isolate, AstValueFactory* ast_value_factory) {
+ if (stack_overflow()) return;
+
+ DCHECK(has_pending_error());
+ // Internalize ast values for throwing the pending error.
+ ast_value_factory->Internalize(isolate);
+ error_details_.Prepare(isolate);
}
+template void PendingCompilationErrorHandler::PrepareErrors(
+ Isolate* isolate, AstValueFactory* ast_value_factory);
+template void PendingCompilationErrorHandler::PrepareErrors(
+ OffThreadIsolate* isolate, AstValueFactory* ast_value_factory);
-void PendingCompilationErrorHandler::ReportErrors(
- Isolate* isolate, Handle<Script> script,
- AstValueFactory* ast_value_factory) {
+void PendingCompilationErrorHandler::ReportErrors(Isolate* isolate,
+ Handle<Script> script) const {
if (stack_overflow()) {
isolate->StackOverflow();
} else {
DCHECK(has_pending_error());
- // Internalize ast values for throwing the pending error.
- ast_value_factory->Internalize(isolate);
ThrowPendingError(isolate, script);
}
}
-void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
- Handle<Script> script) {
+void PendingCompilationErrorHandler::ThrowPendingError(
+ Isolate* isolate, Handle<Script> script) const {
if (!has_pending_error_) return;
MessageLocation location = error_details_.GetLocation(script);
@@ -109,7 +169,8 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
}
Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
- Isolate* isolate) const {
+ Isolate* isolate) {
+ error_details_.Prepare(isolate);
return MessageFormatter::Format(isolate, error_details_.message(),
error_details_.ArgumentString(isolate));
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index b854c3849e..4d15ac91ca 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
+#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -47,15 +48,16 @@ class PendingCompilationErrorHandler {
bool has_pending_warnings() const { return !warning_messages_.empty(); }
// Handle errors detected during parsing.
- void ReportErrors(Isolate* isolate, Handle<Script> script,
- AstValueFactory* ast_value_factory);
+ template <typename LocalIsolate>
+ void PrepareErrors(LocalIsolate* isolate, AstValueFactory* ast_value_factory);
+ void ReportErrors(Isolate* isolate, Handle<Script> script) const;
// Handle warnings detected during compilation.
- void ReportWarnings(Isolate* isolate, Handle<Script> script);
- void ReportWarnings(OffThreadIsolate* isolate, Handle<Script> script);
+ template <typename LocalIsolate>
+ void PrepareWarnings(LocalIsolate* isolate);
+ void ReportWarnings(Isolate* isolate, Handle<Script> script) const;
- V8_EXPORT_PRIVATE Handle<String> FormatErrorMessageForTest(
- Isolate* isolate) const;
+ V8_EXPORT_PRIVATE Handle<String> FormatErrorMessageForTest(Isolate* isolate);
void set_unidentifiable_error() {
has_pending_error_ = true;
@@ -77,30 +79,54 @@ class PendingCompilationErrorHandler {
: start_position_(-1),
end_position_(-1),
message_(MessageTemplate::kNone),
- arg_(nullptr),
- char_arg_(nullptr) {}
+ type_(kNone) {}
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg,
- const char* char_arg)
+ MessageTemplate message, const AstRawString* arg)
: start_position_(start_position),
end_position_(end_position),
message_(message),
arg_(arg),
- char_arg_(char_arg) {}
+ type_(arg ? kAstRawString : kNone) {}
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate message, const char* char_arg)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ char_arg_(char_arg),
+ type_(char_arg_ ? kConstCharString : kNone) {}
Handle<String> ArgumentString(Isolate* isolate) const;
MessageLocation GetLocation(Handle<Script> script) const;
MessageTemplate message() const { return message_; }
+ template <typename LocalIsolate>
+ void Prepare(LocalIsolate* isolate);
+
private:
+ enum Type {
+ kNone,
+ kAstRawString,
+ kConstCharString,
+ kMainThreadHandle,
+ kOffThreadTransferHandle
+ };
+
+ void SetString(Handle<String> string, Isolate* isolate);
+ void SetString(Handle<String> string, OffThreadIsolate* isolate);
+
int start_position_;
int end_position_;
MessageTemplate message_;
- const AstRawString* arg_;
- const char* char_arg_;
+ union {
+ const AstRawString* arg_;
+ const char* char_arg_;
+ Handle<String> arg_handle_;
+ OffThreadTransferHandle<String> arg_transfer_handle_;
+ };
+ Type type_;
};
- void ThrowPendingError(Isolate* isolate, Handle<Script> script);
+ void ThrowPendingError(Isolate* isolate, Handle<Script> script) const;
bool has_pending_error_;
bool stack_overflow_;
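
MessageDetails is now a tagged variant: the Type field records which argument representation is live (AST string, C string, main-thread handle, or off-thread transfer handle), and Prepare() converts the argument into a handle for the target isolate before the error can be thrown. A small self-contained sketch of the same discriminated-variant idea, with std::string standing in for V8's handle types and the members kept side by side rather than in a real union:

    // Self-contained sketch of a tagged variant like MessageDetails.
    // std::string stands in for Handle<String>; the real class uses a union.
    #include <cassert>
    #include <string>

    class Message {
     public:
      static Message FromCString(const char* s) {
        Message m;
        m.char_arg_ = s;
        m.type_ = s ? kConstChar : kNone;
        return m;
      }

      // Convert the raw argument into its final form exactly once.
      void Prepare() {
        if (type_ == kConstChar) {
          prepared_ = char_arg_;
          type_ = kPrepared;
        }
      }

      std::string Argument() const {
        assert(type_ != kConstChar);  // Prepare() must run first
        return type_ == kPrepared ? prepared_ : std::string();
      }

     private:
      enum Type { kNone, kConstChar, kPrepared };
      Type type_ = kNone;
      const char* char_arg_ = nullptr;
      std::string prepared_;
    };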
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index 11165da5ed..707e76236d 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -158,17 +158,20 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
LanguageMode* language_mode) final;
void RestoreScopeAllocationData(DeclarationScope* scope,
- AstValueFactory* ast_value_factory) final;
+ AstValueFactory* ast_value_factory,
+ Zone* zone) final;
#ifdef DEBUG
bool VerifyDataStart();
#endif
private:
- void RestoreDataForScope(Scope* scope, AstValueFactory* ast_value_factory);
+ void RestoreDataForScope(Scope* scope, AstValueFactory* ast_value_factory,
+ Zone* zone);
void RestoreDataForVariable(Variable* var);
void RestoreDataForInnerScopes(Scope* scope,
- AstValueFactory* ast_value_factory);
+ AstValueFactory* ast_value_factory,
+ Zone* zone);
std::unique_ptr<ByteData> scope_data_;
// When consuming the data, these indexes point to the data we're going to
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 7161861b76..d421cb868c 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -613,7 +613,7 @@ BaseConsumedPreparseData<Data>::GetDataForSkippableFunction(
template <class Data>
void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
- DeclarationScope* scope, AstValueFactory* ast_value_factory) {
+ DeclarationScope* scope, AstValueFactory* ast_value_factory, Zone* zone) {
DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
typename ByteData::ReadingScope reading_scope(this);
@@ -628,7 +628,7 @@ void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
DCHECK_EQ(end_position_from_data, scope->end_position());
#endif
- RestoreDataForScope(scope, ast_value_factory);
+ RestoreDataForScope(scope, ast_value_factory, zone);
// Check that we consumed all scope data.
DCHECK_EQ(scope_data_->RemainingBytes(), 0);
@@ -636,7 +636,7 @@ void BaseConsumedPreparseData<Data>::RestoreScopeAllocationData(
template <typename Data>
void BaseConsumedPreparseData<Data>::RestoreDataForScope(
- Scope* scope, AstValueFactory* ast_value_factory) {
+ Scope* scope, AstValueFactory* ast_value_factory, Zone* zone) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->is_skipped_function()) {
return;
@@ -670,7 +670,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
if (scope->AsClassScope()->is_anonymous_class()) {
var = scope->AsClassScope()->DeclareClassVariable(
ast_value_factory, nullptr, kNoSourcePosition);
- AstNodeFactory factory(ast_value_factory, ast_value_factory->zone());
+ AstNodeFactory factory(ast_value_factory, zone);
Declaration* declaration =
factory.NewVariableDeclaration(kNoSourcePosition);
scope->declarations()->Add(declaration);
@@ -692,7 +692,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
if (IsSerializableVariableMode(var->mode())) RestoreDataForVariable(var);
}
- RestoreDataForInnerScopes(scope, ast_value_factory);
+ RestoreDataForInnerScopes(scope, ast_value_factory, zone);
}
template <typename Data>
@@ -732,10 +732,10 @@ void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) {
template <typename Data>
void BaseConsumedPreparseData<Data>::RestoreDataForInnerScopes(
- Scope* scope, AstValueFactory* ast_value_factory) {
+ Scope* scope, AstValueFactory* ast_value_factory, Zone* zone) {
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
- RestoreDataForScope(inner, ast_value_factory);
+ RestoreDataForScope(inner, ast_value_factory, zone);
}
}
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 409942f8c3..aa31326f9f 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -134,8 +134,6 @@ class V8_EXPORT_PRIVATE PreparseDataBuilder : public ZoneObject,
ByteData()
: byte_data_(nullptr), index_(0), free_quarters_in_last_byte_(0) {}
- ~ByteData() {}
-
void Start(std::vector<uint8_t>* buffer);
void Finalize(Zone* zone);
@@ -297,8 +295,9 @@ class ConsumedPreparseData {
// Restores the information needed for allocating the Scope's (and its
// subscopes') variables.
- virtual void RestoreScopeAllocationData(
- DeclarationScope* scope, AstValueFactory* ast_value_factory) = 0;
+ virtual void RestoreScopeAllocationData(DeclarationScope* scope,
+ AstValueFactory* ast_value_factory,
+ Zone* zone) = 0;
protected:
ConsumedPreparseData() = default;
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 464e8e7b17..f9af109d81 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -78,7 +78,7 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
// the global scope.
- if (parsing_module_) scope = NewModuleScope(scope);
+ if (flags().is_module()) scope = NewModuleScope(scope);
FunctionState top_scope(&function_state_, &scope_, scope);
original_scope_ = scope_;
@@ -105,11 +105,9 @@ void PreParserFormalParameters::ValidateStrictMode(PreParser* preparser) const {
PreParser::PreParseResult PreParser::PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionSyntaxKind function_syntax_kind, DeclarationScope* function_scope,
- int* use_counts, ProducedPreparseData** produced_preparse_data,
- int script_id) {
+ int* use_counts, ProducedPreparseData** produced_preparse_data) {
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
use_counts_ = use_counts;
- set_script_id(script_id);
#ifdef DEBUG
function_scope->set_is_being_lazily_parsed(true);
#endif
@@ -268,6 +266,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
int function_token_pos, FunctionSyntaxKind function_syntax_kind,
LanguageMode language_mode,
ZonePtrList<const AstRawString>* arguments_for_wrapped_function) {
+ FunctionParsingScope function_parsing_scope(this);
// Wrapped functions are not parsed in the preparser.
DCHECK_NULL(arguments_for_wrapped_function);
DCHECK_NE(FunctionSyntaxKind::kWrapped, function_syntax_kind);
@@ -358,7 +357,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
name_byte_length = string->byte_length();
}
logger_->FunctionEvent(
- event_name, script_id(), ms, function_scope->start_position(),
+ event_name, flags().script_id(), ms, function_scope->start_position(),
function_scope->end_position(), name, name_byte_length);
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 3c1122ef00..5280e3d226 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -8,6 +8,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/pending-compilation-error-handler.h"
#include "src/parsing/preparser-logger.h"
@@ -921,12 +922,11 @@ class PreParser : public ParserBase<PreParser> {
AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
RuntimeCallStats* runtime_call_stats, Logger* logger,
- int script_id = -1, bool parsing_module = false,
- bool parsing_on_main_thread = true)
+ UnoptimizedCompileFlags flags, bool parsing_on_main_thread = true)
: ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
ast_value_factory, pending_error_handler,
- runtime_call_stats, logger, script_id,
- parsing_module, parsing_on_main_thread),
+ runtime_call_stats, logger, flags,
+ parsing_on_main_thread),
use_counts_(nullptr),
preparse_data_builder_(nullptr),
preparse_data_builder_buffer_() {
@@ -954,8 +954,7 @@ class PreParser : public ParserBase<PreParser> {
PreParseResult PreParseFunction(
const AstRawString* function_name, FunctionKind kind,
FunctionSyntaxKind function_syntax_kind, DeclarationScope* function_scope,
- int* use_counts, ProducedPreparseData** produced_preparser_scope_data,
- int script_id);
+ int* use_counts, ProducedPreparseData** produced_preparser_scope_data);
PreparseDataBuilder* preparse_data_builder() const {
return preparse_data_builder_;
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 75ec661d2d..942acf13f8 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -17,12 +17,12 @@ namespace internal {
class Processor final : public AstVisitor<Processor> {
public:
Processor(uintptr_t stack_limit, DeclarationScope* closure_scope,
- Variable* result, AstValueFactory* ast_value_factory)
+ Variable* result, AstValueFactory* ast_value_factory, Zone* zone)
: result_(result),
replacement_(nullptr),
- zone_(ast_value_factory->zone()),
+ zone_(zone),
closure_scope_(closure_scope),
- factory_(ast_value_factory, ast_value_factory->zone()),
+ factory_(ast_value_factory, zone),
result_assigned_(false),
is_set_(false),
breakable_(false) {
@@ -31,10 +31,10 @@ class Processor final : public AstVisitor<Processor> {
}
Processor(Parser* parser, DeclarationScope* closure_scope, Variable* result,
- AstValueFactory* ast_value_factory)
+ AstValueFactory* ast_value_factory, Zone* zone)
: result_(result),
replacement_(nullptr),
- zone_(ast_value_factory->zone()),
+ zone_(zone),
closure_scope_(closure_scope),
factory_(ast_value_factory, zone_),
result_assigned_(false),
@@ -392,7 +392,7 @@ base::Optional<VariableProxy*> Rewriter::RewriteBody(
Variable* result = scope->AsDeclarationScope()->NewTemporary(
info->ast_value_factory()->dot_result_string());
Processor processor(info->stack_limit(), scope->AsDeclarationScope(),
- result, info->ast_value_factory());
+ result, info->ast_value_factory(), info->zone());
processor.Process(body);
DCHECK_IMPLIES(scope->is_module_scope(), processor.result_assigned());
@@ -400,7 +400,7 @@ base::Optional<VariableProxy*> Rewriter::RewriteBody(
int pos = kNoSourcePosition;
VariableProxy* result_value =
processor.factory()->NewVariableProxy(result, pos);
- if (!info->is_repl_mode()) {
+ if (!info->flags().is_repl_mode()) {
Statement* result_statement =
processor.factory()->NewReturnStatement(result_value, pos);
body->Add(result_statement, info->zone());
@@ -408,7 +408,10 @@ base::Optional<VariableProxy*> Rewriter::RewriteBody(
return result_value;
}
- if (processor.HasStackOverflow()) return base::nullopt;
+ if (processor.HasStackOverflow()) {
+ info->pending_error_handler()->set_stack_overflow();
+ return base::nullopt;
+ }
}
return nullptr;
}
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index fc5194f263..bd4d0284d8 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -364,14 +364,14 @@ V8_INLINE Token::Value Scanner::ScanSingleToken() {
return Select(token);
case Token::CONDITIONAL:
- // ? ?. ??
+ // ? ?. ?? ??=
Advance();
- if (V8_UNLIKELY(allow_harmony_optional_chaining() && c0_ == '.')) {
+ if (c0_ == '.') {
Advance();
if (!IsDecimalDigit(c0_)) return Token::QUESTION_PERIOD;
PushBack('.');
- } else if (V8_UNLIKELY(allow_harmony_nullish() && c0_ == '?')) {
- return Select(Token::NULLISH);
+ } else if (c0_ == '?') {
+ return Select('=', Token::ASSIGN_NULLISH, Token::NULLISH);
}
return Token::CONDITIONAL;
@@ -471,16 +471,16 @@ V8_INLINE Token::Value Scanner::ScanSingleToken() {
return Token::DIV;
case Token::BIT_AND:
- // & && &=
+ // & && &= &&=
Advance();
- if (c0_ == '&') return Select(Token::AND);
+ if (c0_ == '&') return Select('=', Token::ASSIGN_AND, Token::AND);
if (c0_ == '=') return Select(Token::ASSIGN_BIT_AND);
return Token::BIT_AND;
case Token::BIT_OR:
- // | || |=
+ // | || |= ||=
Advance();
- if (c0_ == '|') return Select(Token::OR);
+ if (c0_ == '|') return Select('=', Token::ASSIGN_OR, Token::OR);
if (c0_ == '=') return Select(Token::ASSIGN_BIT_OR);
return Token::BIT_OR;
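
With optional chaining and nullish coalescing no longer gated behind runtime flags, the scanner also recognizes the logical assignment forms ??=, &&=, and ||= by peeking one character past the two-character operator. A hedged sketch of that lookahead with a simplified Select-style helper (the real Scanner::Select overloads are not reproduced here):

    // Sketch of scanning "??", "??=", "&&", "&&=", "||", "||=" with one
    // character of lookahead. Token names and the tiny Lexer are illustrative.
    #include <cstddef>
    #include <string>

    enum class Tok { Nullish, AssignNullish, And, AssignAnd, Or, AssignOr, Other };

    class Lexer {
     public:
      explicit Lexer(std::string src) : src_(std::move(src)) {}

      Tok Next() {
        char c = Get();
        if (c == '?' && Peek() == '?') { Get(); return SelectEq(Tok::AssignNullish, Tok::Nullish); }
        if (c == '&' && Peek() == '&') { Get(); return SelectEq(Tok::AssignAnd, Tok::And); }
        if (c == '|' && Peek() == '|') { Get(); return SelectEq(Tok::AssignOr, Tok::Or); }
        return Tok::Other;
      }

     private:
      // If the next character is '=', consume it and return |then|; else |other|.
      Tok SelectEq(Tok then, Tok other) {
        if (Peek() == '=') { Get(); return then; }
        return other;
      }
      char Peek() const { return pos_ < src_.size() ? src_[pos_] : '\0'; }
      char Get() { return pos_ < src_.size() ? src_[pos_++] : '\0'; }

      std::string src_;
      size_t pos_ = 0;
    };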
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 91e4183d53..52a1bf0724 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -13,6 +13,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/bigint.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-inl.h"
#include "src/zone/zone.h"
@@ -89,12 +90,10 @@ bool Scanner::BookmarkScope::HasBeenApplied() const {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(Utf16CharacterStream* source, bool is_module)
- : source_(source),
+Scanner::Scanner(Utf16CharacterStream* source, UnoptimizedCompileFlags flags)
+ : flags_(flags),
+ source_(source),
found_html_comment_(false),
- allow_harmony_optional_chaining_(false),
- allow_harmony_nullish_(false),
- is_module_(is_module),
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone) {
DCHECK_NOT_NULL(source);
@@ -190,7 +189,7 @@ Token::Value Scanner::PeekAhead() {
}
Token::Value Scanner::SkipSingleHTMLComment() {
- if (is_module_) {
+ if (flags_.is_module()) {
ReportScannerError(source_pos(), MessageTemplate::kHtmlCommentInModule);
return Token::ILLEGAL;
}
@@ -233,9 +232,9 @@ void Scanner::TryToParseSourceURLComment() {
if (!name.is_one_byte()) return;
Vector<const uint8_t> name_literal = name.one_byte_literal();
LiteralBuffer* value;
- if (name_literal == StaticCharVector("sourceURL")) {
+ if (name_literal == StaticOneByteVector("sourceURL")) {
value = &source_url_;
- } else if (name_literal == StaticCharVector("sourceMappingURL")) {
+ } else if (name_literal == StaticOneByteVector("sourceMappingURL")) {
value = &source_mapping_url_;
} else {
return;
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index bed63c9d4e..830067e1ad 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -15,6 +15,7 @@
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/parsing/literal-buffer.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
#include "src/strings/char-predicates.h"
#include "src/strings/unicode.h"
@@ -269,7 +270,7 @@ class V8_EXPORT_PRIVATE Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(Utf16CharacterStream* source, bool is_module);
+ explicit Scanner(Utf16CharacterStream* source, UnoptimizedCompileFlags flags);
void Initialize();
@@ -410,18 +411,6 @@ class V8_EXPORT_PRIVATE Scanner {
bool FoundHtmlComment() const { return found_html_comment_; }
- bool allow_harmony_optional_chaining() const {
- return allow_harmony_optional_chaining_;
- }
-
- void set_allow_harmony_optional_chaining(bool allow) {
- allow_harmony_optional_chaining_ = allow;
- }
-
- bool allow_harmony_nullish() const { return allow_harmony_nullish_; }
-
- void set_allow_harmony_nullish(bool allow) { allow_harmony_nullish_ = allow; }
-
const Utf16CharacterStream* stream() const { return source_; }
private:
@@ -715,6 +704,8 @@ class V8_EXPORT_PRIVATE Scanner {
const TokenDesc& next() const { return *next_; }
const TokenDesc& next_next() const { return *next_next_; }
+ UnoptimizedCompileFlags flags_;
+
TokenDesc* current_; // desc for current token (as returned by Next())
TokenDesc* next_; // desc for next token (one token look-ahead)
  TokenDesc* next_next_;  // desc for the token after next (after PeekAhead())
@@ -730,12 +721,6 @@ class V8_EXPORT_PRIVATE Scanner {
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
- // Harmony flags to allow ESNext features.
- bool allow_harmony_optional_chaining_;
- bool allow_harmony_nullish_;
-
- const bool is_module_;
-
// Values parsed from magic comments.
LiteralBuffer source_url_;
LiteralBuffer source_mapping_url_;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 4749945ebd..ef92238de2 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -31,6 +31,9 @@ namespace internal {
/* Binary operators */
/* ADD and SUB are at the end since they are UnaryOp */
#define BINARY_OP_TOKEN_LIST(T, E) \
+ E(T, NULLISH, "??", 3) \
+ E(T, OR, "||", 4) \
+ E(T, AND, "&&", 5) \
E(T, BIT_OR, "|", 6) \
E(T, BIT_XOR, "^", 7) \
E(T, BIT_AND, "&", 8) \
@@ -97,9 +100,6 @@ namespace internal {
/* IsBinaryOp() relies on this block of enum values */ \
/* being contiguous and sorted in the same order! */ \
T(COMMA, ",", 1) \
- T(NULLISH, "??", 3) \
- T(OR, "||", 4) \
- T(AND, "&&", 5) \
\
/* Unary operators, starting at ADD in BINARY_OP_TOKEN_LIST */ \
/* IsUnaryOp() relies on this block of enum values */ \
@@ -297,8 +297,8 @@ class V8_EXPORT_PRIVATE Token {
}
static Value BinaryOpForAssignment(Value op) {
- DCHECK(base::IsInRange(op, ASSIGN_BIT_OR, ASSIGN_SUB));
- Value result = static_cast<Value>(op - ASSIGN_BIT_OR + BIT_OR);
+ DCHECK(base::IsInRange(op, ASSIGN_NULLISH, ASSIGN_SUB));
+ Value result = static_cast<Value>(op - ASSIGN_NULLISH + NULLISH);
DCHECK(IsBinaryOp(result));
return result;
}
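
Moving NULLISH, OR, and AND into BINARY_OP_TOKEN_LIST keeps the ASSIGN_* block and the binary-operator block in the same relative order, which is what lets BinaryOpForAssignment remain a constant-offset subtraction. A small illustration of that parallel-enum trick, with made-up enumerators:

    // Sketch of mapping a compound-assignment token to its binary operator by
    // keeping the two enum blocks in the same order. Enumerators are illustrative.
    #include <cassert>

    enum Token {
      // assignment block (same order as the binary block below)
      kAssignNullish, kAssignOr, kAssignAnd, kAssignAdd,
      // binary block
      kNullish, kOr, kAnd, kAdd,
    };

    inline Token BinaryOpForAssignment(Token op) {
      assert(op >= kAssignNullish && op <= kAssignAdd);
      return static_cast<Token>(op - kAssignNullish + kNullish);
    }

    // e.g. BinaryOpForAssignment(kAssignOr) == kOr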
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 4c35159b2e..5f22a3d2fb 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/execution/vm-state-inl.h"
+#include "src/libsampler/sampler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
@@ -84,7 +85,6 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
code_observer_(code_observer),
- running_(1),
last_code_event_id_(0),
last_processed_code_event_id_(0),
isolate_(isolate) {
@@ -149,7 +149,10 @@ void ProfilerEventsProcessor::AddSample(TickSample sample) {
}
void ProfilerEventsProcessor::StopSynchronously() {
- if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
+ bool expected = true;
+ if (!running_.compare_exchange_strong(expected, false,
+ std::memory_order_relaxed))
+ return;
{
base::MutexGuard guard(&running_mutex_);
running_cond_.NotifyOne();
@@ -224,7 +227,7 @@ SamplingEventsProcessor::ProcessOneSample() {
void SamplingEventsProcessor::Run() {
base::MutexGuard guard(&running_mutex_);
- while (!!base::Relaxed_Load(&running_)) {
+ while (running_.load(std::memory_order_relaxed)) {
base::TimeTicks nextSampleTime =
base::TimeTicks::HighResolutionNow() + period_;
base::TimeTicks now;
@@ -261,7 +264,7 @@ void SamplingEventsProcessor::Run() {
// If true was returned, we got interrupted before the timeout
// elapsed. If this was not due to a change in running state, a
// spurious wakeup occurred (thus we should continue to wait).
- if (!base::Relaxed_Load(&running_)) {
+ if (!running_.load(std::memory_order_relaxed)) {
break;
}
now = base::TimeTicks::HighResolutionNow();
@@ -287,7 +290,7 @@ void SamplingEventsProcessor::SetSamplingInterval(base::TimeDelta period) {
StopSynchronously();
period_ = period;
- base::Relaxed_Store(&running_, 1);
+ running_.store(true, std::memory_order_relaxed);
StartSynchronously();
}
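
The run flag changes from base::Atomic32 with Relaxed_AtomicExchange to std::atomic<bool>, and StopSynchronously uses compare_exchange_strong so that only the caller that flips the flag from true to false carries out the shutdown. A sketch of that stop-once idiom, using the standard library's mutex and condition variable in place of V8's wrappers:

    // Sketch of the stop-once idiom with std::atomic<bool>; std::mutex and
    // std::condition_variable stand in for V8's base::Mutex/ConditionVariable.
    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class Worker {
     public:
      void StopSynchronously() {
        bool expected = true;
        // Only the thread that flips true->false proceeds with the shutdown.
        if (!running_.compare_exchange_strong(expected, false,
                                              std::memory_order_relaxed)) {
          return;
        }
        std::lock_guard<std::mutex> guard(mutex_);
        cv_.notify_one();  // wake the run loop so it observes running_ == false
      }

      void Run() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (running_.load(std::memory_order_relaxed)) {
          cv_.wait_for(lock, std::chrono::milliseconds(1));
          // ... take one sample per wakeup here ...
        }
      }

     private:
      std::atomic<bool> running_{true};
      std::mutex mutex_;
      std::condition_variable cv_;
    };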
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 093f28aba3..e3ff5bb734 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -5,30 +5,28 @@
#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_
+#include <atomic>
#include <memory>
-#include "src/base/atomic-utils.h"
-#include "src/base/atomicops.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
-#include "src/execution/isolate.h"
-#include "src/handles/maybe-handles.h"
-#include "src/libsampler/sampler.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tick-sample.h"
-#include "src/utils/allocation.h"
#include "src/utils/locked-queue.h"
namespace v8 {
+namespace sampler {
+class Sampler;
+}
namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
-class CpuProfile;
class CpuProfilesCollection;
+class Isolate;
class ProfileGenerator;
#define CODE_EVENTS_TYPE_LIST(V) \
@@ -110,6 +108,8 @@ class NativeContextMoveEventRecord : public CodeEventRecord {
Address to_address;
};
+// A record type for sending samples from the main thread/signal handler to the
+// profiling thread.
class TickSampleEventRecord {
public:
// The parameterless constructor is used when we dequeue data from
@@ -121,7 +121,8 @@ class TickSampleEventRecord {
TickSample sample;
};
-
+// A record type for sending code events (e.g. create, move, delete) to the
+// profiling thread.
class CodeEventsContainer {
public:
explicit CodeEventsContainer(
@@ -162,7 +163,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
// Thread control.
void Run() override = 0;
void StopSynchronously();
- V8_INLINE bool running() { return !!base::Relaxed_Load(&running_); }
+ bool running() { return running_.load(std::memory_order_relaxed); }
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into the tick sample events buffer.
@@ -189,7 +190,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
ProfileGenerator* generator_;
ProfilerCodeObserver* code_observer_;
- base::Atomic32 running_;
+ std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
base::Mutex running_mutex_;
LockedQueue<CodeEventsContainer> events_buffer_;
@@ -276,6 +277,26 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
ProfilerEventsProcessor* processor_;
};
+// The CpuProfiler is a sampling CPU profiler for JS frames. It corresponds to
+// v8::CpuProfiler at the API level. It spawns an additional thread which is
+// responsible for triggering samples and then symbolizing the samples with
+// function names. To symbolize on a background thread, the profiler copies
+// metadata about generated code off-heap.
+//
+// Sampling is done using posix signals (except on Windows). The profiling
+// thread sends a signal to the main thread, based on a timer. The signal
+// handler can interrupt the main thread between any arbitrary instructions.
+// This means we are very careful about reading stack values during the signal
+// handler as we could be in the middle of an operation that is modifying the
+// stack.
+//
+// The story on Windows is similar except we use thread suspend and resume.
+//
+// Samples are passed to the profiling thread via a circular buffer. The
+// profiling thread symbolizes the samples by looking up the code pointers
+// against its own list of code objects. The profiling thread also listens for
+// code creation/move/deletion events (from the GC), to maintain its list of
+// code objects accurately.
class V8_EXPORT_PRIVATE CpuProfiler {
public:
explicit CpuProfiler(Isolate* isolate, CpuProfilingNamingMode = kDebugNaming,
@@ -314,7 +335,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
- ProfilerListener* profiler_listener_for_test() {
+ ProfilerListener* profiler_listener_for_test() const {
return profiler_listener_.get();
}
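
The class comment above describes samples flowing from the signal handler to the profiling thread through a circular buffer; a lock-free single-producer/single-consumer queue fits that shape because a signal handler cannot safely take locks. A minimal SPSC ring-buffer sketch, not V8's circular-queue implementation:

    // Minimal single-producer/single-consumer ring buffer as a sketch of the
    // sample queue described above; not V8's actual circular queue.
    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <optional>

    template <typename T, size_t N>
    class SpscRing {
     public:
      bool Push(const T& value) {  // producer side only (e.g. signal handler)
        size_t head = head_.load(std::memory_order_relaxed);
        size_t next = (head + 1) % N;
        if (next == tail_.load(std::memory_order_acquire)) return false;  // full
        buffer_[head] = value;
        head_.store(next, std::memory_order_release);
        return true;
      }

      std::optional<T> Pop() {  // consumer side only (profiling thread)
        size_t tail = tail_.load(std::memory_order_relaxed);
        if (tail == head_.load(std::memory_order_acquire)) return std::nullopt;
        T value = buffer_[tail];
        tail_.store((tail + 1) % N, std::memory_order_release);
        return value;
      }

     private:
      std::array<T, N> buffer_{};
      std::atomic<size_t> head_{0};
      std::atomic<size_t> tail_{0};
    };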
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index c5fcdcd713..f063fd1f47 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -216,14 +216,27 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
{
- CombinedHeapObjectIterator function_heap_iterator(
+ HandleScope handle_scope(isolate());
+ std::vector<Handle<JSTypedArray>> on_heap_typed_arrays;
+ CombinedHeapObjectIterator heap_iterator(
heap(), HeapObjectIterator::kFilterUnreachable);
- for (HeapObject heap_obj = function_heap_iterator.Next();
- !heap_obj.is_null(); heap_obj = function_heap_iterator.Next()) {
+ for (HeapObject heap_obj = heap_iterator.Next(); !heap_obj.is_null();
+ heap_obj = heap_iterator.Next()) {
if (heap_obj.IsFeedbackVector()) {
FeedbackVector::cast(heap_obj).ClearSlots(isolate());
+ } else if (heap_obj.IsJSTypedArray() &&
+ JSTypedArray::cast(heap_obj).is_on_heap()) {
+ // Cannot call typed_array->GetBuffer() here directly because it may
+ // trigger GC. Defer that call by collecting the object in a vector.
+ on_heap_typed_arrays.push_back(
+ handle(JSTypedArray::cast(heap_obj), isolate()));
}
}
+ for (auto& typed_array : on_heap_typed_arrays) {
+ // Convert the on-heap typed array into off-heap typed array, so that
+ // its ArrayBuffer becomes valid and can be returned in the result.
+ typed_array->GetBuffer();
+ }
}
// We should return accurate information about live objects, so we need to
// collect all garbage first.
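
QueryObjects now records on-heap typed arrays in a handle vector while the heap iterator is live and only calls GetBuffer() afterwards, since GetBuffer() may allocate and trigger a GC that would disturb the iteration. A generic sketch of that collect-first, mutate-after pattern, with placeholder types:

    // Sketch of deferring allocating/mutating work until after heap iteration.
    // Object and the fixup callback are placeholders, not V8 types.
    #include <vector>

    struct Object {
      bool needs_fixup = false;
    };

    template <typename Iterator, typename Fixup>
    void CollectThenFixup(Iterator begin, Iterator end, Fixup fixup) {
      std::vector<Object*> pending;
      for (Iterator it = begin; it != end; ++it) {
        if (it->needs_fixup) pending.push_back(&*it);  // record only, no GC risk
      }
      // Iteration is finished; now it is safe to run work that may trigger GC.
      for (Object* obj : pending) fixup(obj);
    }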
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 2ae2222419..16e87e43c7 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1487,7 +1487,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
// its custom name to a generic builtin.
RootsReferencesExtractor extractor(this);
ReadOnlyRoots(heap_).Iterate(&extractor);
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ heap_->IterateRoots(&extractor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
+ // TODO(ulan): The heap snapshot generator incorrectly considers the weak
+ // string tables as strong retainers. Move IterateWeakRoots after
+ // SetVisitingWeakRoots.
+ heap_->IterateWeakRoots(&extractor, {});
extractor.SetVisitingWeakRoots();
heap_->IterateWeakGlobalHandles(&extractor);
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index b8389f4350..42ff71c2bb 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/codegen/source-position.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 9f7ef34d18..e71a0abaea 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -17,7 +17,6 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
-#include "src/codegen/source-position.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index e7d780e084..19a15481fe 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -74,15 +74,13 @@ SamplingHeapProfiler::~SamplingHeapProfiler() {
void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowHeapAllocation no_allocation;
+ // Check if the area is iterable by confirming that it starts with a map.
+ DCHECK((*ObjectSlot(soon_object)).IsMap());
+
HandleScope scope(isolate_);
HeapObject heap_object = HeapObject::FromAddress(soon_object);
Handle<Object> obj(heap_object, isolate_);
- // Mark the new block as FreeSpace to make sure the heap is iterable while we
- // are taking the sample.
- heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size),
- ClearRecordedSlots::kNo);
-
Local<v8::Value> loc = v8::Utils::ToLocal(obj);
AllocationNode* node = AddStack();
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index 26092885a9..afed9ca73b 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -4,6 +4,7 @@
#include "src/profiler/tracing-cpu-profiler.h"
+#include "src/execution/isolate.h"
#include "src/init/v8.h"
#include "src/profiler/cpu-profiler.h"
#include "src/tracing/trace-event.h"
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 03dac337e0..10dad83c28 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -945,25 +945,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kARMImplementation;
}
-void RegExpMacroAssemblerARM::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerARM::PopCurrentPosition() {
Pop(current_input_offset());
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 22628fb760..549636a674 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -67,9 +67,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -127,10 +126,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
static const int kBacktrackConstantPoolSize = 4;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
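
Every architecture's LoadCurrentCharacterImpl was identical, so the patch drops the per-architecture copies and keeps only the architecture-specific LoadCurrentCharacterUnchecked hook, presumably with the bounds-check wrapper hoisted into the shared base class (the base-class side is not shown in this hunk). A sketch of that template-method shape, reconstructed from the deleted bodies:

    // Sketch of hoisting the shared bounds-check wrapper into a base class,
    // reconstructed from the deleted per-architecture bodies; not V8's exact API.
    #include <cassert>

    class Label;

    class RegExpMacroAssemblerBase {
     public:
      virtual ~RegExpMacroAssemblerBase() = default;

      void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
                                bool check_bounds, int characters,
                                int eats_at_least) {
        // Preloading more characters than any success path consumes is invalid.
        assert(eats_at_least >= characters);
        assert(cp_offset < (1 << 30));  // keep negation well-defined
        if (check_bounds) {
          if (cp_offset >= 0) {
            CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
          } else {
            CheckPosition(cp_offset, on_end_of_input);
          }
        }
        LoadCurrentCharacterUnchecked(cp_offset, characters);
      }

     protected:
      // Architecture-specific hooks.
      virtual void CheckPosition(int cp_offset, Label* on_outside_input) = 0;
      virtual void LoadCurrentCharacterUnchecked(int cp_offset,
                                                 int character_count) = 0;
    };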
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 43a6bdf912..055f5639f5 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -1131,28 +1131,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kARM64Implementation;
}
-void RegExpMacroAssemblerARM64::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- // TODO(pielan): Make sure long strings are caught before this, and not
- // just asserted in debug mode.
- // Be sane! (And ensure that an int32_t can be used to index the string)
- DCHECK(cp_offset < (1<<30));
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerARM64::PopCurrentPosition() {
Pop(current_input_offset());
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 91b5e90bf5..2b5feb1dbd 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -73,9 +73,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -139,10 +138,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// 16 registers.
static const int kNumCachedRegisters = 16;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 7f6bd5e296..501a0aff60 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -981,25 +981,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kIA32Implementation;
}
-void RegExpMacroAssemblerIA32::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerIA32::PopCurrentPosition() {
Pop(edi);
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index f68dd0b1b7..2339ca57e1 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -66,9 +66,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -127,10 +126,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index e3f2ea6292..5f8eb4c6d3 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -958,25 +958,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kMIPSImplementation;
}
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
Pop(current_input_offset());
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 5733bbe046..cafa785180 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -67,9 +67,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -128,10 +127,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index fc3cad8b0e..c443c8da46 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -994,25 +994,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kMIPSImplementation;
}
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
Pop(current_input_offset());
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index b267297c24..161a01e2fc 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -67,9 +67,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -133,10 +132,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 376103324a..5a6eb31510 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -235,7 +235,7 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
__ cmp(current_input_offset(), r3);
__ bne(&backtrack_non_equal);
__ addi(backtrack_stackpointer(), backtrack_stackpointer(),
- Operand(kPointerSize));
+ Operand(kSystemPointerSize));
__ bind(&backtrack_non_equal);
BranchOrBacktrack(eq, on_equal);
@@ -687,7 +687,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ MultiPush(argument_registers | registers_to_retain);
// Set frame pointer in space for it if this is not a direct call
// from generated code.
- __ addi(frame_pointer(), sp, Operand(8 * kPointerSize));
+ __ addi(frame_pointer(), sp, Operand(8 * kSystemPointerSize));
STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
__ li(r3, Operand::Zero());
@@ -711,7 +711,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ ble(&stack_limit_hit, cr0);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ Cmpli(r3, Operand(num_registers_ * kPointerSize), r0);
+ __ Cmpli(r3, Operand(num_registers_ * kSystemPointerSize), r0);
__ bge(&stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -727,7 +727,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ Add(sp, sp, -num_registers_ * kPointerSize, r0);
+ __ Add(sp, sp, -num_registers_ * kSystemPointerSize, r0);
// Load string end.
__ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
@@ -769,12 +769,13 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
- __ addi(r4, frame_pointer(), Operand(kRegisterZero + kPointerSize));
+ __ addi(r4, frame_pointer(),
+ Operand(kRegisterZero + kSystemPointerSize));
__ mov(r5, Operand(num_saved_registers_));
__ mtctr(r5);
Label init_loop;
__ bind(&init_loop);
- __ StorePU(r3, MemOperand(r4, -kPointerSize));
+ __ StorePU(r3, MemOperand(r4, -kSystemPointerSize));
__ bdnz(&init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
@@ -992,25 +993,6 @@ RegExpMacroAssemblerPPC::Implementation() {
return kPPCImplementation;
}
-void RegExpMacroAssemblerPPC::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerPPC::PopCurrentPosition() {
Pop(current_input_offset());
@@ -1122,15 +1104,17 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// The following stack manipulation logic is similar to
// PrepareCallCFunction. However, we need an extra slot on the
// stack to house the return address parameter.
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
__ mr(scratch, sp);
- __ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+ __ addi(sp, sp,
+ Operand(-(stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ ClearRightImm(sp, sp,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
- __ StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ __ StoreP(scratch,
+ MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
// Make room for stack arguments
stack_space += stack_passed_arguments;
@@ -1138,14 +1122,14 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// Allocate frame with required slots to make ABI work.
__ li(r0, Operand::Zero());
- __ StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+ __ StorePU(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
// RegExp code frame pointer.
__ mr(r5, frame_pointer());
// Code of self.
__ mov(r4, Operand(masm_->CodeObject()));
// r3 will point to the return address, placed by DirectCEntry.
- __ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kPointerSize));
+ __ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kSystemPointerSize));
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1159,10 +1143,10 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// Restore the stack pointer
stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
- if (frame_alignment > kPointerSize) {
- __ LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ if (frame_alignment > kSystemPointerSize) {
+ __ LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
- __ addi(sp, sp, Operand(stack_space * kPointerSize));
+ __ addi(sp, sp, Operand(stack_space * kSystemPointerSize));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1203,7 +1187,7 @@ MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
+ kRegisterZero - register_index * kSystemPointerSize);
}
@@ -1265,7 +1249,7 @@ void RegExpMacroAssemblerPPC::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerPPC::Push(Register source) {
DCHECK(source != backtrack_stackpointer());
- __ StorePU(source, MemOperand(backtrack_stackpointer(), -kPointerSize));
+ __ StorePU(source, MemOperand(backtrack_stackpointer(), -kSystemPointerSize));
}
@@ -1273,7 +1257,7 @@ void RegExpMacroAssemblerPPC::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
__ LoadP(target, MemOperand(backtrack_stackpointer()));
__ addi(backtrack_stackpointer(), backtrack_stackpointer(),
- Operand(kPointerSize));
+ Operand(kSystemPointerSize));
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 3e64f139a8..598691d988 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -59,9 +59,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -92,26 +91,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// Register 25..31.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 7 * kPointerSize;
- static const int kCallerFrame = kReturnAddress + kPointerSize;
+ static const int kReturnAddress = kStoredRegisters + 7 * kSystemPointerSize;
+ static const int kCallerFrame = kReturnAddress + kSystemPointerSize;
// Stack parameters placed by caller.
static const int kIsolate =
- kCallerFrame + kStackFrameExtraParamSlot * kPointerSize;
+ kCallerFrame + kStackFrameExtraParamSlot * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kPointerSize;
- static const int kStackHighEnd = kDirectCall - kPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
- static const int kInputEnd = kRegisterOutput - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static const int kDirectCall = kFramePointer - kSystemPointerSize;
+ static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
@@ -119,10 +119,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
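
Aside (not part of the patch): the PPC hunks above replace kPointerSize with kSystemPointerSize in every stack-frame offset. The sketch below illustrates the intent under the assumption of a 64-bit build with pointer compression: machine stack slots always occupy a full system pointer, even though tagged fields may shrink to 4 bytes, so the frame-offset chain has to be expressed in system-pointer units. Constant values here are assumptions for illustration only.

```cpp
#include <cstdio>

constexpr int kSystemPointerSize = static_cast<int>(sizeof(void*));  // 8 on a 64-bit target
constexpr int kTaggedSize = 4;  // assumed: compressed tagged values

// Mirrors the offset chain in the header: one machine word per stack slot.
constexpr int kFramePointer = 0;
constexpr int kDirectCall = kFramePointer - kSystemPointerSize;
constexpr int kStackHighEnd = kDirectCall - kSystemPointerSize;
constexpr int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;

int main() {
  std::printf("slot size=%d, tagged size=%d, kNumOutputRegisters offset=%d\n",
              kSystemPointerSize, kTaggedSize, kNumOutputRegisters);
  return 0;
}
```
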
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 85073cc99d..fdb9b46861 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -22,61 +22,60 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
// determines the initial buffer size. The buffer is owned by the assembler
// and deallocated upon destruction of the assembler.
RegExpBytecodeGenerator(Isolate* isolate, Zone* zone);
- virtual ~RegExpBytecodeGenerator();
+ ~RegExpBytecodeGenerator() override;
// The byte-code interpreter checks on each push anyway.
- virtual int stack_limit_slack() { return 1; }
- virtual bool CanReadUnaligned() { return false; }
- virtual void Bind(Label* label);
- virtual void AdvanceCurrentPosition(int by); // Signed cp change.
- virtual void PopCurrentPosition();
- virtual void PushCurrentPosition();
- virtual void Backtrack();
- virtual void GoTo(Label* label);
- virtual void PushBacktrack(Label* label);
- virtual bool Succeed();
- virtual void Fail();
- virtual void PopRegister(int register_index);
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void WriteStackPointerToRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckAtStart(int cp_offset, Label* on_at_start);
- virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual void CheckNotBackReference(int start_reg, bool read_backward,
- Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
- Label* on_no_match);
- virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
- virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
- virtual void IfRegisterEqPos(int register_index, Label* if_eq);
+ int stack_limit_slack() override { return 1; }
+ bool CanReadUnaligned() override { return false; }
+ void Bind(Label* label) override;
+ void AdvanceCurrentPosition(int by) override; // Signed cp change.
+ void PopCurrentPosition() override;
+ void PushCurrentPosition() override;
+ void Backtrack() override;
+ void GoTo(Label* label) override;
+ void PushBacktrack(Label* label) override;
+ bool Succeed() override;
+ void Fail() override;
+ void PopRegister(int register_index) override;
+ void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) override;
+ void AdvanceRegister(int reg, int by) override; // r[reg] += by.
+ void SetCurrentPositionFromEnd(int by) override;
+ void SetRegister(int register_index, int to) override;
+ void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
+ void ClearRegisters(int reg_from, int reg_to) override;
+ void ReadCurrentPositionFromRegister(int reg) override;
+ void WriteStackPointerToRegister(int reg) override;
+ void ReadStackPointerFromRegister(int reg) override;
+ void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+ bool check_bounds, int characters,
+ int eats_at_least) override;
+ void CheckCharacter(unsigned c, Label* on_equal) override;
+ void CheckCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_equal) override;
+ void CheckCharacterGT(uc16 limit, Label* on_greater) override;
+ void CheckCharacterLT(uc16 limit, Label* on_less) override;
+ void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
+ void CheckAtStart(int cp_offset, Label* on_at_start) override;
+ void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
+ void CheckNotCharacter(unsigned c, Label* on_not_equal) override;
+ void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_not_equal) override;
+ void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+ Label* on_not_equal) override;
+ void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range) override;
+ void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) override;
+ void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) override;
+ void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match) override;
+ void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ Label* on_no_match) override;
+ void IfRegisterLT(int register_index, int comparand, Label* if_lt) override;
+ void IfRegisterGE(int register_index, int comparand, Label* if_ge) override;
+ void IfRegisterEqPos(int register_index, Label* if_eq) override;
- virtual IrregexpImplementation Implementation();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
+ IrregexpImplementation Implementation() override;
+ Handle<HeapObject> GetCode(Handle<String> source) override;
private:
void Expand();
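
Aside (illustrative, not part of the patch): the hunk above mechanically converts `virtual` declarations to `override` in RegExpBytecodeGenerator. A minimal sketch of why that conversion is worthwhile, with simplified stand-in types:

```cpp
struct RegExpMacroAssemblerBase {
  virtual ~RegExpMacroAssemblerBase() = default;
  virtual int stack_limit_slack() { return 0; }
};

struct BytecodeGenerator final : RegExpMacroAssemblerBase {
  // `override` makes the compiler verify this really overrides a base method;
  // a stray `const` or a changed parameter would now be a hard error instead
  // of silently declaring a new, unrelated virtual.
  int stack_limit_slack() override { return 1; }
};

int main() {
  BytecodeGenerator gen;
  RegExpMacroAssemblerBase* base = &gen;
  return base->stack_limit_slack() == 1 ? 0 : 1;
}
```
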
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index 40ecee0f91..9496de83e1 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -439,6 +439,8 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
+ static constexpr int kMaxRangesToInline = 32; // Arbitrary.
+ if (ranges->length() > kMaxRangesToInline) result->SetDoNotInline();
return result;
}
} else {
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index a6c7cdbe2f..a04180fd34 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -223,7 +223,7 @@ class RecursionCheck {
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
bool one_byte)
- : next_register_(2 * (capture_count + 1)),
+ : next_register_(JSRegExp::RegistersForCaptureCount(capture_count)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
work_list_(nullptr),
@@ -269,7 +269,7 @@ RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
isolate->IncreaseTotalRegexpCodeGenerated(code);
work_list_ = nullptr;
- return {*code, next_register_};
+ return {code, next_register_};
}
bool Trace::DeferredAction::Mentions(int that) {
@@ -3804,26 +3804,24 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
if (initial_offset == 0) set_bm_info(not_at_start, bm);
}
-// static
RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
- RegExpCompiler* compiler, RegExpNode* on_success, JSRegExp::Flags flags) {
- DCHECK(!compiler->read_backward());
- Zone* zone = compiler->zone();
+ RegExpNode* on_success, JSRegExp::Flags flags) {
+ DCHECK(!read_backward());
ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+ zone(), CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+ zone(), CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
- ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
+ ChoiceNode* optional_step_back = new (zone()) ChoiceNode(2, zone());
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
+ int stack_register = UnicodeLookaroundStackRegister();
+ int position_register = UnicodeLookaroundPositionRegister();
RegExpNode* step_back = TextNode::CreateForCharacterRanges(
- zone, lead_surrogates, true, on_success, flags);
+ zone(), lead_surrogates, true, on_success, flags);
RegExpLookaround::Builder builder(true, step_back, stack_register,
position_register);
RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
- zone, trail_surrogates, false, builder.on_match_success(), flags);
+ zone(), trail_surrogates, false, builder.on_match_success(), flags);
optional_step_back->AddAlternative(
GuardedAlternative(builder.ForMatch(match_trail)));
@@ -3832,5 +3830,49 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
return optional_step_back;
}
+RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
+ JSRegExp::Flags flags,
+ bool is_one_byte) {
+ // Wrap the body of the regexp in capture #0.
+ RegExpNode* captured_body =
+ RegExpCapture::ToNode(data->tree, 0, this, accept());
+ RegExpNode* node = captured_body;
+ if (!data->tree->IsAnchoredAtStart() && !IsSticky(flags)) {
+ // Add a .*? at the beginning, outside the body capture, unless
+ // this expression is anchored at the beginning or sticky.
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ RegExpNode* loop_node = RegExpQuantifier::ToNode(
+ 0, RegExpTree::kInfinity, false,
+ new (zone()) RegExpCharacterClass('*', default_flags), this,
+ captured_body, data->contains_anchor);
+
+ if (data->contains_anchor) {
+ // Unroll loop once, to take care of the case that might start
+ // at the start of input.
+ ChoiceNode* first_step_node = new (zone()) ChoiceNode(2, zone());
+ first_step_node->AddAlternative(GuardedAlternative(captured_body));
+ first_step_node->AddAlternative(GuardedAlternative(new (zone()) TextNode(
+ new (zone()) RegExpCharacterClass('*', default_flags), false,
+ loop_node)));
+ node = first_step_node;
+ } else {
+ node = loop_node;
+ }
+ }
+ if (is_one_byte) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ // Do it again to propagate the new nodes to places where they were not
+ // put because they had not been calculated yet.
+ if (node != nullptr) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ }
+ } else if (IsUnicode(flags) && (IsGlobal(flags) || IsSticky(flags))) {
+ node = OptionallyStepBackToLeadSurrogate(node, flags);
+ }
+
+ if (node == nullptr) node = new (zone()) EndNode(EndNode::BACKTRACK, zone());
+ return node;
+}
+
} // namespace internal
} // namespace v8
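
Aside (illustrative, not part of the patch): the new RegExpCompiler::PreprocessRegExp above folds the node-graph preprocessing that previously lived in RegExpImpl::Compile into the compiler itself. The sketch below only shows the order of the transformations using string stand-ins for the RegExpNode graph; names are hypothetical.

```cpp
#include <iostream>
#include <string>

// String-based stand-ins for the RegExpNode graph; this demonstrates ordering only.
std::string Preprocess(const std::string& body, bool anchored_at_start,
                       bool sticky, bool is_one_byte, bool is_unicode,
                       bool is_global) {
  std::string node = "Capture0(" + body + ")";       // wrap the body in capture #0
  if (!anchored_at_start && !sticky) {
    node = "NonGreedyLoop(AnyChar, " + node + ")";   // implicit leading .*?
  }
  if (is_one_byte) {
    node = "FilterOneByte(" + node + ")";            // drop nodes that cannot match one-byte input
  } else if (is_unicode && (is_global || sticky)) {
    node = "StepBackToLeadSurrogate(" + node + ")";  // fix matches starting inside a surrogate pair
  }
  return node;
}

int main() {
  std::cout << Preprocess("a(b)c", false, false, true, false, false) << "\n";
}
```
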
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index d083d5d9dd..a35ffcd01a 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -501,7 +501,7 @@ class RegExpCompiler {
struct CompilationResult final {
explicit CompilationResult(RegExpError err) : error(err) {}
- CompilationResult(Object code, int registers)
+ CompilationResult(Handle<Object> code, int registers)
: code(code), num_registers(registers) {}
static CompilationResult RegExpTooBig() {
@@ -511,7 +511,7 @@ class RegExpCompiler {
bool Succeeded() const { return error == RegExpError::kNone; }
const RegExpError error = RegExpError::kNone;
- Object code;
+ Handle<Object> code;
int num_registers = 0;
};
@@ -519,11 +519,19 @@ class RegExpCompiler {
RegExpNode* start, int capture_count,
Handle<String> pattern);
+ // Preprocessing is the final step of node creation before analysis
+ // and assembly. It includes:
+ // - Wrapping the body of the regexp in capture 0.
+ // - Inserting the implicit .* before/after the regexp if necessary.
+ // - If the input is a one-byte string, filtering out nodes that can't match.
+ // - Fixing up regexp matches that start within a surrogate pair.
+ RegExpNode* PreprocessRegExp(RegExpCompileData* data, JSRegExp::Flags flags,
+ bool is_one_byte);
+
// If the regexp matching starts within a surrogate pair, step back to the
// lead surrogate and start matching from there.
- static RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
+ RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpNode* on_success,
+ JSRegExp::Flags flags);
inline void AddWork(RegExpNode* node) {
if (!node->on_work_list() && !node->label()->is_bound()) {
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index d3efa65bf1..0c6d8d5b4b 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -144,6 +144,48 @@ class BacktrackStack {
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
+// Registers used during interpreter execution. These consist of output
+// registers in indices [0, output_register_count[ which will contain matcher
+// results as a {start,end} index tuple for each capture (where the whole match
+// counts as implicit capture 0); and internal registers in indices
+// [output_register_count, total_register_count[.
+class InterpreterRegisters {
+ public:
+ using RegisterT = int;
+
+ InterpreterRegisters(int total_register_count, RegisterT* output_registers,
+ int output_register_count)
+ : registers_(total_register_count),
+ output_registers_(output_registers),
+ output_register_count_(output_register_count) {
+ // TODO(jgruber): Use int32_t consistently for registers. Currently, CSA
+ // uses int32_t while runtime uses int.
+ STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
+ DCHECK_GE(output_register_count, 2); // At least 2 for the match itself.
+ DCHECK_GE(total_register_count, output_register_count);
+ DCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount);
+ DCHECK_NOT_NULL(output_registers);
+
+ // Initialize the output register region to -1 signifying 'no match'.
+ std::memset(registers_.data(), -1,
+ output_register_count * sizeof(RegisterT));
+ }
+
+ const RegisterT& operator[](size_t index) const { return registers_[index]; }
+ RegisterT& operator[](size_t index) { return registers_[index]; }
+
+ void CopyToOutputRegisters() {
+ MemCopy(output_registers_, registers_.data(),
+ output_register_count_ * sizeof(RegisterT));
+ }
+
+ private:
+ static constexpr int kStaticCapacity = 64; // Arbitrary.
+ base::SmallVector<RegisterT, kStaticCapacity> registers_;
+ RegisterT* const output_registers_;
+ const int output_register_count_;
+};
+
IrregexpInterpreter::Result ThrowStackOverflow(Isolate* isolate,
RegExp::CallOrigin call_origin) {
CHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
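
Aside (illustrative, not part of the patch): the InterpreterRegisters class added above lets the interpreter allocate its own working registers and copy only the output prefix back on success, which is what allows MatchForCallFromJs to drop its temporary register buffer later in this diff. A minimal, self-contained sketch of the same copy-on-success pattern, with hypothetical names:

```cpp
#include <cstring>
#include <vector>

class Registers {
 public:
  Registers(int total, int* out, int out_count)
      : regs_(total, 0), out_(out), out_count_(out_count) {
    // The output prefix starts as -1 ("no match"), like the real class.
    std::memset(regs_.data(), -1, out_count * sizeof(int));
  }
  int& operator[](size_t i) { return regs_[i]; }
  // Only called when the match succeeds; otherwise `out` stays untouched.
  void CopyToOutput() { std::memcpy(out_, regs_.data(), out_count_ * sizeof(int)); }

 private:
  std::vector<int> regs_;  // output registers followed by internal registers
  int* out_;
  int out_count_;
};

int main() {
  int output[2];
  Registers regs(/*total=*/8, output, /*out_count=*/2);
  regs[0] = 3;  // match start
  regs[1] = 7;  // match end
  regs.CopyToOutput();
  return output[0] == 3 && output[1] == 7 ? 0 : 1;
}
```
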
@@ -305,12 +347,12 @@ bool CheckBitInTable(const uint32_t current_char, const byte* const table) {
#endif // DEBUG
template <typename Char>
-IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
- String subject_string,
- Vector<const Char> subject, int* registers,
- int current, uint32_t current_char,
- RegExp::CallOrigin call_origin,
- const uint32_t backtrack_limit) {
+IrregexpInterpreter::Result RawMatch(
+ Isolate* isolate, ByteArray code_array, String subject_string,
+ Vector<const Char> subject, int* output_registers,
+ int output_register_count, int total_register_count, int current,
+ uint32_t current_char, RegExp::CallOrigin call_origin,
+ const uint32_t backtrack_limit) {
DisallowHeapAllocation no_gc;
#if V8_USE_COMPUTED_GOTO
@@ -364,6 +406,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
const byte* pc = code_array.GetDataStartAddress();
const byte* code_base = pc;
+ InterpreterRegisters registers(total_register_count, output_registers,
+ output_register_count);
BacktrackStack backtrack_stack;
uint32_t backtrack_count = 0;
@@ -471,6 +515,7 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
BYTECODE(SUCCEED) {
isolate->counters()->regexp_backtracks()->AddSample(
static_cast<int>(backtrack_count));
+ registers.CopyToOutputRegisters();
return IrregexpInterpreter::SUCCESS;
}
BYTECODE(ADVANCE_CP) {
@@ -952,24 +997,25 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate, ByteArray code_array,
// static
IrregexpInterpreter::Result IrregexpInterpreter::Match(
- Isolate* isolate, JSRegExp regexp, String subject_string, int* registers,
- int registers_length, int start_position, RegExp::CallOrigin call_origin) {
- if (FLAG_regexp_tier_up) {
- regexp.TierUpTick();
- }
+ Isolate* isolate, JSRegExp regexp, String subject_string,
+ int* output_registers, int output_register_count, int start_position,
+ RegExp::CallOrigin call_origin) {
+ if (FLAG_regexp_tier_up) regexp.TierUpTick();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
ByteArray code_array = ByteArray::cast(regexp.Bytecode(is_one_byte));
+ int total_register_count = regexp.MaxRegisterCount();
- return MatchInternal(isolate, code_array, subject_string, registers,
- registers_length, start_position, call_origin,
- regexp.BacktrackLimit());
+ return MatchInternal(isolate, code_array, subject_string, output_registers,
+ output_register_count, total_register_count,
+ start_position, call_origin, regexp.BacktrackLimit());
}
IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
Isolate* isolate, ByteArray code_array, String subject_string,
- int* registers, int registers_length, int start_position,
- RegExp::CallOrigin call_origin, uint32_t backtrack_limit) {
+ int* output_registers, int output_register_count, int total_register_count,
+ int start_position, RegExp::CallOrigin call_origin,
+ uint32_t backtrack_limit) {
DCHECK(subject_string.IsFlat());
// Note: Heap allocation *is* allowed in two situations if calling from
@@ -980,27 +1026,23 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
// after interrupts have run.
DisallowHeapAllocation no_gc;
- // Reset registers to -1 (=undefined).
- // This is necessary because registers are only written when a
- // capture group matched.
- // Resetting them ensures that previous matches are cleared.
- memset(registers, -1, sizeof(registers[0]) * registers_length);
-
uc16 previous_char = '\n';
String::FlatContent subject_content = subject_string.GetFlatContent(no_gc);
if (subject_content.IsOneByte()) {
Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(isolate, code_array, subject_string, subject_vector,
- registers, start_position, previous_char, call_origin,
- backtrack_limit);
+ output_registers, output_register_count,
+ total_register_count, start_position, previous_char,
+ call_origin, backtrack_limit);
} else {
DCHECK(subject_content.IsTwoByte());
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(isolate, code_array, subject_string, subject_vector,
- registers, start_position, previous_char, call_origin,
- backtrack_limit);
+ output_registers, output_register_count,
+ total_register_count, start_position, previous_char,
+ call_origin, backtrack_limit);
}
}
@@ -1009,11 +1051,11 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
// This method is called through an external reference from RegExpExecInternal
// builtin.
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
- Address subject, int32_t start_position, Address, Address, int* registers,
- int32_t registers_length, Address, RegExp::CallOrigin call_origin,
- Isolate* isolate, Address regexp) {
+ Address subject, int32_t start_position, Address, Address,
+ int* output_registers, int32_t output_register_count, Address,
+ RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp) {
DCHECK_NOT_NULL(isolate);
- DCHECK_NOT_NULL(registers);
+ DCHECK_NOT_NULL(output_registers);
DCHECK(call_origin == RegExp::CallOrigin::kFromJs);
DisallowHeapAllocation no_gc;
@@ -1028,38 +1070,18 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
return IrregexpInterpreter::RETRY;
}
- // In generated code, registers are allocated on the stack. The given
- // `registers` argument is only guaranteed to hold enough space for permanent
- // registers (i.e. for captures), and not for temporary registers used only
- // during matcher execution. We match that behavior in the interpreter by
- // using a SmallVector as internal register storage.
- static constexpr int kBaseRegisterArraySize = 64; // Arbitrary.
- const int internal_register_count =
- Smi::ToInt(regexp_obj.DataAt(JSRegExp::kIrregexpMaxRegisterCountIndex));
- base::SmallVector<int, kBaseRegisterArraySize> internal_registers(
- internal_register_count);
-
- Result result =
- Match(isolate, regexp_obj, subject_string, internal_registers.data(),
- internal_register_count, start_position, call_origin);
-
- // Copy capture registers to the output array.
- if (result == IrregexpInterpreter::SUCCESS) {
- CHECK_GE(internal_registers.size(), registers_length);
- MemCopy(registers, internal_registers.data(),
- registers_length * sizeof(registers[0]));
- }
-
- return result;
+ return Match(isolate, regexp_obj, subject_string, output_registers,
+ output_register_count, start_position, call_origin);
}
#endif // !COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromRuntime(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject_string,
- int* registers, int registers_length, int start_position) {
- return Match(isolate, *regexp, *subject_string, registers, registers_length,
- start_position, RegExp::CallOrigin::kFromRuntime);
+ int* output_registers, int output_register_count, int start_position) {
+ return Match(isolate, *regexp, *subject_string, output_registers,
+ output_register_count, start_position,
+ RegExp::CallOrigin::kFromRuntime);
}
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index d77b5db896..be96476443 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -23,36 +23,42 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
// In case a StackOverflow occurs, a StackOverflowException is created and
// EXCEPTION is returned.
- static Result MatchForCallFromRuntime(Isolate* isolate,
- Handle<JSRegExp> regexp,
- Handle<String> subject_string,
- int* registers, int registers_length,
- int start_position);
+ static Result MatchForCallFromRuntime(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject_string,
+ int* output_registers, int output_register_count, int start_position);
// In case a StackOverflow occurs, EXCEPTION is returned. The caller is
// responsible for creating the exception.
+ //
// RETRY is returned if a retry through the runtime is needed (e.g. when
// interrupts have been scheduled or the regexp is marked for tier-up).
+ //
// Arguments input_start, input_end and backtrack_stack are
// unused. They are only passed to match the signature of the native irregex
// code.
+ //
+ // Arguments output_registers and output_register_count describe the results
+ // array, which will contain register values of all captures if SUCCESS is
+ // returned. For all other return codes, the results array remains unmodified.
static Result MatchForCallFromJs(Address subject, int32_t start_position,
Address input_start, Address input_end,
- int* registers, int32_t registers_length,
+ int* output_registers,
+ int32_t output_register_count,
Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
static Result MatchInternal(Isolate* isolate, ByteArray code_array,
- String subject_string, int* registers,
- int registers_length, int start_position,
+ String subject_string, int* output_registers,
+ int output_register_count,
+ int total_register_count, int start_position,
RegExp::CallOrigin call_origin,
uint32_t backtrack_limit);
private:
static Result Match(Isolate* isolate, JSRegExp regexp, String subject_string,
- int* registers, int registers_length, int start_position,
- RegExp::CallOrigin call_origin);
+ int* output_registers, int output_register_count,
+ int start_position, RegExp::CallOrigin call_origin);
};
} // namespace internal
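
Aside (illustrative, not part of the patch): the header comment above documents the interpreter's result codes and the contract that output_registers is only written on SUCCESS. A caller-side sketch of that contract, with stand-in enum values and a fake Match() rather than the real V8 signature:

```cpp
#include <cstdio>

enum class Result { SUCCESS, FAILURE, EXCEPTION, RETRY };

// Stand-in for the interpreter: writes the output registers only on success.
Result Match(int* output_registers, int output_register_count) {
  (void)output_register_count;
  output_registers[0] = 0;  // pretend the whole match spans [0, 3)
  output_registers[1] = 3;
  return Result::SUCCESS;
}

int main() {
  int out[2] = {-1, -1};
  switch (Match(out, 2)) {
    case Result::SUCCESS:
      std::printf("match at [%d, %d)\n", out[0], out[1]);
      break;
    case Result::RETRY:      // re-enter through the runtime (interrupt or tier-up)
    case Result::FAILURE:    // no match; out[] left untouched
    case Result::EXCEPTION:  // e.g. stack overflow; caller raises or propagates
      break;
  }
  return 0;
}
```
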
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 3ac1bb7f57..6cc9cae6e1 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -111,6 +111,24 @@ NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() = default;
+void NativeRegExpMacroAssembler::LoadCurrentCharacterImpl(
+ int cp_offset, Label* on_end_of_input, bool check_bounds, int characters,
+ int eats_at_least) {
+ // It's possible to preload a small number of characters when each success
+ // path requires a large number of characters, but not the reverse.
+ DCHECK_GE(eats_at_least, characters);
+
+ DCHECK(base::IsInRange(cp_offset, kMinCPOffset, kMaxCPOffset));
+ if (check_bounds) {
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
return FLAG_enable_regexp_unaligned_accesses && !slow_safe();
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index e83446cdc9..289c2a979e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -28,13 +28,14 @@ struct DisjunctDecisionRow {
class RegExpMacroAssembler {
public:
// The implementation must be able to handle at least:
- static const int kMaxRegister = (1 << 16) - 1;
- static const int kMaxCPOffset = (1 << 15) - 1;
- static const int kMinCPOffset = -(1 << 15);
+ static constexpr int kMaxRegisterCount = (1 << 16);
+ static constexpr int kMaxRegister = kMaxRegisterCount - 1;
+ static constexpr int kMaxCPOffset = (1 << 15) - 1;
+ static constexpr int kMinCPOffset = -(1 << 15);
- static const int kTableSizeBits = 7;
- static const int kTableSize = 1 << kTableSizeBits;
- static const int kTableMask = kTableSize - 1;
+ static constexpr int kTableSizeBits = 7;
+ static constexpr int kTableSize = 1 << kTableSizeBits;
+ static constexpr int kTableMask = kTableSize - 1;
static constexpr int kUseCharactersValue = -1;
@@ -272,6 +273,13 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
const byte* input_end, int* output,
int output_size, Isolate* isolate,
JSRegExp regexp);
+ void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
+ bool check_bounds, int characters,
+ int eats_at_least) override;
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count) = 0;
};
} // namespace internal
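
Aside (illustrative, not part of the patch): the hunk above hoists LoadCurrentCharacterImpl into NativeRegExpMacroAssembler, so the identical bounds-check logic removed from the PPC, S390 and X64 assemblers earlier in this diff now lives in one place, and each backend only supplies LoadCurrentCharacterUnchecked. A simplified sketch of that template-method split, with stand-in class names:

```cpp
#include <cassert>

class AssemblerBase {
 public:
  virtual ~AssemblerBase() = default;

  // Shared policy: decide where to bounds-check, then delegate the load.
  void LoadCurrentCharacterImpl(int cp_offset, bool check_bounds, int characters,
                                int eats_at_least) {
    assert(eats_at_least >= characters);
    if (check_bounds) {
      CheckPosition(cp_offset >= 0 ? cp_offset + eats_at_least - 1 : cp_offset);
    }
    LoadCurrentCharacterUnchecked(cp_offset, characters);
  }

 protected:
  virtual void CheckPosition(int /*cp_offset*/) {}
  // Backend-specific: emit the actual load instructions.
  virtual void LoadCurrentCharacterUnchecked(int cp_offset, int character_count) = 0;
};

class X64Backend final : public AssemblerBase {
 protected:
  void LoadCurrentCharacterUnchecked(int, int) override { /* emit arch code */ }
};

int main() {
  X64Backend backend;
  backend.LoadCurrentCharacterImpl(/*cp_offset=*/1, /*check_bounds=*/true,
                                   /*characters=*/1, /*eats_at_least=*/2);
  return 0;
}
```
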
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
index d618c9bb27..7863dbaa02 100644
--- a/deps/v8/src/regexp/regexp-nodes.h
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -237,6 +237,15 @@ class RegExpNode : public ZoneObject {
eats_at_least_ = eats_at_least;
}
+ // TODO(v8:10441): This is a hacky way to avoid exponential code size growth
+ // for very large choice nodes that can be generated by unicode property
+ // escapes. In order to avoid inlining (i.e. trace recursion), we pretend to
+ // have generated the maximum count of code copies already.
+ // We should instead fix this properly, e.g. by using the code size budget
+ // (flush_budget) or by generating property escape matches as calls to a C
+ // function.
+ void SetDoNotInline() { trace_count_ = kMaxCopiesCodeGenerated; }
+
BoyerMooreLookahead* bm_info(bool not_at_start) {
return bm_info_[not_at_start ? 1 : 0];
}
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 73c2015dd9..556edbdac8 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -185,10 +185,7 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// property. Similar spots in CSA would use BranchIfFastRegExp_Strict in this
// case.
- if (!Protectors::IsRegExpSpeciesLookupChainProtectorIntact(
- recv.GetCreationContext())) {
- return false;
- }
+ if (!Protectors::IsRegExpSpeciesLookupChainIntact(isolate)) return false;
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 4319990a39..7b8da4d8ea 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -34,8 +34,7 @@ class RegExpImpl final : public AllStatic {
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> pattern, JSRegExp::Flags flags,
- int capture_register_count,
- uint32_t backtrack_limit);
+ int capture_count, uint32_t backtrack_limit);
static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> pattern, JSRegExp::Flags flags,
@@ -86,7 +85,6 @@ class RegExpImpl final : public AllStatic {
static void SetIrregexpCaptureNameMap(FixedArray re,
Handle<FixedArray> value);
static int IrregexpNumberOfCaptures(FixedArray re);
- static int IrregexpNumberOfRegisters(FixedArray re);
static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
};
@@ -422,7 +420,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<FixedArray> data =
Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
if (compile_data.compilation_target == RegExpCompilationTarget::kNative) {
- data->set(JSRegExp::code_index(is_one_byte), compile_data.code);
+ data->set(JSRegExp::code_index(is_one_byte), *compile_data.code);
// Reset bytecode to uninitialized. In case we use tier-up we know that
// tier-up has happened this way.
data->set(JSRegExp::bytecode_index(is_one_byte),
@@ -432,7 +430,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
RegExpCompilationTarget::kBytecode);
// Store code generated by compiler in bytecode and trampoline to
// interpreter in code.
- data->set(JSRegExp::bytecode_index(is_one_byte), compile_data.code);
+ data->set(JSRegExp::bytecode_index(is_one_byte), *compile_data.code);
Handle<Code> trampoline =
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
data->set(JSRegExp::code_index(is_one_byte), *trampoline);
@@ -456,7 +454,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
}
int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
- return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
}
void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
@@ -476,10 +474,6 @@ int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
}
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
- return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
-}
-
ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
return ByteArray::cast(re.get(JSRegExp::bytecode_index(is_one_byte)));
}
@@ -509,35 +503,23 @@ int RegExp::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
return -1;
}
- DisallowHeapAllocation no_gc;
- FixedArray data = FixedArray::cast(regexp->data());
- if (regexp->ShouldProduceBytecode()) {
- // Byte-code regexp needs space allocated for all its registers.
- // The result captures are copied to the start of the registers array
- // if the match succeeds. This way those registers are not clobbered
- // when we set the last match info from last successful match.
- return RegExpImpl::IrregexpNumberOfRegisters(data) +
- (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
- } else {
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
- }
+ // Only reserve room for output captures. Internal registers are allocated by
+ // the engine.
+ return JSRegExp::RegistersForCaptureCount(regexp->CaptureCount());
}
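
Aside (illustrative, not part of the patch): IrregexpPrepare now reserves only the output capture registers, since both the native code and the interpreter allocate their internal registers themselves. A tiny sketch of the register-count arithmetic, assuming JSRegExp::RegistersForCaptureCount(n) is (n + 1) * 2, i.e. a {start, end} pair per explicit capture plus the implicit whole-match capture 0:

```cpp
#include <cassert>

constexpr int RegistersForCaptureCount(int capture_count) {
  return (capture_count + 1) * 2;  // one {start, end} pair per capture, plus capture 0
}

int main() {
  // /(a)(b)/ has two explicit captures -> three pairs -> six output registers.
  assert(RegistersForCaptureCount(2) == 6);
  static_assert(RegistersForCaptureCount(0) == 2, "whole match only");
  return 0;
}
```
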
int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
int32_t* output, int output_size) {
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
-
DCHECK_LE(0, index);
DCHECK_LE(index, subject->length());
DCHECK(subject->IsFlat());
+ DCHECK_GE(output_size,
+ JSRegExp::RegistersForCaptureCount(regexp->CaptureCount()));
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
if (!regexp->ShouldProduceBytecode()) {
- DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
// The stack is used to allocate registers for the compiled regexp code.
@@ -568,27 +550,16 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
UNREACHABLE();
} else {
DCHECK(regexp->ShouldProduceBytecode());
- DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- int32_t* raw_output = &output[number_of_capture_registers];
do {
IrregexpInterpreter::Result result =
IrregexpInterpreter::MatchForCallFromRuntime(
- isolate, regexp, subject, raw_output, number_of_capture_registers,
- index);
+ isolate, regexp, subject, output, output_size, index);
DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
isolate->has_pending_exception());
switch (result) {
case IrregexpInterpreter::SUCCESS:
- // Copy capture results to the start of the registers array.
- MemCopy(output, raw_output,
- number_of_capture_registers * sizeof(int32_t));
- return result;
case IrregexpInterpreter::EXCEPTION:
case IrregexpInterpreter::FAILURE:
return result;
@@ -596,9 +567,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
// The string has changed representation, and we must restart the
// match.
// We need to reset the tier up to start over with compilation.
- if (FLAG_regexp_tier_up) {
- regexp->ResetLastTierUpTick();
- }
+ if (FLAG_regexp_tier_up) regexp->ResetLastTierUpTick();
is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
break;
@@ -659,8 +628,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
output_registers, required_registers);
if (res == RegExp::RE_SUCCESS) {
- int capture_count =
- IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
+ int capture_count = regexp->CaptureCount();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
}
@@ -692,7 +660,8 @@ Handle<RegExpMatchInfo> RegExp::SetLastMatchInfo(
}
}
- int capture_register_count = (capture_count + 1) * 2;
+ int capture_register_count =
+ JSRegExp::RegistersForCaptureCount(capture_count);
DisallowHeapAllocation no_allocation;
if (match != nullptr) {
for (int i = 0; i < capture_register_count; i += 2) {
@@ -746,14 +715,12 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t backtrack_limit) {
- if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
+ if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
+ RegExpMacroAssembler::kMaxRegisterCount) {
data->error = RegExpError::kTooLarge;
return false;
}
- bool is_sticky = IsSticky(flags);
- bool is_global = IsGlobal(flags);
- bool is_unicode = IsUnicode(flags);
RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
if (compiler.optimize()) {
@@ -772,50 +739,8 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
}
- // Wrap the body of the regexp in capture #0.
- RegExpNode* captured_body =
- RegExpCapture::ToNode(data->tree, 0, &compiler, compiler.accept());
- RegExpNode* node = captured_body;
- bool is_end_anchored = data->tree->IsAnchoredAtEnd();
- bool is_start_anchored = data->tree->IsAnchoredAtStart();
- int max_length = data->tree->max_match();
- if (!is_start_anchored && !is_sticky) {
- // Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning or sticky.
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpNode* loop_node = RegExpQuantifier::ToNode(
- 0, RegExpTree::kInfinity, false,
- new (zone) RegExpCharacterClass('*', default_flags), &compiler,
- captured_body, data->contains_anchor);
-
- if (data->contains_anchor) {
- // Unroll loop once, to take care of the case that might start
- // at the start of input.
- ChoiceNode* first_step_node = new (zone) ChoiceNode(2, zone);
- first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
- new (zone) RegExpCharacterClass('*', default_flags), false,
- loop_node)));
- node = first_step_node;
- } else {
- node = loop_node;
- }
- }
- if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- // Do it again to propagate the new nodes to places where they were not
- // put because they had not been calculated yet.
- if (node != nullptr) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- }
- } else if (is_unicode && (is_global || is_sticky)) {
- node = RegExpCompiler::OptionallyStepBackToLeadSurrogate(&compiler, node,
- flags);
- }
-
- if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
- data->node = node;
- data->error = AnalyzeRegExp(isolate, is_one_byte, node);
+ data->node = compiler.PreprocessRegExp(data, flags, is_one_byte);
+ data->error = AnalyzeRegExp(isolate, is_one_byte, data->node);
if (data->error != RegExpError::kNone) {
return false;
}
@@ -830,30 +755,32 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
is_one_byte ? NativeRegExpMacroAssembler::LATIN1
: NativeRegExpMacroAssembler::UC16;
+ const int output_register_count =
+ JSRegExp::RegistersForCaptureCount(data->capture_count);
#if V8_TARGET_ARCH_IA32
- macro_assembler.reset(new RegExpMacroAssemblerIA32(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerIA32(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_X64
- macro_assembler.reset(new RegExpMacroAssemblerX64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerX64(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_ARM
- macro_assembler.reset(new RegExpMacroAssemblerARM(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerARM(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_ARM64
- macro_assembler.reset(new RegExpMacroAssemblerARM64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerARM64(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_S390
- macro_assembler.reset(new RegExpMacroAssemblerS390(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerS390(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
- macro_assembler.reset(new RegExpMacroAssemblerPPC(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerPPC(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_MIPS
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode,
+ output_register_count));
#elif V8_TARGET_ARCH_MIPS64
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode,
+ output_register_count));
#else
#error "Unsupported architecture"
#endif
@@ -868,17 +795,20 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
+ bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+ bool is_start_anchored = data->tree->IsAnchoredAtStart();
+ int max_length = data->tree->max_match();
static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored && !is_start_anchored && !is_sticky &&
+ if (is_end_anchored && !is_start_anchored && !IsSticky(flags) &&
max_length < kMaxBacksearchLimit) {
macro_assembler->SetCurrentPositionFromEnd(max_length);
}
- if (is_global) {
+ if (IsGlobal(flags)) {
RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
if (data->tree->min_match() > 0) {
mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
- } else if (is_unicode) {
+ } else if (IsUnicode(flags)) {
mode = RegExpMacroAssembler::GLOBAL_UNICODE;
}
macro_assembler->set_global_mode(mode);
@@ -895,7 +825,7 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#endif
RegExpCompiler::CompilationResult result = compiler.Assemble(
- isolate, macro_assembler_ptr, node, data->capture_count, pattern);
+ isolate, macro_assembler_ptr, data->node, data->capture_count, pattern);
// Code / bytecode printing.
{
@@ -904,14 +834,14 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
data->compilation_target == RegExpCompilationTarget::kNative) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
OFStream os(trace_scope.file());
- Handle<Code> c(Code::cast(result.code), isolate);
+ Handle<Code> c = Handle<Code>::cast(result.code);
auto pattern_cstring = pattern->ToCString();
c->Disassemble(pattern_cstring.get(), os, isolate);
}
#endif
if (FLAG_print_regexp_bytecode &&
data->compilation_target == RegExpCompilationTarget::kBytecode) {
- Handle<ByteArray> bytecode(ByteArray::cast(result.code), isolate);
+ Handle<ByteArray> bytecode = Handle<ByteArray>::cast(result.code);
auto pattern_cstring = pattern->ToCString();
RegExpBytecodeDisassemble(bytecode->GetDataStartAddress(),
bytecode->length(), pattern_cstring.get());
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 27ccbb47ba..3a5d9e2962 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -27,7 +27,7 @@ struct RegExpCompileData {
// Either the generated code as produced by the compiler or a trampoline
// to the interpreter.
- Object code;
+ Handle<Object> code;
// True, iff the pattern is a 'simple' atom with zero captures. In other
// words, the pattern consists of a string with no metacharacters and special
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index be4b85df4f..2109b45314 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -983,26 +983,6 @@ RegExpMacroAssemblerS390::Implementation() {
return kS390Implementation;
}
-void RegExpMacroAssemblerS390::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
void RegExpMacroAssemblerS390::PopCurrentPosition() {
Pop(current_input_offset());
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index eced564d7f..9ced67fe27 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -59,9 +59,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least);
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
@@ -121,10 +119,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 5edbf5e579..cf8eb6604c 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -1044,25 +1044,6 @@ RegExpMacroAssembler::IrregexpImplementation
return kX64Implementation;
}
-void RegExpMacroAssemblerX64::LoadCurrentCharacterImpl(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters,
- int eats_at_least) {
- // It's possible to preload a small number of characters when each success
- // path requires a large number of characters, but not the reverse.
- DCHECK_GE(eats_at_least, characters);
-
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + eats_at_least - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
void RegExpMacroAssemblerX64::PopCurrentPosition() {
Pop(rdi);
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 64614e228a..551e9bc6ec 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -59,9 +59,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
void IfRegisterEqPos(int reg, Label* if_eq) override;
IrregexpImplementation Implementation() override;
- void LoadCurrentCharacterImpl(int cp_offset, Label* on_end_of_input,
- bool check_bounds, int characters,
- int eats_at_least) override;
+ void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count) override;
void PopCurrentPosition() override;
void PopRegister(int register_index) override;
void PushBacktrack(Label* label) override;
@@ -159,10 +158,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
// Check whether preemption has been requested.
void CheckPreemption();
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index 2a5fabd216..c9dc033aa3 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -62,6 +62,9 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
: ReadOnlyRoots(Isolate::FromHeap(heap)) {}
+ReadOnlyRoots::ReadOnlyRoots(OffThreadHeap* heap)
+ : ReadOnlyRoots(OffThreadIsolate::FromHeap(heap)) {}
+
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
: read_only_roots_(reinterpret_cast<Address*>(
isolate->roots_table().read_only_roots_begin().address())) {}
@@ -75,23 +78,32 @@ ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
// have the right type, and to avoid the heavy #includes that would be
// required for checked casts.
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- Type ReadOnlyRoots::name() const { \
- DCHECK(CheckType(RootIndex::k##CamelName)); \
- return Type::unchecked_cast(Object(at(RootIndex::k##CamelName))); \
- } \
- Handle<Type> ReadOnlyRoots::name##_handle() const { \
- DCHECK(CheckType(RootIndex::k##CamelName)); \
- return Handle<Type>(&at(RootIndex::k##CamelName)); \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type ReadOnlyRoots::name() const { \
+ DCHECK(CheckType_##name()); \
+ return unchecked_##name(); \
+ } \
+ Type ReadOnlyRoots::unchecked_##name() const { \
+ return Type::unchecked_cast( \
+ Object(*GetLocation(RootIndex::k##CamelName))); \
+ } \
+ Handle<Type> ReadOnlyRoots::name##_handle() const { \
+ DCHECK(CheckType_##name()); \
+ Address* location = GetLocation(RootIndex::k##CamelName); \
+ return Handle<Type>(location); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-Address& ReadOnlyRoots::at(RootIndex root_index) const {
+Address* ReadOnlyRoots::GetLocation(RootIndex root_index) const {
size_t index = static_cast<size_t>(root_index);
DCHECK_LT(index, kEntriesCount);
- return read_only_roots_[index];
+ return &read_only_roots_[index];
+}
+
+Address ReadOnlyRoots::at(RootIndex root_index) const {
+ return *GetLocation(root_index);
}
} // namespace internal
diff --git a/deps/v8/src/roots/roots.cc b/deps/v8/src/roots/roots.cc
index e2ca6e5897..3fdecfe0bf 100644
--- a/deps/v8/src/roots/roots.cc
+++ b/deps/v8/src/roots/roots.cc
@@ -25,23 +25,14 @@ void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
}
#ifdef DEBUG
-
-bool ReadOnlyRoots::CheckType(RootIndex index) const {
- Object root(at(index));
- switch (index) {
-#define CHECKTYPE(Type, name, CamelName) \
- case RootIndex::k##CamelName: \
- return root.Is##Type();
- READ_ONLY_ROOT_LIST(CHECKTYPE)
-#undef CHECKTYPE
-
- default:
- UNREACHABLE();
- return false;
+#define ROOT_TYPE_CHECK(Type, name, CamelName) \
+ bool ReadOnlyRoots::CheckType_##name() const { \
+ return unchecked_##name().Is##Type(); \
}
-}
-#endif // DEBUG
+READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
+#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index cf84ebf40b..0d6c0f30c6 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -5,6 +5,7 @@
#ifndef V8_ROOTS_ROOTS_H_
#define V8_ROOTS_ROOTS_H_
+#include "src/base/macros.h"
#include "src/builtins/accessors.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
@@ -18,6 +19,7 @@ namespace internal {
// Forward declarations.
enum ElementsKind : uint8_t;
+class OffThreadHeap;
class OffThreadIsolate;
template <typename T>
class Handle;
@@ -214,6 +216,7 @@ class Symbol;
V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
+ V(PropertyCell, regexp_species_protector, RegExpSpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_detaching_protector, \
@@ -229,7 +232,49 @@ class Symbol;
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
/* Indirection lists for isolate-independent builtins */ \
- V(FixedArray, builtins_constants_table, BuiltinsConstantsTable)
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
+ /* Internal SharedFunctionInfos */ \
+ V(SharedFunctionInfo, async_function_await_reject_shared_fun, \
+ AsyncFunctionAwaitRejectSharedFun) \
+ V(SharedFunctionInfo, async_function_await_resolve_shared_fun, \
+ AsyncFunctionAwaitResolveSharedFun) \
+ V(SharedFunctionInfo, async_generator_await_reject_shared_fun, \
+ AsyncGeneratorAwaitRejectSharedFun) \
+ V(SharedFunctionInfo, async_generator_await_resolve_shared_fun, \
+ AsyncGeneratorAwaitResolveSharedFun) \
+ V(SharedFunctionInfo, async_generator_yield_resolve_shared_fun, \
+ AsyncGeneratorYieldResolveSharedFun) \
+ V(SharedFunctionInfo, async_generator_return_resolve_shared_fun, \
+ AsyncGeneratorReturnResolveSharedFun) \
+ V(SharedFunctionInfo, async_generator_return_closed_reject_shared_fun, \
+ AsyncGeneratorReturnClosedRejectSharedFun) \
+ V(SharedFunctionInfo, async_generator_return_closed_resolve_shared_fun, \
+ AsyncGeneratorReturnClosedResolveSharedFun) \
+ V(SharedFunctionInfo, async_iterator_value_unwrap_shared_fun, \
+ AsyncIteratorValueUnwrapSharedFun) \
+ V(SharedFunctionInfo, promise_all_resolve_element_shared_fun, \
+ PromiseAllResolveElementSharedFun) \
+ V(SharedFunctionInfo, promise_all_settled_resolve_element_shared_fun, \
+ PromiseAllSettledResolveElementSharedFun) \
+ V(SharedFunctionInfo, promise_all_settled_reject_element_shared_fun, \
+ PromiseAllSettledRejectElementSharedFun) \
+ V(SharedFunctionInfo, promise_any_reject_element_shared_fun, \
+ PromiseAnyRejectElementSharedFun) \
+ V(SharedFunctionInfo, promise_capability_default_reject_shared_fun, \
+ PromiseCapabilityDefaultRejectSharedFun) \
+ V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun, \
+ PromiseCapabilityDefaultResolveSharedFun) \
+ V(SharedFunctionInfo, promise_catch_finally_shared_fun, \
+ PromiseCatchFinallySharedFun) \
+ V(SharedFunctionInfo, promise_get_capabilities_executor_shared_fun, \
+ PromiseGetCapabilitiesExecutorSharedFun) \
+ V(SharedFunctionInfo, promise_then_finally_shared_fun, \
+ PromiseThenFinallySharedFun) \
+ V(SharedFunctionInfo, promise_thrower_finally_shared_fun, \
+ PromiseThrowerFinallySharedFun) \
+ V(SharedFunctionInfo, promise_value_thunk_finally_shared_fun, \
+ PromiseValueThunkFinallySharedFun) \
+ V(SharedFunctionInfo, proxy_revoke_shared_fun, ProxyRevokeSharedFun)
// These root references can be updated by the mutator.
#define STRONG_MUTABLE_MOVABLE_ROOT_LIST(V) \
@@ -243,7 +288,6 @@ class Symbol;
V(FixedArray, materialized_objects, MaterializedObjects) \
V(WeakArrayList, detached_contexts, DetachedContexts) \
V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
- V(WeakArrayList, retained_maps, RetainedMaps) \
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
@@ -302,15 +346,15 @@ class Symbol;
#define ACCESSOR_INFO_ROOT_LIST(V) \
ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_ROOT_LIST_ADAPTER, V)
-#define READ_ONLY_ROOT_LIST(V) \
- STRONG_READ_ONLY_ROOT_LIST(V) \
- INTERNALIZED_STRING_ROOT_LIST(V) \
- PRIVATE_SYMBOL_ROOT_LIST(V) \
- PUBLIC_SYMBOL_ROOT_LIST(V) \
- WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
- STRUCT_MAPS_LIST(V) \
- TORQUE_INTERNAL_CLASS_MAPS_LIST(V) \
- ALLOCATION_SITE_MAPS_LIST(V) \
+#define READ_ONLY_ROOT_LIST(V) \
+ STRONG_READ_ONLY_ROOT_LIST(V) \
+ INTERNALIZED_STRING_ROOT_LIST(V) \
+ PRIVATE_SYMBOL_ROOT_LIST(V) \
+ PUBLIC_SYMBOL_ROOT_LIST(V) \
+ WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
+ STRUCT_MAPS_LIST(V) \
+ TORQUE_INTERNAL_MAP_ROOT_LIST(V) \
+ ALLOCATION_SITE_MAPS_LIST(V) \
DATA_HANDLER_MAPS_LIST(V)
#define MUTABLE_ROOT_LIST(V) \
@@ -480,29 +524,38 @@ class ReadOnlyRoots {
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
+ V8_INLINE explicit ReadOnlyRoots(OffThreadHeap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
V8_INLINE explicit ReadOnlyRoots(OffThreadIsolate* isolate);
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- V8_INLINE class Type name() const; \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ V8_INLINE class Type name() const; \
+ V8_INLINE class Type unchecked_##name() const; \
V8_INLINE Handle<Type> name##_handle() const;
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+ // Get the address of a given read-only root index, without type checks.
+ V8_INLINE Address at(RootIndex root_index) const;
+
// Iterate over all the read-only roots. This is not necessary for garbage
// collection and is usually only performed as part of (de)serialization or
// heap verification.
void Iterate(RootVisitor* visitor);
+ private:
#ifdef DEBUG
- V8_EXPORT_PRIVATE bool CheckType(RootIndex index) const;
+#define ROOT_TYPE_CHECK(Type, name, CamelName) \
+ V8_EXPORT_PRIVATE bool CheckType_##name() const;
+
+ READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
#endif
- private:
V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
- V8_INLINE Address& at(RootIndex root_index) const;
+ V8_INLINE Address* GetLocation(RootIndex root_index) const;
Address* read_only_roots_;
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 93733fe90f..a0e7a2410a 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -123,22 +123,17 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
if (args[3].IsJSArrayBuffer()) {
memory = args.at<JSArrayBuffer>(3);
}
- if (function->shared().HasAsmWasmData()) {
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (shared->HasAsmWasmData()) {
Handle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
isolate, shared, data, stdlib, foreign, memory);
- if (!result.is_null()) {
- return *result.ToHandleChecked();
- }
- }
- // Remove wasm data, mark as broken for asm->wasm, replace function code with
- // UncompiledData, and return a smi 0 to indicate failure.
- if (function->shared().HasAsmWasmData()) {
- SharedFunctionInfo::DiscardCompiled(isolate,
- handle(function->shared(), isolate));
+ if (!result.is_null()) return *result.ToHandleChecked();
+ // Remove wasm data, mark as broken for asm->wasm, replace function code
+ // with UncompiledData, and return a smi 0 to indicate failure.
+ SharedFunctionInfo::DiscardCompiled(isolate, shared);
}
- function->shared().set_is_asm_wasm_broken(true);
+ shared->set_is_asm_wasm_broken(true);
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
@@ -176,6 +171,12 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
JavaScriptFrame* top_frame = top_it.frame();
isolate->set_context(Context::cast(top_frame->context()));
+ int count = optimized_code->deoptimization_count();
+ if (type == DeoptimizeKind::kSoft && count < FLAG_reuse_opt_code_count) {
+ optimized_code->increment_deoptimization_count();
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
// Invalidate the underlying optimized code on non-lazy deopts.
if (type != DeoptimizeKind::kLazy) {
Deoptimizer::DeoptimizeFunction(*function, *optimized_code);
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 0e1b8fd8fa..3b8eefcee1 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -27,6 +27,7 @@
#include "src/objects/js-promise-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 4806922a97..bdb2931e20 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -209,11 +209,29 @@ RUNTIME_FUNCTION(Runtime_NewError) {
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(args.length(), 4);
+ DCHECK_GE(args.length(), 1);
CONVERT_INT32_ARG_CHECKED(template_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
MessageTemplate message_template = MessageTemplateFromInt(template_index);
- return *isolate->factory()->NewTypeError(message_template, arg0);
+
+ Handle<Object> arg0;
+ if (args.length() >= 2) {
+ CHECK(args[1].IsObject());
+ arg0 = args.at<Object>(1);
+ }
+
+ Handle<Object> arg1;
+ if (args.length() >= 3) {
+ CHECK(args[2].IsObject());
+ arg1 = args.at<Object>(2);
+ }
+ Handle<Object> arg2;
+ if (args.length() >= 4) {
+ CHECK(args[3].IsObject());
+ arg2 = args.at<Object>(3);
+ }
+
+ return *isolate->factory()->NewTypeError(message_template, arg0, arg1, arg2);
}
RUNTIME_FUNCTION(Runtime_NewReferenceError) {
@@ -409,11 +427,13 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorError) {
return isolate->Throw(*ErrorUtils::NewIteratorError(isolate, object));
}
-RUNTIME_FUNCTION(Runtime_ThrowSpreadArgIsNullOrUndefined) {
+RUNTIME_FUNCTION(Runtime_ThrowSpreadArgError) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return ErrorUtils::ThrowSpreadArgIsNullOrUndefinedError(isolate, object);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+ MessageTemplate message_id = MessageTemplateFromInt(message_id_smi);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ return ErrorUtils::ThrowSpreadArgError(isolate, message_id, object);
}
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index eb21e0a9a4..ecc9ad534e 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -41,7 +41,7 @@ RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
Handle<SourceTextModule> module(isolate->context().module(), isolate);
- return *isolate->RunHostInitializeImportMetaObjectCallback(module);
+ return *SourceTextModule::GetImportMeta(isolate, module);
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index e496880b71..04b195b31e 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -70,12 +70,12 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
return *isolate->factory()->NewNumber(value);
}
-RUNTIME_FUNCTION(Runtime_NumberToString) {
+RUNTIME_FUNCTION(Runtime_NumberToStringSlow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
- return *isolate->factory()->NumberToString(number);
+ return *isolate->factory()->NumberToString(number, NumberCacheMode::kSetOnly);
}
RUNTIME_FUNCTION(Runtime_MaxSmi) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index b93cdf349b..2dfa9e53be 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -10,6 +10,7 @@
#include "src/execution/messages.h"
#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
+#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/hash-table-inl.h"
@@ -916,9 +917,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
}
}
- DataPropertyInLiteralFlags flags =
- static_cast<DataPropertyInLiteralFlag>(flag);
-
+ DataPropertyInLiteralFlags flags(flag);
PropertyAttributes attrs = (flags & DataPropertyInLiteralFlag::kDontEnum)
? PropertyAttributes::DONT_ENUM
: PropertyAttributes::NONE;
@@ -991,14 +990,6 @@ RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
return isolate->heap()->ToBoolean(obj.IsJSReceiver());
}
-RUNTIME_FUNCTION(Runtime_ClassOf) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj.IsJSReceiver()) return ReadOnlyRoots(isolate).null_value();
- return JSReceiver::cast(obj).class_name();
-}
-
RUNTIME_FUNCTION(Runtime_GetFunctionName) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index d1b63a2fc8..4d1c5ea9d2 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -260,5 +260,62 @@ RUNTIME_FUNCTION(Runtime_ResolvePromise) {
return *result;
}
+// A helper function to be called when constructing AggregateError objects. This
+// takes care of the Error-related construction, e.g., stack traces.
+RUNTIME_FUNCTION(Runtime_ConstructAggregateErrorHelper) {
+ DCHECK(FLAG_harmony_promise_any);
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, message, 2);
+
+ DCHECK_EQ(*target, *isolate->aggregate_error_function());
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ ErrorUtils::Construct(isolate, target, new_target, message));
+ return *result;
+}
+
+// A helper function to be called when constructing AggregateError objects. This
+// takes care of the Error-related construction, e.g., stack traces.
+RUNTIME_FUNCTION(Runtime_ConstructInternalAggregateErrorHelper) {
+ DCHECK(FLAG_harmony_promise_any);
+ HandleScope scope(isolate);
+ DCHECK_GE(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, message, 0);
+
+ Handle<Object> arg0;
+ if (args.length() >= 2) {
+ DCHECK(args[1].IsObject());
+ arg0 = args.at<Object>(1);
+ }
+
+ Handle<Object> arg1;
+ if (args.length() >= 3) {
+ DCHECK(args[2].IsObject());
+ arg1 = args.at<Object>(2);
+ }
+
+ Handle<Object> arg2;
+ if (args.length() >= 4) {
+ CHECK(args[3].IsObject());
+ arg2 = args.at<Object>(3);
+ }
+
+ Handle<Object> message_string = MessageFormatter::Format(
+ isolate, MessageTemplate(message->value()), arg0, arg1, arg2);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ ErrorUtils::Construct(isolate, isolate->aggregate_error_function(),
+ isolate->aggregate_error_function(),
+ message_string));
+ return *result;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 1d6cad9465..a1ec296731 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -938,8 +938,8 @@ class MatchInfoBackedMatch : public String::Match {
*capture_name_map_);
if (capture_index == -1) {
- *state = INVALID;
- return name; // Arbitrary string handle.
+ *state = UNMATCHED;
+ return isolate_->factory()->empty_string();
}
DCHECK(1 <= capture_index && capture_index <= CaptureCount());
@@ -1015,15 +1015,6 @@ class VectorBackedMatch : public String::Match {
CaptureState* state) override {
DCHECK(has_named_captures_);
- Maybe<bool> maybe_capture_exists =
- JSReceiver::HasProperty(groups_obj_, name);
- if (maybe_capture_exists.IsNothing()) return MaybeHandle<String>();
-
- if (!maybe_capture_exists.FromJust()) {
- *state = INVALID;
- return name; // Arbitrary string handle.
- }
-
Handle<Object> capture_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, capture_obj,
Object::GetProperty(isolate_, groups_obj_, name),
@@ -1109,7 +1100,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
isolate->heap(), *subject, regexp->data(), &last_match_cache,
RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
if (cached_answer.IsFixedArray()) {
- int capture_registers = (capture_count + 1) * 2;
+ int capture_registers = JSRegExp::RegistersForCaptureCount(capture_count);
int32_t* last_match = NewArray<int32_t>(capture_registers);
for (int i = 0; i < capture_registers; i++) {
last_match[i] = Smi::ToInt(last_match_cache.get(i));
@@ -1234,8 +1225,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
if (subject_length > kMinLengthToCache) {
// Store the last successful match into the array for caching.
- // TODO(yangguo): do not expose last match to JS and simplify caching.
- int capture_registers = (capture_count + 1) * 2;
+ int capture_registers = JSRegExp::RegistersForCaptureCount(capture_count);
Handle<FixedArray> last_match_cache =
isolate->factory()->NewFixedArray(capture_registers);
int32_t* last_match = global_cache.LastSuccessfulMatch();
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 52abeef583..4b1f6f2231 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -487,8 +487,7 @@ class ParameterArguments {
} // namespace
-
-RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
+RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
@@ -501,7 +500,6 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
-
RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -552,37 +550,6 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
return *result;
}
-
-RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
- StackFrameIterator iterator(isolate);
-
- // Stub/interpreter handler frame
- iterator.Advance();
- DCHECK(iterator.frame()->type() == StackFrame::STUB);
-
- // Function frame
- iterator.Advance();
- JavaScriptFrame* function_frame = JavaScriptFrame::cast(iterator.frame());
- DCHECK(function_frame->is_java_script());
- int argc = function_frame->ComputeParametersCount();
- Address fp = function_frame->fp();
- if (function_frame->has_adapted_arguments()) {
- iterator.Advance();
- ArgumentsAdaptorFrame* adaptor_frame =
- ArgumentsAdaptorFrame::cast(iterator.frame());
- argc = adaptor_frame->ComputeParametersCount();
- fp = adaptor_frame->fp();
- }
-
- Address parameters =
- fp + argc * kSystemPointerSize + StandardFrameConstants::kCallerSPOffset;
- ParameterArguments argument_getter(parameters);
- return *NewSloppyArguments(isolate, callee, argument_getter, argc);
-}
-
RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 220a4a473c..db804490f4 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -28,6 +28,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/smi.h"
+#include "src/snapshot/snapshot.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/ostreams.h"
#include "src/wasm/memory-tracing.h"
@@ -111,6 +112,26 @@ bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
return true;
}
+V8_WARN_UNUSED_RESULT Object CrashUnlessFuzzing(Isolate* isolate) {
+ CHECK(FLAG_fuzzing);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+// Assert that the given argument is a number within the Int32 range
+// and convert it to int32_t. If the argument is not an Int32 we crash if not
+// in fuzzing mode.
+#define CONVERT_INT32_ARG_FUZZ_SAFE(name, index) \
+ if (!args[index].IsNumber()) return CrashUnlessFuzzing(isolate); \
+ int32_t name = 0; \
+ if (!args[index].ToInt32(&name)) return CrashUnlessFuzzing(isolate);
+
+// Cast the given object to a boolean and store it in a variable with
+// the given name. If the object is not a boolean we crash if not in
+// fuzzing mode.
+#define CONVERT_BOOLEAN_ARG_FUZZ_SAFE(name, index) \
+ if (!args[index].IsBoolean()) return CrashUnlessFuzzing(isolate); \
+ bool name = args[index].IsTrue(isolate);
+
} // namespace
RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
@@ -163,18 +184,13 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- // This function is used by fuzzers to get coverage in compiler.
- // Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- // If the function is not optimized, just return.
- if (!function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
-
- Deoptimizer::DeoptimizeFunction(*function);
+ if (function->IsOptimized()) {
+ Deoptimizer::DeoptimizeFunction(*function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -188,12 +204,11 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
// Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
if (!it.done()) function = handle(it.frame()->function(), isolate);
- if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
+ if (function.is_null()) return CrashUnlessFuzzing(isolate);
- // If the function is not optimized, just return.
- if (!function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
-
- Deoptimizer::DeoptimizeFunction(*function);
+ if (function->IsOptimized()) {
+ Deoptimizer::DeoptimizeFunction(*function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -238,24 +253,19 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
- // This function is used by fuzzers, ignore calls with bogus arguments count.
if (args.length() != 1 && args.length() != 2) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return CrashUnlessFuzzing(isolate);
}
- // This function is used by fuzzers to get coverage for optimizations
- // in compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
// The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
if (!function->shared().allows_lazy_compilation()) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return CrashUnlessFuzzing(isolate);
}
// If function isn't compiled, compile it now.
@@ -263,19 +273,19 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
if (!is_compiled_scope.is_compiled() &&
!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return CrashUnlessFuzzing(isolate);
}
- if (!FLAG_opt || (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!FLAG_opt) return ReadOnlyRoots(isolate).undefined_value();
- if (function->shared().HasAsmWasmData()) {
- return ReadOnlyRoots(isolate).undefined_value();
+ if (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize) {
+ return CrashUnlessFuzzing(isolate);
}
+ if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
+
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
}
@@ -290,11 +300,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
- // Ignore invalid inputs produced by fuzzers.
CONVERT_ARG_HANDLE_CHECKED(Object, type, 1);
- if (!type->IsString()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!type->IsString()) return CrashUnlessFuzzing(isolate);
if (Handle<String>::cast(type)->IsOneByteEqualTo(
StaticCharVector("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
@@ -355,9 +362,6 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- if (!args[0].IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
EnsureFeedbackVector(function);
return ReadOnlyRoots(isolate).undefined_value();
@@ -366,16 +370,13 @@ RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
HandleScope scope(isolate);
DCHECK(args.length() == 1 || args.length() == 2);
- if (!args[0].IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!args[0].IsJSFunction()) return CrashUnlessFuzzing(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
bool allow_heuristic_optimization = false;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString())
- return ReadOnlyRoots(isolate).undefined_value();
+ if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
Handle<String> sync = Handle<String>::cast(sync_object);
if (sync->IsOneByteEqualTo(
StaticCharVector("allow heuristic optimization"))) {
@@ -384,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
}
if (!EnsureFeedbackVector(function)) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return CrashUnlessFuzzing(isolate);
}
// If optimization is disabled for the function, return without making it
@@ -392,13 +393,10 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
if (function->shared().optimization_disabled() &&
function->shared().disable_optimization_reason() ==
BailoutReason::kNeverOptimize) {
- return ReadOnlyRoots(isolate).undefined_value();
+ return CrashUnlessFuzzing(isolate);
}
- // We don't optimize Asm/Wasm functions.
- if (function->shared().HasAsmWasmData()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
@@ -417,18 +415,24 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
Handle<JSFunction> function;
// The optional parameter determines the frame being targeted.
- int stack_depth = args.length() == 1 ? args.smi_at(0) : 0;
+ int stack_depth = 0;
+ if (args.length() == 1) {
+ if (!args[0].IsSmi()) return CrashUnlessFuzzing(isolate);
+ stack_depth = args.smi_at(0);
+ }
// Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
while (!it.done() && stack_depth--) it.Advance();
if (!it.done()) function = handle(it.frame()->function(), isolate);
- if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
+ if (function.is_null()) return CrashUnlessFuzzing(isolate);
- if (!FLAG_opt || (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize)) {
- return ReadOnlyRoots(isolate).undefined_value();
+ if (!FLAG_opt) return ReadOnlyRoots(isolate).undefined_value();
+
+ if (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize) {
+ return CrashUnlessFuzzing(isolate);
}
if (FLAG_testing_d8_test_runner) {
@@ -470,14 +474,15 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- // This function is used by fuzzers to get coverage for optimizations
- // in compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared().DisableOptimization(BailoutReason::kNeverOptimize);
+ SharedFunctionInfo sfi = function->shared();
+ if (sfi.abstract_code().kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ sfi.abstract_code().kind() != AbstractCode::BUILTIN) {
+ return CrashUnlessFuzzing(isolate);
+ }
+ sfi.DisableOptimization(BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -500,23 +505,25 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
}
- // This function is used by fuzzers to get coverage for optimizations
- // in compiler. Ignore calls on non-function objects to avoid runtime errors.
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
- if (!function_object->IsJSFunction()) {
- return Smi::FromInt(status);
- }
+ if (function_object->IsUndefined()) return Smi::FromInt(status);
+ if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
status |= static_cast<int>(OptimizationStatus::kIsFunction);
bool sync_with_compiler_thread = true;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString())
- return ReadOnlyRoots(isolate).undefined_value();
+ if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
Handle<String> sync = Handle<String>::cast(sync_object);
if (sync->IsOneByteEqualTo(StaticCharVector("no sync"))) {
sync_with_compiler_thread = false;
+ } else if (sync->IsOneByteEqualTo(StaticCharVector("sync")) ||
+ sync->length() == 0) {
+ DCHECK(sync_with_compiler_thread);
+ } else {
+ return CrashUnlessFuzzing(isolate);
}
}
@@ -575,29 +582,25 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
DCHECK_EQ(0, args.length());
- if (FLAG_block_concurrent_recompilation &&
- isolate->concurrent_recompilation_enabled()) {
- isolate->optimizing_compile_dispatcher()->Unblock();
- }
+ CHECK(FLAG_block_concurrent_recompilation);
+ CHECK(isolate->concurrent_recompilation_enabled());
+ isolate->optimizing_compile_dispatcher()->Unblock();
return ReadOnlyRoots(isolate).undefined_value();
}
-static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(args.This());
+static void ReturnNull(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().SetNull();
}
RUNTIME_FUNCTION(Runtime_GetUndetectable) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-
Local<v8::ObjectTemplate> desc = v8::ObjectTemplate::New(v8_isolate);
desc->MarkAsUndetectable();
- desc->SetCallAsFunctionHandler(ReturnThis);
- Local<v8::Object> obj;
- if (!desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocal(&obj)) {
- return Object();
- }
+ desc->SetCallAsFunctionHandler(ReturnNull);
+ Local<v8::Object> obj =
+ desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocalChecked();
return *Utils::OpenHandle(*obj);
}
@@ -630,9 +633,6 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- if (!args[0].IsJSFunction()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->ClearTypeFeedbackInfo();
return ReadOnlyRoots(isolate).undefined_value();
@@ -672,15 +672,15 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- CONVERT_INT32_ARG_CHECKED(timeout, 1);
+ CONVERT_INT32_ARG_FUZZ_SAFE(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
#endif
#ifdef DEBUG
- CONVERT_INT32_ARG_CHECKED(interval, 0);
+ CONVERT_INT32_ARG_FUZZ_SAFE(interval, 0);
FLAG_gc_interval = interval;
if (args.length() == 3) {
// Enable/disable inline allocation if requested.
- CONVERT_BOOLEAN_ARG_CHECKED(inline_allocation, 2);
+ CONVERT_BOOLEAN_ARG_FUZZ_SAFE(inline_allocation, 2);
if (inline_allocation) {
isolate->heap()->EnableInlineAllocation();
} else {
@@ -816,24 +816,19 @@ RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
HandleScope scope(isolate);
DCHECK_LE(1, args.length());
DCHECK_GE(2, args.length());
- if (!FLAG_track_retaining_path) {
- PrintF("DebugTrackRetainingPath requires --track-retaining-path flag.\n");
- } else {
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
- RetainingPathOption option = RetainingPathOption::kDefault;
- if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
- const char track_ephemeron_path[] = "track-ephemeron-path";
- if (str->IsOneByteEqualTo(StaticCharVector(track_ephemeron_path))) {
- option = RetainingPathOption::kTrackEphemeronPath;
- } else if (str->length() != 0) {
- PrintF("Unexpected second argument of DebugTrackRetainingPath.\n");
- PrintF("Expected an empty string or '%s', got '%s'.\n",
- track_ephemeron_path, str->ToCString().get());
- }
+ CHECK(FLAG_track_retaining_path);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (args.length() == 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, str, 1);
+ const char track_ephemeron_path[] = "track-ephemeron-path";
+ if (str->IsOneByteEqualTo(StaticCharVector(track_ephemeron_path))) {
+ option = RetainingPathOption::kTrackEphemeronPath;
+ } else {
+ CHECK_EQ(str->length(), 0);
}
- isolate->heap()->AddRetainingPathTarget(object, option);
}
+ isolate->heap()->AddRetainingPathTarget(object, option);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -919,10 +914,8 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
IsCompiledScope is_compiled_scope;
- if (!func->is_compiled() &&
- !Compiler::Compile(func, Compiler::KEEP_EXCEPTION, &is_compiled_scope)) {
- return ReadOnlyRoots(isolate).exception();
- }
+ CHECK(func->is_compiled() ||
+ Compiler::Compile(func, Compiler::KEEP_EXCEPTION, &is_compiled_scope));
StdoutStream os;
func->code().Print(os);
os << std::endl;
@@ -1001,7 +994,6 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, function, 0);
if (!function.shared().HasAsmWasmData()) {
- // Doesn't have wasm data.
return ReadOnlyRoots(isolate).false_value();
}
if (function.shared().HasBuiltinId() &&
@@ -1080,13 +1072,12 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionId) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 1);
Handle<Object> tag =
WasmExceptionPackage::GetExceptionTag(isolate, exception);
- if (tag->IsWasmExceptionTag()) {
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
- }
+ CHECK(tag->IsWasmExceptionTag());
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (exceptions_table->get(index) == *tag) return Smi::FromInt(index);
}
- return ReadOnlyRoots(isolate).undefined_value();
+ UNREACHABLE();
}
RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
@@ -1102,7 +1093,6 @@ RUNTIME_FUNCTION(Runtime_GetWasmExceptionValues) {
namespace {
bool EnableWasmThreads(v8::Local<v8::Context> context) { return true; }
-
bool DisableWasmThreads(v8::Local<v8::Context> context) { return false; }
} // namespace
@@ -1203,6 +1193,22 @@ RUNTIME_FUNCTION(Runtime_StringIteratorProtector) {
Protectors::IsStringIteratorLookupChainIntact(isolate));
}
+// For use by tests and fuzzers. It
+//
+// 1. serializes a snapshot of the current isolate,
+// 2. deserializes the snapshot,
+// 3. and runs VerifyHeap on the resulting isolate.
+//
+// The current isolate should not be modified by this call and can keep running
+// once it completes.
+RUNTIME_FUNCTION(Runtime_SerializeDeserializeNow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ Snapshot::SerializeDeserializeAndVerifyForTesting(isolate,
+ isolate->native_context());
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
// Take a compiled wasm module and serialize it into an array buffer, which is
// then returned.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
@@ -1226,8 +1232,7 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
return *array_buffer;
}
- // Error. Return undefined.
- return ReadOnlyRoots(isolate).undefined_value();
+ UNREACHABLE();
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.
@@ -1268,7 +1273,7 @@ RUNTIME_FUNCTION(Runtime_CloneWasmModule) {
Handle<WasmModuleObject> new_module_object =
isolate->wasm_engine()->ImportNativeModule(
- isolate, module_object->shared_native_module());
+ isolate, module_object->shared_native_module(), {});
return *new_module_object;
}
@@ -1315,15 +1320,6 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
return Smi::FromInt(instance_count);
}
-RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- if (!instance->has_debug_info()) return Object();
- uint64_t num = instance->debug_info().NumInterpretedCalls();
- return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
-}
-
RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
@@ -1340,18 +1336,6 @@ RUNTIME_FUNCTION(Runtime_WasmNumCodeSpaces) {
return *isolate->factory()->NewNumberFromSize(num_spaces);
}
-RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
- DCHECK_EQ(2, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_SMI_ARG_CHECKED(function_index, 1);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::RedirectToInterpreter(debug_info,
- Vector<int>(&function_index, 1));
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1365,7 +1349,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
StackTraceFrameIterator it(isolate);
DCHECK(!it.done());
DCHECK(it.is_wasm());
- WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
uint8_t* mem_start = reinterpret_cast<uint8_t*>(
frame->wasm_instance().memory_object().array_buffer().backing_store());
@@ -1399,7 +1383,8 @@ RUNTIME_FUNCTION(Runtime_WasmTierDownModule) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
auto* native_module = instance->module_object().native_module();
- native_module->TierDown(isolate);
+ native_module->SetTieringState(wasm::kTieredDown);
+ native_module->TriggerRecompilation();
CHECK(!native_module->compilation_state()->failed());
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1409,7 +1394,8 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpModule) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
auto* native_module = instance->module_object().native_module();
- native_module->TierUp(isolate);
+ native_module->SetTieringState(wasm::kTieredUp);
+ native_module->TriggerRecompilation();
CHECK(!native_module->compilation_state()->failed());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index f65922064f..96c8835700 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -17,6 +17,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-debug.h"
@@ -53,7 +54,7 @@ class FrameFinder {
};
WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
- return FrameFinder<WasmCompiledFrame, StackFrame::EXIT>(isolate)
+ return FrameFinder<WasmFrame, StackFrame::EXIT>(isolate)
.frame()
->wasm_instance();
}
@@ -86,6 +87,8 @@ Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
} // namespace
RUNTIME_FUNCTION(Runtime_WasmIsValidFuncRefValue) {
+ // This code is called from wrappers, so the "thread is wasm" flag is not set.
+ DCHECK(!trap_handler::IsThreadInWasm());
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function, 0);
@@ -100,6 +103,7 @@ RUNTIME_FUNCTION(Runtime_WasmIsValidFuncRefValue) {
}
RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -107,9 +111,6 @@ RUNTIME_FUNCTION(Runtime_WasmMemoryGrow) {
// which calls this runtime function.
CONVERT_UINT32_ARG_CHECKED(delta_pages, 1);
- // This runtime function is always being called from wasm code.
- ClearThreadInWasmScope flag_scope;
-
int ret = WasmMemoryObject::Grow(
isolate, handle(instance->memory_object(), isolate), delta_pages);
// The WasmMemoryGrow builtin which calls this runtime function expects us to
@@ -125,12 +126,17 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
}
RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
+ ClearThreadInWasmScope clear_wasm_flag;
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
return isolate->StackOverflow();
}
RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
+ // This runtime function is called both from wasm and from e.g. js-to-js
+ // functions. Hence the "thread in wasm" flag can be either set or not. Both
+ // is OK, since throwing will trigger unwinding anyway, which sets the flag
+ // correctly depending on the handler.
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -138,6 +144,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
}
RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
+ ClearThreadInWasmScope clear_wasm_flag;
// TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -163,173 +170,10 @@ RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
return *exception;
}
-RUNTIME_FUNCTION(Runtime_WasmExceptionGetTag) {
- // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- DCHECK(isolate->context().is_null());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
- // TODO(wasm): Manually box because parameters are not visited yet.
- Handle<Object> except_obj(except_obj_raw, isolate);
- if (!except_obj->IsWasmExceptionPackage(isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
- Handle<WasmExceptionPackage> exception =
- Handle<WasmExceptionPackage>::cast(except_obj);
- return *WasmExceptionPackage::GetExceptionTag(isolate, exception);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmExceptionGetValues) {
- // TODO(kschimpf): Can this be replaced with equivalent TurboFan code/calls.
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- DCHECK(isolate->context().is_null());
- isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
- CONVERT_ARG_CHECKED(Object, except_obj_raw, 0);
- // TODO(wasm): Manually box because parameters are not visited yet.
- Handle<Object> except_obj(except_obj_raw, isolate);
- if (!except_obj->IsWasmExceptionPackage(isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
- Handle<WasmExceptionPackage> exception =
- Handle<WasmExceptionPackage>::cast(except_obj);
- return *WasmExceptionPackage::GetExceptionValues(isolate, exception);
-}
-
-RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
- DCHECK_EQ(2, args.length());
- HandleScope scope(isolate);
- CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[0]);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 1);
-
- // The arg buffer is the raw pointer to the caller's stack. It looks like a
- // Smi (lowest bit not set, as checked by IsSmi), but is no valid Smi. We just
- // cast it back to the raw pointer.
- CHECK(!arg_buffer_obj->IsHeapObject());
- CHECK(arg_buffer_obj->IsSmi());
- Address arg_buffer = arg_buffer_obj->ptr();
-
- ClearThreadInWasmScope wasm_flag;
-
- // Find the frame pointer and instance of the interpreter frame on the stack.
- Handle<WasmInstanceObject> instance;
- Address frame_pointer = 0;
- {
- FrameFinder<WasmInterpreterEntryFrame, StackFrame::EXIT> frame_finder(
- isolate);
- instance = handle(frame_finder.frame()->wasm_instance(), isolate);
- frame_pointer = frame_finder.frame()->fp();
- }
-
- // Reserve buffers for argument and return values.
- DCHECK_GE(instance->module()->functions.size(), func_index);
- const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
- DCHECK_GE(kMaxInt, sig->parameter_count());
- int num_params = static_cast<int>(sig->parameter_count());
- ScopedVector<wasm::WasmValue> wasm_args(num_params);
- DCHECK_GE(kMaxInt, sig->return_count());
- int num_returns = static_cast<int>(sig->return_count());
- ScopedVector<wasm::WasmValue> wasm_rets(num_returns);
-
- // Copy the arguments for the {arg_buffer} into a vector of {WasmValue}. This
- // also boxes reference types into handles, which needs to happen before any
- // methods that could trigger a GC are being called.
- Address arg_buf_ptr = arg_buffer;
- for (int i = 0; i < num_params; ++i) {
-#define CASE_ARG_TYPE(type, ctype) \
- case wasm::ValueType::type: \
- DCHECK_EQ(sig->GetParam(i).element_size_bytes(), sizeof(ctype)); \
- wasm_args[i] = \
- wasm::WasmValue(base::ReadUnalignedValue<ctype>(arg_buf_ptr)); \
- arg_buf_ptr += sizeof(ctype); \
- break;
- switch (sig->GetParam(i).kind()) {
- CASE_ARG_TYPE(kI32, uint32_t)
- CASE_ARG_TYPE(kI64, uint64_t)
- CASE_ARG_TYPE(kF32, float)
- CASE_ARG_TYPE(kF64, double)
-#undef CASE_ARG_TYPE
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef: {
- DCHECK_EQ(sig->GetParam(i).element_size_bytes(), kSystemPointerSize);
- Handle<Object> ref(
- Object(base::ReadUnalignedValue<Address>(arg_buf_ptr)), isolate);
- DCHECK_IMPLIES(sig->GetParam(i) == wasm::kWasmNullRef, ref->IsNull());
- wasm_args[i] = wasm::WasmValue(ref);
- arg_buf_ptr += kSystemPointerSize;
- break;
- }
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kBottom:
- UNREACHABLE();
- }
- }
-
- // Set the current isolate's context.
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
-
- // Run the function in the interpreter. Note that neither the {WasmDebugInfo}
- // nor the {InterpreterHandle} have to exist, because interpretation might
- // have been triggered by another Isolate sharing the same WasmEngine.
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- bool success = WasmDebugInfo::RunInterpreter(
- isolate, debug_info, frame_pointer, func_index, wasm_args, wasm_rets);
-
- // Early return on failure.
- if (!success) {
- DCHECK(isolate->has_pending_exception());
- return ReadOnlyRoots(isolate).exception();
- }
-
- // Copy return values from the vector of {WasmValue} into {arg_buffer}. This
- // also un-boxes reference types from handles into raw pointers.
- arg_buf_ptr = arg_buffer;
- for (int i = 0; i < num_returns; ++i) {
-#define CASE_RET_TYPE(type, ctype) \
- case wasm::ValueType::type: \
- DCHECK_EQ(sig->GetReturn(i).element_size_bytes(), sizeof(ctype)); \
- base::WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
- arg_buf_ptr += sizeof(ctype); \
- break;
- switch (sig->GetReturn(i).kind()) {
- CASE_RET_TYPE(kI32, uint32_t)
- CASE_RET_TYPE(kI64, uint64_t)
- CASE_RET_TYPE(kF32, float)
- CASE_RET_TYPE(kF64, double)
-#undef CASE_RET_TYPE
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef: {
- DCHECK_EQ(sig->GetReturn(i).element_size_bytes(), kSystemPointerSize);
- DCHECK_IMPLIES(sig->GetReturn(i) == wasm::kWasmNullRef,
- wasm_rets[i].to_anyref()->IsNull());
- base::WriteUnalignedValue<Object>(arg_buf_ptr,
- *wasm_rets[i].to_anyref());
- arg_buf_ptr += kSystemPointerSize;
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
+ ClearThreadInWasmScope wasm_flag;
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- DCHECK(!trap_handler::IsTrapHandlerEnabled() ||
- trap_handler::IsThreadInWasm());
-
- ClearThreadInWasmScope wasm_flag;
// Check if this is a real stack overflow.
StackLimitCheck check(isolate);
@@ -339,14 +183,12 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
}
RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
+ ClearThreadInWasmScope wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(func_index, 1);
- // This runtime function is always called from wasm code.
- ClearThreadInWasmScope flag_scope;
-
#ifdef DEBUG
FrameFinder<WasmCompileLazyFrame, StackFrame::EXIT> frame_finder(isolate);
DCHECK_EQ(*instance, frame_finder.frame()->wasm_instance());
@@ -367,7 +209,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
}
// Should be called from within a handle scope
-Handle<JSArrayBuffer> getSharedArrayBuffer(Handle<WasmInstanceObject> instance,
+Handle<JSArrayBuffer> GetSharedArrayBuffer(Handle<WasmInstanceObject> instance,
Isolate* isolate, uint32_t address) {
DCHECK(instance->has_memory_object());
Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
@@ -389,7 +231,7 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
Handle<JSArrayBuffer> array_buffer =
- getSharedArrayBuffer(instance, isolate, address);
+ GetSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::Wake(array_buffer, address, count);
}
@@ -403,7 +245,7 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
- getSharedArrayBuffer(instance, isolate, address);
+ GetSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
expected_value, timeout_ns->AsInt64());
}
@@ -418,7 +260,7 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
- getSharedArrayBuffer(instance, isolate, address);
+ GetSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
expected_value->AsInt64(),
timeout_ns->AsInt64());
@@ -439,15 +281,11 @@ Object ThrowTableOutOfBounds(Isolate* isolate,
} // namespace
RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
- // This runtime function is always being called from wasm code.
ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
- CONVERT_UINT32_ARG_CHECKED(function_index, 0);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_UINT32_ARG_CHECKED(function_index, 1);
Handle<WasmExternalFunction> function =
WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
@@ -457,9 +295,7 @@ RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
}
RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
- // This runtime function is always being called from wasm code.
ClearThreadInWasmScope flag_scope;
-
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -468,6 +304,8 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
DCHECK_LT(table_index, instance->tables().length());
auto table = handle(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
+ // We only use the runtime call for lazily initialized function references.
+ DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
if (!WasmTableObject::IsInBounds(isolate, table, entry_index)) {
return ThrowWasmError(isolate, MessageTemplate::kWasmTrapTableOutOfBounds);
@@ -477,9 +315,7 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
}
RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
- // This runtime function is always being called from wasm code.
ClearThreadInWasmScope flag_scope;
-
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -491,6 +327,8 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
DCHECK_LT(table_index, instance->tables().length());
auto table = handle(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
+ // We only use the runtime call for function references.
+ DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
if (!WasmTableObject::IsInBounds(isolate, table, entry_index)) {
return ThrowWasmError(isolate, MessageTemplate::kWasmTrapTableOutOfBounds);
@@ -500,6 +338,7 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
}
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -518,6 +357,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
}
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
@@ -536,6 +376,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
}
RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
auto instance =
@@ -554,6 +395,7 @@ RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
}
RUNTIME_FUNCTION(Runtime_WasmTableFill) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
auto instance =
@@ -585,29 +427,11 @@ RUNTIME_FUNCTION(Runtime_WasmTableFill) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmNewMultiReturnFixedArray) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_INT32_ARG_CHECKED(size, 0);
- Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(size);
- return *fixed_array;
-}
-
-RUNTIME_FUNCTION(Runtime_WasmNewMultiReturnJSArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- DCHECK(!isolate->context().is_null());
- CONVERT_ARG_CHECKED(FixedArray, fixed_array, 0);
- Handle<FixedArray> fixed_array_handle(fixed_array, isolate);
- Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(
- fixed_array_handle, PACKED_ELEMENTS);
- return *array;
-}
-
RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
+ ClearThreadInWasmScope flag_scope;
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- FrameFinder<WasmCompiledFrame, StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK>
+ FrameFinder<WasmFrame, StackFrame::EXIT, StackFrame::WASM_DEBUG_BREAK>
frame_finder(isolate);
auto instance = handle(frame_finder.frame()->wasm_instance(), isolate);
int position = frame_finder.frame()->position();
@@ -617,7 +441,7 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
DebugScope debug_scope(isolate->debug());
const auto undefined = ReadOnlyRoots(isolate).undefined_value();
- WasmCompiledFrame* frame = frame_finder.frame();
+ WasmFrame* frame = frame_finder.frame();
auto* debug_info = frame->native_module()->GetDebugInfo();
if (debug_info->IsStepping(frame)) {
debug_info->ClearStepping();
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
new file mode 100644
index 0000000000..f73017c584
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/execution/arguments-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_ShrinkFinalizationRegistryUnregisterTokenMap) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFinalizationRegistry, finalization_registry, 0);
+
+ if (!finalization_registry->key_map().IsUndefined(isolate)) {
+ Handle<SimpleNumberDictionary> key_map =
+ handle(SimpleNumberDictionary::cast(finalization_registry->key_map()),
+ isolate);
+ key_map = SimpleNumberDictionary::Shrink(isolate, key_map);
+ finalization_registry->set_key_map(*key_map);
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
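Once registered in runtime.h (see the FOR_EACH_INTRINSIC_WEAKREF list further down in this diff), the new function is reachable through the generic runtime-call mechanism. A minimal, hypothetical CSA-style call site, assuming `context` and `registry` nodes are already in scope; the actual caller lives in the FinalizationRegistry builtins and is not part of this hunk:
  // Illustrative sketch only -- not code from this patch.
  CallRuntime(Runtime::kShrinkFinalizationRegistryUnregisterTokenMap, context,
              registry);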
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index e51338ba6d..bd6853de8e 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -212,6 +212,7 @@ bool Runtime::IsWhitelistedForFuzzing(FunctionId id) {
// Runtime functions only permitted for non-differential fuzzers.
// This list may contain functions performing extra checks or returning
// different values in the context of different flags passed to V8.
+ case Runtime::kGetOptimizationStatus:
case Runtime::kHeapObjectVerify:
case Runtime::kIsBeingInterpreted:
return !FLAG_allow_natives_for_differential_fuzzing;
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index c9ee6d88ac..8f8903d965 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -222,7 +222,7 @@ namespace internal {
F(NewError, 2, 1) \
F(NewReferenceError, 2, 1) \
F(NewSyntaxError, 2, 1) \
- F(NewTypeError, 2, 1) \
+ F(NewTypeError, -1 /* [1, 4] */, 1) \
F(OrdinaryHasInstance, 2, 1) \
F(PromoteScheduledException, 0, 1) \
F(ReportMessageFromMicrotask, 1, 1) \
@@ -239,7 +239,7 @@ namespace internal {
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowInvalidTypedArrayAlignment, 2, 1) \
F(ThrowIteratorError, 1, 1) \
- F(ThrowSpreadArgIsNullOrUndefined, 1, 1) \
+ F(ThrowSpreadArgError, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowPatternAssignmentNonCoercible, 1, 1) \
@@ -274,7 +274,7 @@ namespace internal {
I(IsSmi, 1, 1) \
F(IsValidSmi, 1, 1) \
F(MaxSmi, 0, 1) \
- F(NumberToString, 1, 1) \
+ F(NumberToStringSlow, 1, 1) \
F(StringParseFloat, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringToNumber, 1, 1) \
@@ -285,7 +285,6 @@ namespace internal {
F(AddPrivateField, 3, 1) \
F(AddPrivateBrand, 3, 1) \
F(AllocateHeapNumber, 0, 1) \
- F(ClassOf, 1, 1) \
F(CollectTypeProfile, 3, 1) \
F(CompleteInobjectSlackTrackingForMap, 1, 1) \
I(CopyDataProperties, 2, 1) \
@@ -367,7 +366,9 @@ namespace internal {
F(RejectPromise, 3, 1) \
F(ResolvePromise, 2, 1) \
F(PromiseRejectAfterResolved, 2, 1) \
- F(PromiseResolveAfterResolved, 2, 1)
+ F(PromiseResolveAfterResolved, 2, 1) \
+ F(ConstructAggregateErrorHelper, 3, 1) \
+ F(ConstructInternalAggregateErrorHelper, -1 /* <= 4*/, 1)
#define FOR_EACH_INTRINSIC_PROXY(F, I) \
F(CheckProxyGetSetTrapResult, 2, 1) \
@@ -403,8 +404,7 @@ namespace internal {
F(NewClosure_Tenured, 2, 1) \
F(NewFunctionContext, 1, 1) \
F(NewRestParameter, 1, 1) \
- F(NewSloppyArguments, 3, 1) \
- F(NewSloppyArguments_Generic, 1, 1) \
+ F(NewSloppyArguments, 1, 1) \
F(NewStrictArguments, 1, 1) \
F(PushBlockContext, 1, 1) \
F(PushCatchContext, 2, 1) \
@@ -520,9 +520,9 @@ namespace internal {
F(NewRegExpWithBacktrackLimit, 3, 1) \
F(PrepareFunctionForOptimization, -1, 1) \
F(PrintWithNameForAssert, 2, 1) \
- F(RedirectToWasmInterpreter, 2, 1) \
F(RunningInSimulator, 0, 1) \
F(RuntimeEvaluateREPL, 1, 1) \
+ F(SerializeDeserializeNow, 0, 1) \
F(SerializeWasmModule, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetForceSlowPath, 1, 1) \
@@ -538,7 +538,6 @@ namespace internal {
F(TurbofanStaticAssert, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
- F(WasmNumInterpretedCalls, 1, 1) \
F(WasmNumCodeSpaces, 1, 1) \
F(WasmTierDownModule, 1, 1) \
F(WasmTierUpFunction, 2, 1) \
@@ -553,32 +552,30 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmI32AtomicWait, 4, 1) \
- F(WasmI64AtomicWait, 5, 1) \
- F(WasmAtomicNotify, 3, 1) \
- F(WasmExceptionGetValues, 1, 1) \
- F(WasmExceptionGetTag, 1, 1) \
- F(WasmMemoryGrow, 2, 1) \
- F(WasmRunInterpreter, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmRefFunc, 1, 1) \
- F(WasmFunctionTableGet, 3, 1) \
- F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 6, 1) \
- F(WasmTableCopy, 6, 1) \
- F(WasmTableGrow, 3, 1) \
- F(WasmTableFill, 4, 1) \
- F(WasmIsValidFuncRefValue, 1, 1) \
- F(WasmCompileLazy, 2, 1) \
- F(WasmNewMultiReturnFixedArray, 1, 1) \
- F(WasmNewMultiReturnJSArray, 1, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicNotify, 3, 1) \
+ F(WasmMemoryGrow, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
+ F(WasmFunctionTableGet, 3, 1) \
+ F(WasmFunctionTableSet, 4, 1) \
+ F(WasmTableInit, 6, 1) \
+ F(WasmTableCopy, 6, 1) \
+ F(WasmTableGrow, 3, 1) \
+ F(WasmTableFill, 4, 1) \
+ F(WasmIsValidFuncRefValue, 1, 1) \
+ F(WasmCompileLazy, 2, 1) \
F(WasmDebugBreak, 0, 1)
+#define FOR_EACH_INTRINSIC_WEAKREF(F, I) \
+ F(ShrinkFinalizationRegistryUnregisterTokenMap, 1, 1)
+
#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
F(DebugBreakOnBytecode, 1, 2) \
F(LoadLookupSlotForCall, 1, 2)
@@ -637,7 +634,8 @@ namespace internal {
FOR_EACH_INTRINSIC_SYMBOL(F, I) \
FOR_EACH_INTRINSIC_TEST(F, I) \
FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
- FOR_EACH_INTRINSIC_WASM(F, I)
+ FOR_EACH_INTRINSIC_WASM(F, I) \
+ FOR_EACH_INTRINSIC_WEAKREF(F, I)
// Defines the list of all intrinsics, coming in 2 flavors, either returning an
// object or a pair.
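Each entry in these lists follows the F(Name, number-of-arguments, number-of-return-values) convention; -1 marks a variable argument count, with the accepted range given in the adjacent comment. A minimal sketch of how such an X-macro list can be turned into declarations, using simplified, hypothetical signatures (the real macros in runtime.h also generate Runtime::k<Name> enum values and the intrinsic table):
  // Simplified expansion sketch -- illustrative only.
  #define DECLARE_RT(Name, nargs, nresults) \
    Address Runtime_##Name(int args_length, Address* args_object, Isolate* isolate);
  FOR_EACH_INTRINSIC_WEAKREF(DECLARE_RT, DECLARE_RT)
  #undef DECLARE_RT
  // yields, roughly:
  //   Address Runtime_ShrinkFinalizationRegistryUnregisterTokenMap(
  //       int args_length, Address* args_object, Isolate* isolate);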
diff --git a/deps/v8/src/snapshot/DEPS b/deps/v8/src/snapshot/DEPS
index 93f17c9286..6f92db18d4 100644
--- a/deps/v8/src/snapshot/DEPS
+++ b/deps/v8/src/snapshot/DEPS
@@ -5,7 +5,7 @@ specific_include_rules = {
"snapshot-compression.cc": [
"+third_party/zlib",
],
- "serializer-common.cc": [
+ "snapshot-utils.cc": [
"+third_party/zlib",
],
}
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index b6e2ec65e3..f9093012b2 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -13,6 +13,7 @@
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
#include "src/snapshot/object-deserializer.h"
+#include "src/snapshot/snapshot-utils.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/version.h"
@@ -31,7 +32,8 @@ ScriptData::ScriptData(const byte* data, int length)
}
CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash)
- : Serializer(isolate), source_hash_(source_hash) {
+ : Serializer(isolate, Snapshot::kDefaultSerializerFlags),
+ source_hash_(source_hash) {
allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
@@ -268,8 +270,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
SerializedCodeData::SanityCheckResult sanity_check_result =
SerializedCodeData::CHECK_SUCCESS;
const SerializedCodeData scd = SerializedCodeData::FromCachedData(
- isolate, cached_data,
- SerializedCodeData::SourceHash(source, origin_options),
+ cached_data, SerializedCodeData::SourceHash(source, origin_options),
&sanity_check_result);
if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
@@ -403,7 +404,7 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
}
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
- Isolate* isolate, uint32_t expected_source_hash) const {
+ uint32_t expected_source_hash) const {
if (this->size_ < kHeaderSize) return INVALID_HEADER;
uint32_t magic_number = GetMagicNumber();
if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
@@ -469,11 +470,11 @@ SerializedCodeData::SerializedCodeData(ScriptData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
SerializedCodeData SerializedCodeData::FromCachedData(
- Isolate* isolate, ScriptData* cached_data, uint32_t expected_source_hash,
+ ScriptData* cached_data, uint32_t expected_source_hash,
SanityCheckResult* rejection_result) {
DisallowHeapAllocation no_gc;
SerializedCodeData scd(cached_data);
- *rejection_result = scd.SanityCheck(isolate, expected_source_hash);
+ *rejection_result = scd.SanityCheck(expected_source_hash);
if (*rejection_result != CHECK_SUCCESS) {
cached_data->Reject();
return SerializedCodeData(nullptr, 0);
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index ace50b26f3..2daf5200ec 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -109,8 +109,7 @@ class SerializedCodeData : public SerializedData {
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
// Used when consuming.
- static SerializedCodeData FromCachedData(Isolate* isolate,
- ScriptData* cached_data,
+ static SerializedCodeData FromCachedData(ScriptData* cached_data,
uint32_t expected_source_hash,
SanityCheckResult* rejection_result);
@@ -136,8 +135,7 @@ class SerializedCodeData : public SerializedData {
return Vector<const byte>(data_ + kHeaderSize, size_ - kHeaderSize);
}
- SanityCheckResult SanityCheck(Isolate* isolate,
- uint32_t expected_source_hash) const;
+ SanityCheckResult SanityCheck(uint32_t expected_source_hash) const;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index e15cf6c678..2a3d77646a 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/partial-deserializer.h"
+#include "src/snapshot/context-deserializer.h"
#include "src/api/api-inl.h"
#include "src/heap/heap-inl.h"
@@ -12,11 +12,11 @@
namespace v8 {
namespace internal {
-MaybeHandle<Context> PartialDeserializer::DeserializeContext(
+MaybeHandle<Context> ContextDeserializer::DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
- PartialDeserializer d(data);
+ ContextDeserializer d(data);
d.SetRehashability(can_rehash);
MaybeHandle<Object> maybe_result =
@@ -27,15 +27,18 @@ MaybeHandle<Context> PartialDeserializer::DeserializeContext(
: MaybeHandle<Context>();
}
-MaybeHandle<Object> PartialDeserializer::Deserialize(
+MaybeHandle<Object> ContextDeserializer::Deserialize(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
Initialize(isolate);
if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "PartialDeserializer");
+ V8::FatalProcessOutOfMemory(isolate, "ContextDeserializer");
}
+ // Replace serialized references to the global proxy and its map with the
+ // given global proxy and its map.
AddAttachedObject(global_proxy);
+ AddAttachedObject(handle(global_proxy->map(), isolate));
Handle<Object> result;
{
@@ -45,8 +48,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
CodeSpace* code_space = isolate->heap()->code_space();
Address start_address = code_space->top();
Object root;
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- FullObjectSlot(&root));
+ VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
@@ -57,21 +59,20 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
+ if (FLAG_rehash_snapshot && can_rehash()) Rehash();
LogNewMapEvents();
result = handle(root, isolate);
}
- if (FLAG_rehash_snapshot && can_rehash()) Rehash();
SetupOffHeapArrayBufferBackingStores();
return result;
}
-void PartialDeserializer::SetupOffHeapArrayBufferBackingStores() {
+void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
- // Serializer writes backing store ref in |backing_store| field.
- size_t store_index = reinterpret_cast<size_t>(buffer->backing_store());
+ uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
@@ -79,7 +80,7 @@ void PartialDeserializer::SetupOffHeapArrayBufferBackingStores() {
}
}
-void PartialDeserializer::DeserializeEmbedderFields(
+void ContextDeserializer::DeserializeEmbedderFields(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
if (!source()->HasMore() || source()->Get() != kEmbedderFieldsData) return;
DisallowHeapAllocation no_gc;
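The two AddAttachedObject() calls above mirror the two AddAttachedReference() calls added to ContextSerializer::Serialize below: attached references are resolved purely by insertion order, so index 0 is the global proxy and index 1 is its map on both sides. A minimal model of that pairing, with hypothetical names (the real bookkeeping lives in the serializer reference map and the deserializer's attached-objects list):
  // Sketch only -- not the real API.
  std::vector<Handle<HeapObject>> attached;   // filled by AddAttachedObject()
  Handle<HeapObject> ResolveAttached(uint32_t index) {
    // kAttachedReference bytecodes recorded with index i resolve to entry i,
    // which is how the serialized proxy and its map are swapped for the
    // embedder-provided ones.
    return attached[index];
  }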
diff --git a/deps/v8/src/snapshot/partial-deserializer.h b/deps/v8/src/snapshot/context-deserializer.h
index a25e659595..3854902238 100644
--- a/deps/v8/src/snapshot/partial-deserializer.h
+++ b/deps/v8/src/snapshot/context-deserializer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_PARTIAL_DESERIALIZER_H_
-#define V8_SNAPSHOT_PARTIAL_DESERIALIZER_H_
+#ifndef V8_SNAPSHOT_CONTEXT_DESERIALIZER_H_
+#define V8_SNAPSHOT_CONTEXT_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot.h"
@@ -14,8 +14,8 @@ namespace internal {
class Context;
// Deserializes the context-dependent object graph rooted at a given object.
-// The PartialDeserializer is not expected to deserialize any code objects.
-class V8_EXPORT_PRIVATE PartialDeserializer final : public Deserializer {
+// The ContextDeserializer is not expected to deserialize any code objects.
+class V8_EXPORT_PRIVATE ContextDeserializer final : public Deserializer {
public:
static MaybeHandle<Context> DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE PartialDeserializer final : public Deserializer {
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
private:
- explicit PartialDeserializer(const SnapshotData* data)
+ explicit ContextDeserializer(const SnapshotData* data)
: Deserializer(data, false) {}
// Deserialize a single object and the objects reachable from it.
@@ -40,4 +40,4 @@ class V8_EXPORT_PRIVATE PartialDeserializer final : public Deserializer {
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_PARTIAL_DESERIALIZER_H_
+#endif // V8_SNAPSHOT_CONTEXT_DESERIALIZER_H_
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 56b29a2ac7..41047aee7b 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/context-serializer.h"
#include "src/snapshot/startup-serializer.h"
#include "src/api/api-inl.h"
@@ -15,10 +15,61 @@
namespace v8 {
namespace internal {
-PartialSerializer::PartialSerializer(
- Isolate* isolate, StartupSerializer* startup_serializer,
+namespace {
+
+// During serialization, puts the native context into a state understood by the
+// serializer (e.g. by clearing lists of Code objects). After serialization,
+// the original state is restored.
+class SanitizeNativeContextScope final {
+ public:
+ SanitizeNativeContextScope(Isolate* isolate, NativeContext native_context,
+ bool allow_active_isolate_for_testing,
+ const DisallowHeapAllocation& no_gc)
+ : isolate_(isolate),
+ native_context_(native_context),
+ microtask_queue_(native_context.microtask_queue()),
+ optimized_code_list_(native_context.OptimizedCodeListHead()),
+ deoptimized_code_list_(native_context.DeoptimizedCodeListHead()) {
+#ifdef DEBUG
+ if (!allow_active_isolate_for_testing) {
+ // Microtasks.
+ DCHECK_EQ(0, microtask_queue_->size());
+ DCHECK(!microtask_queue_->HasMicrotasksSuppressions());
+ DCHECK_EQ(0, microtask_queue_->GetMicrotasksScopeDepth());
+ DCHECK(microtask_queue_->DebugMicrotasksScopeDepthIsZero());
+ // Code lists.
+ DCHECK(optimized_code_list_.IsUndefined(isolate));
+ DCHECK(deoptimized_code_list_.IsUndefined(isolate));
+ }
+#endif
+ Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ native_context.set_microtask_queue(isolate, nullptr);
+ native_context.SetOptimizedCodeListHead(undefined);
+ native_context.SetDeoptimizedCodeListHead(undefined);
+ }
+
+ ~SanitizeNativeContextScope() {
+ // Restore saved fields.
+ native_context_.SetDeoptimizedCodeListHead(deoptimized_code_list_);
+ native_context_.SetOptimizedCodeListHead(optimized_code_list_);
+ native_context_.set_microtask_queue(isolate_, microtask_queue_);
+ }
+
+ private:
+ Isolate* isolate_;
+ NativeContext native_context_;
+ MicrotaskQueue* const microtask_queue_;
+ const Object optimized_code_list_;
+ const Object deoptimized_code_list_;
+};
+
+} // namespace
+
+ContextSerializer::ContextSerializer(
+ Isolate* isolate, Snapshot::SerializerFlags flags,
+ StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback)
- : Serializer(isolate),
+ : Serializer(isolate, flags),
startup_serializer_(startup_serializer),
serialize_embedder_fields_(callback),
can_be_rehashed_(true) {
@@ -26,36 +77,40 @@ PartialSerializer::PartialSerializer(
allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
-PartialSerializer::~PartialSerializer() {
- OutputStatistics("PartialSerializer");
+ContextSerializer::~ContextSerializer() {
+ OutputStatistics("ContextSerializer");
}
-void PartialSerializer::Serialize(Context* o, bool include_global_proxy) {
+void ContextSerializer::Serialize(Context* o,
+ const DisallowHeapAllocation& no_gc) {
context_ = *o;
DCHECK(context_.IsNativeContext());
+
+ // Upon deserialization, references to the global proxy and its map will be
+ // replaced.
reference_map()->AddAttachedReference(
reinterpret_cast<void*>(context_.global_proxy().ptr()));
+ reference_map()->AddAttachedReference(
+ reinterpret_cast<void*>(context_.global_proxy().map().ptr()));
+
// The bootstrap snapshot has a code-stub context. When serializing the
- // partial snapshot, it is chained into the weak context list on the isolate
+ // context snapshot, it is chained into the weak context list on the isolate
// and its next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
+ // TODO(v8:10416): These mutations should not observably affect the running
+ // context.
context_.set(Context::NEXT_CONTEXT_LINK,
ReadOnlyRoots(isolate()).undefined_value());
DCHECK(!context_.global_object().IsUndefined());
// Reset math random cache to get fresh random numbers.
MathRandom::ResetContext(context_);
-#ifdef DEBUG
- MicrotaskQueue* microtask_queue = context_.native_context().microtask_queue();
- DCHECK_EQ(0, microtask_queue->size());
- DCHECK(!microtask_queue->HasMicrotasksSuppressions());
- DCHECK_EQ(0, microtask_queue->GetMicrotasksScopeDepth());
- DCHECK(microtask_queue->DebugMicrotasksScopeDepthIsZero());
-#endif
- context_.native_context().set_microtask_queue(nullptr);
+ SanitizeNativeContextScope sanitize_native_context(
+ isolate(), context_.native_context(), allow_active_isolate_for_testing(),
+ no_gc);
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr, FullObjectSlot(o));
+ VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(o));
SerializeDeferredObjects();
// Add section for embedder-serialized embedder fields.
@@ -68,9 +123,18 @@ void PartialSerializer::Serialize(Context* o, bool include_global_proxy) {
Pad();
}
-void PartialSerializer::SerializeObject(HeapObject obj) {
+void ContextSerializer::SerializeObject(HeapObject obj) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
+ if (!allow_active_isolate_for_testing()) {
+ // When serializing a snapshot intended for real use, we should not end up
+ // at another native context.
+ // But in test scenarios there is no way to avoid this. Since we only
+ // serialize a single context in these cases, and this context does not
+ // have to be executable, we can simply ignore this.
+ DCHECK_IMPLIES(obj.IsNativeContext(), obj == context_);
+ }
+
if (SerializeHotObject(obj)) return;
if (SerializeRoot(obj)) return;
@@ -81,22 +145,20 @@ void PartialSerializer::SerializeObject(HeapObject obj) {
return;
}
- if (ShouldBeInThePartialSnapshotCache(obj)) {
- startup_serializer_->SerializeUsingPartialSnapshotCache(&sink_, obj);
+ if (ShouldBeInTheStartupObjectCache(obj)) {
+ startup_serializer_->SerializeUsingStartupObjectCache(&sink_, obj);
return;
}
- // Pointers from the partial snapshot to the objects in the startup snapshot
- // should go through the root array or through the partial snapshot cache.
+ // Pointers from the context snapshot to the objects in the startup snapshot
+ // should go through the root array or through the startup object cache.
// If this is not the case you may have to add something to the root array.
DCHECK(!startup_serializer_->ReferenceMapContains(obj));
- // All the internalized strings that the partial snapshot needs should be
- // either in the root table or in the partial snapshot cache.
+ // All the internalized strings that the context snapshot needs should be
+ // either in the root table or in the startup object cache.
DCHECK(!obj.IsInternalizedString());
// Function and object templates are not context specific.
DCHECK(!obj.IsTemplateInfo());
- // We should not end up at another native context.
- DCHECK_IMPLIES(obj != context_, !obj.IsNativeContext());
// Clear literal boilerplates and feedback.
if (obj.IsFeedbackVector()) FeedbackVector::cast(obj).ClearSlots(isolate());
@@ -125,10 +187,10 @@ void PartialSerializer::SerializeObject(HeapObject obj) {
serializer.Serialize();
}
-bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject o) {
+bool ContextSerializer::ShouldBeInTheStartupObjectCache(HeapObject o) {
// Scripts should be referred only through shared function infos. We can't
- // allow them to be part of the partial snapshot because they contain a
- // unique ID, and deserializing several partial snapshots containing script
+ // allow them to be part of the context snapshot because they contain a
+ // unique ID, and deserializing several context snapshots containing scripts
// would cause dupes.
DCHECK(!o.IsScript());
return o.IsName() || o.IsSharedFunctionInfo() || o.IsHeapNumber() ||
@@ -142,7 +204,7 @@ namespace {
bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
-bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
+bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
if (!obj.IsJSObject()) return false;
JSObject js_obj = JSObject::cast(obj);
int embedder_fields_count = js_obj.GetEmbedderFieldCount();
@@ -167,7 +229,8 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// onto the result.
for (int i = 0; i < embedder_fields_count; i++) {
EmbedderDataSlot embedder_data_slot(js_obj, i);
- original_embedder_values.emplace_back(embedder_data_slot.load_raw(no_gc));
+ original_embedder_values.emplace_back(
+ embedder_data_slot.load_raw(isolate(), no_gc));
Object object = embedder_data_slot.load_tagged();
if (object.IsHeapObject()) {
DCHECK(IsValidHeapObject(isolate()->heap(), HeapObject::cast(object)));
@@ -194,7 +257,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// with embedder callbacks.
for (int i = 0; i < embedder_fields_count; i++) {
if (!DataIsEmpty(serialized_data[i])) {
- EmbedderDataSlot(js_obj, i).store_raw(kNullAddress, no_gc);
+ EmbedderDataSlot(js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
}
}
@@ -213,7 +276,8 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
StartupData data = serialized_data[i];
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
- EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
+ EmbedderDataSlot(js_obj, i).store_raw(isolate(),
+ original_embedder_values[i], no_gc);
embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
"embedder field holder");
embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
@@ -229,11 +293,11 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// 6) The content of the separate sink is appended eventually to the default
// sink. This ensures that during deserialization, we call the deserializer
// callback at the end, and can guarantee that the deserialized objects are
- // in a consistent state. See PartialSerializer::Serialize.
+ // in a consistent state. See ContextSerializer::Serialize.
return true;
}
-void PartialSerializer::CheckRehashability(HeapObject obj) {
+void ContextSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
if (!obj.NeedsRehashing()) return;
if (obj.CanBeRehashed()) return;
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/context-serializer.h
index d8e9ee2496..68850ed609 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/context-serializer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
-#define V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+#ifndef V8_SNAPSHOT_CONTEXT_SERIALIZER_H_
+#define V8_SNAPSHOT_CONTEXT_SERIALIZER_H_
#include "src/objects/contexts.h"
#include "src/snapshot/serializer.h"
@@ -14,25 +14,23 @@ namespace internal {
class StartupSerializer;
-class V8_EXPORT_PRIVATE PartialSerializer : public Serializer {
+class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
public:
- PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
+ ContextSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
+ StartupSerializer* startup_serializer,
v8::SerializeEmbedderFieldsCallback callback);
- ~PartialSerializer() override;
+ ~ContextSerializer() override;
// Serialize the objects reachable from a single object pointer.
- void Serialize(Context* o, bool include_global_proxy);
+ void Serialize(Context* o, const DisallowHeapAllocation& no_gc);
bool can_be_rehashed() const { return can_be_rehashed_; }
private:
void SerializeObject(HeapObject o) override;
-
- bool ShouldBeInThePartialSnapshotCache(HeapObject o);
-
+ bool ShouldBeInTheStartupObjectCache(HeapObject o);
bool SerializeJSObjectWithEmbedderFields(Object obj);
-
void CheckRehashability(HeapObject obj);
StartupSerializer* startup_serializer_;
@@ -44,10 +42,10 @@ class V8_EXPORT_PRIVATE PartialSerializer : public Serializer {
// Used to store serialized data for embedder fields.
SnapshotByteSink embedder_fields_sink_;
- DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+ DISALLOW_COPY_AND_ASSIGN(ContextSerializer);
};
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+#endif // V8_SNAPSHOT_CONTEXT_SERIALIZER_H_
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index e58e7b66ac..a3d3eca711 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/deserializer-allocator.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
+#include "src/heap/memory-chunk.h"
namespace v8 {
namespace internal {
@@ -84,7 +85,8 @@ Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
DCHECK(ReadOnlyRoots(heap_).free_space_map().IsMap());
DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map().IsMap());
DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map().IsMap());
- obj = heap_->AlignWithFiller(obj, size, reserved, next_alignment_);
+ obj = Heap::AlignWithFiller(ReadOnlyRoots(heap_), obj, size, reserved,
+ next_alignment_);
address = obj.address();
next_alignment_ = kWordAligned;
return address;
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 18f9363cdf..979e6ed2a8 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -8,7 +8,8 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/objects/heap-object.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/references.h"
+#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 761ece8037..33e4db4393 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,7 +4,9 @@
#include "src/snapshot/deserializer.h"
+#include "src/base/logging.h"
#include "src/codegen/assembler-inl.h"
+#include "src/common/external-pointer.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -22,6 +24,7 @@
#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/roots/roots.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
@@ -44,6 +47,15 @@ TSlot Deserializer::WriteAddress(TSlot dest, Address value) {
return dest + (kSystemPointerSize / TSlot::kSlotDataSize);
}
+template <typename TSlot>
+TSlot Deserializer::WriteExternalPointer(TSlot dest, Address value) {
+ value = EncodeExternalPointer(isolate(), value);
+ DCHECK(!allocator()->next_reference_is_weak());
+ memcpy(dest.ToVoidPtr(), &value, kExternalPointerSize);
+ STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
+ return dest + (kExternalPointerSize / TSlot::kSlotDataSize);
+}
+
void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
@@ -70,7 +82,7 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
for (HeapObject item : to_rehash_) {
- item.RehashBasedOnMap(isolate_);
+ item.RehashBasedOnMap(ReadOnlyRoots(isolate_));
}
}
@@ -130,14 +142,6 @@ void Deserializer::DeserializeDeferredObjects() {
}
}
}
-
- // When the deserialization of maps are deferred, they will be created
- // as filler maps, and we postpone the post processing until the maps
- // are also deserialized.
- for (const auto& pair : fillers_to_post_process_) {
- DCHECK(!pair.first.IsFiller());
- PostProcessNewObject(pair.first, pair.second);
- }
}
void Deserializer::LogNewObjectEvents() {
@@ -209,11 +213,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
DisallowHeapAllocation no_gc;
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
- if (obj.IsFiller()) {
- DCHECK_EQ(fillers_to_post_process_.find(obj),
- fillers_to_post_process_.end());
- fillers_to_post_process_.insert({obj, space});
- } else if (obj.IsString()) {
+ if (obj.IsString()) {
// Uninitialize hash field as we need to recompute the hash.
String string = String::cast(obj);
string.set_hash_field(String::kEmptyHashField);
@@ -279,7 +279,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
uint32_t index = string.resource_as_uint32();
Address address =
static_cast<Address>(isolate_->api_external_references()[index]);
- string.set_address_as_resource(address);
+ string.set_address_as_resource(isolate_, address);
isolate_->heap()->UpdateExternalString(string, 0,
string.ExternalPayloadSize());
isolate_->heap()->RegisterExternalString(String::cast(obj));
@@ -291,25 +291,29 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
// The backing store of the JSArrayBuffer has not been correctly restored
// yet, as that may trigger GC. The backing_store field currently contains
// a numbered reference to an already deserialized backing store.
- size_t store_index = reinterpret_cast<size_t>(buffer.backing_store());
+ uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view.set_data_pointer(reinterpret_cast<uint8_t*>(backing_store) +
- data_view.byte_offset());
+ data_view.set_data_pointer(
+ isolate_,
+ reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
} else if (obj.IsJSTypedArray()) {
JSTypedArray typed_array = JSTypedArray::cast(obj);
// Fixup typed array pointers.
if (typed_array.is_on_heap()) {
- typed_array.SetOnHeapDataPtr(HeapObject::cast(typed_array.base_pointer()),
+ typed_array.SetOnHeapDataPtr(isolate(),
+ HeapObject::cast(typed_array.base_pointer()),
typed_array.external_pointer());
} else {
// Serializer writes backing store ref as a DataPtr() value.
- size_t store_index = reinterpret_cast<size_t>(typed_array.DataPtr());
+ uint32_t store_index =
+ typed_array.GetExternalBackingStoreRefForDeserialization();
auto backing_store = backing_stores_[store_index];
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array.SetOffHeapDataPtr(start, typed_array.byte_offset());
+ typed_array.SetOffHeapDataPtr(isolate(), start,
+ typed_array.byte_offset());
}
} else if (obj.IsJSArrayBuffer()) {
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
@@ -591,10 +595,10 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
// Find an object in the roots array and write a pointer to it to the
// current object.
SINGLE_CASE(kRootArray, SnapshotSpace::kReadOnlyHeap)
- // Find an object in the partial snapshots cache and write a pointer to it
+ // Find an object in the startup object cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kPartialSnapshotCache, SnapshotSpace::kReadOnlyHeap)
- // Find an object in the partial snapshots cache and write a pointer to it
+ SINGLE_CASE(kStartupObjectCache, SnapshotSpace::kReadOnlyHeap)
+ // Find an object in the read-only object cache and write a pointer to it
// to the current object.
SINGLE_CASE(kReadOnlyObjectCache, SnapshotSpace::kReadOnlyHeap)
// Find an object in the attached references and write a pointer to it to
@@ -607,9 +611,15 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
// Find an external reference and write a pointer to it to the current
// object.
+ case kSandboxedExternalReference:
case kExternalReference: {
Address address = ReadExternalReferenceCase();
- current = WriteAddress(current, address);
+ if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedExternalReference) {
+ current = WriteExternalPointer(current, address);
+ } else {
+ DCHECK(!V8_HEAP_SANDBOX_BOOL);
+ current = WriteAddress(current, address);
+ }
break;
}
@@ -690,6 +700,7 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
break;
}
+ case kSandboxedApiReference:
case kApiReference: {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
@@ -702,7 +713,12 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
- current = WriteAddress(current, address);
+ if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedApiReference) {
+ current = WriteExternalPointer(current, address);
+ } else {
+ DCHECK(!V8_HEAP_SANDBOX_BOOL);
+ current = WriteAddress(current, address);
+ }
break;
}
@@ -763,41 +779,17 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
break;
}
- // Deserialize raw data of fixed length from 1 to 32 words.
+ // Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
STATIC_ASSERT(kNumberOfFixedRawData == 32);
SIXTEEN_CASES(kFixedRawData)
SIXTEEN_CASES(kFixedRawData + 16) {
- // This bytecode has become very confusing with recent changes due to
- // pointer compression. From comments and variable names it implies that
- // the length unit is words/kPointerSize, but the unit is actually
- // kTaggedSize since https://chromium-review.googlesource.com/c/1388529.
- //
- // Also, contents can be (tagged) Smis or just a raw byte sequence. In
- // the case of Smis we must be careful when deserializing into full
- // object slots. It is not valid to deserialize a sequence of >1 Smis
- // into full object slots in compressed pointer builds.
- //
- // Likewise one must pay attention to endianness when deserializing a
- // smi into a full object slot. That is what the code below is trying to
- // address.
- //
- // The solution below works because we currently never deserialize >1
- // Smi into full object slots, or raw byte sequences into full object
- // slots. But those assumptions are fragile.
- //
- const int size_in_tagged = data - kFixedRawDataStart;
- const int size_in_bytes = size_in_tagged * kTaggedSize;
- Address addr = current.address();
- DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize,
- size_in_tagged == 1);
-#ifdef V8_TARGET_BIG_ENDIAN
- if (kTaggedSize != TSlot::kSlotDataSize) {
- // Should only be reached when deserializing a Smi root.
- addr += kTaggedSize;
- }
-#endif
- source_.CopyRaw(reinterpret_cast<void*>(addr), size_in_bytes);
- current += size_in_tagged;
+ int size_in_tagged = data - kFixedRawDataStart;
+ source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
+
+ int size_in_bytes = size_in_tagged * kTaggedSize;
+ int size_in_slots = size_in_bytes / TSlot::kSlotDataSize;
+ DCHECK(IsAligned(size_in_bytes, TSlot::kSlotDataSize));
+ current += size_in_slots;
break;
}
@@ -864,10 +856,10 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
isolate->read_only_heap()->cached_read_only_object(cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
- } else if (bytecode == kPartialSnapshotCache) {
+ } else if (bytecode == kStartupObjectCache) {
int cache_index = source_.GetInt();
heap_object =
- HeapObject::cast(isolate->partial_snapshot_cache()->at(cache_index));
+ HeapObject::cast(isolate->startup_object_cache()->at(cache_index));
emit_write_barrier = Heap::InYoungGeneration(heap_object);
} else {
DCHECK_EQ(bytecode, kAttachedReference);
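Both reference cases above now branch on the heap sandbox: kSandboxedExternalReference and kSandboxedApiReference go through WriteExternalPointer(), which runs the raw address through EncodeExternalPointer() before storing it, while non-sandboxed builds keep the plain WriteAddress() path. A minimal sketch of the repeated dispatch, using a hypothetical helper name:
  // Hypothetical helper -- not part of the patch; it only factors the if/else
  // that appears in the kExternalReference and kApiReference cases.
  template <typename TSlot>
  TSlot WriteMaybeSandboxedPointer(TSlot dest, Address addr, bool sandboxed) {
    return (V8_HEAP_SANDBOX_BOOL && sandboxed)
               ? WriteExternalPointer(dest, addr)  // encoded external pointer
               : WriteAddress(dest, addr);         // raw system pointer
  }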
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 153fda4a90..3af3eca591 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -16,7 +16,7 @@
#include "src/objects/map.h"
#include "src/objects/string.h"
#include "src/snapshot/deserializer-allocator.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
@@ -132,6 +132,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
template <typename TSlot>
inline TSlot WriteAddress(TSlot dest, Address value);
+ template <typename TSlot>
+ inline TSlot WriteExternalPointer(TSlot dest, Address value);
+
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or nullptr if we are not writing into an
@@ -194,11 +197,6 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
std::vector<HeapObject> to_rehash_;
- // Store the objects whose maps are deferred and thus initialized as filler
- // maps during deserialization, so that they can be processed later when the
- // maps become available.
- std::unordered_map<HeapObject, SnapshotSpace, Object::Hasher>
- fillers_to_post_process_;
#ifdef DEBUG
uint32_t num_api_references_;
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index bb2a49e61e..93c584dcb7 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -7,6 +7,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/objects/objects-inl.h"
+#include "src/snapshot/snapshot-utils.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -306,7 +307,10 @@ Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
size_t EmbeddedData::CreateEmbeddedBlobHash() const {
STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
- return base::hash_range(data_ + EmbeddedBlobHashSize(), data_ + size_);
+ // Hash the entire blob except the hash field itself.
+ Vector<const byte> payload(data_ + EmbeddedBlobHashSize(),
+ size_ - EmbeddedBlobHashSize());
+ return Checksum(payload);
}
void EmbeddedData::PrintStatistics() const {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 8b1b4500d6..ebb78477b6 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -96,10 +96,6 @@ void PlatformEmbeddedFileWriterAIX::DeclareFunctionBegin(const char* name,
void PlatformEmbeddedFileWriterAIX::DeclareFunctionEnd(const char* name) {}
-int PlatformEmbeddedFileWriterAIX::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
void PlatformEmbeddedFileWriterAIX::FilePrologue() {}
void PlatformEmbeddedFileWriterAIX::DeclareExternalFilename(
@@ -122,12 +118,6 @@ DataDirective PlatformEmbeddedFileWriterAIX::ByteChunkDataDirective() const {
return kLong;
}
-int PlatformEmbeddedFileWriterAIX::WriteByteChunk(const uint8_t* data) {
- DCHECK_EQ(ByteChunkDataDirective(), kLong);
- const uint32_t* long_ptr = reinterpret_cast<const uint32_t*>(data);
- return HexLiteral(*long_ptr);
-}
-
#undef SYMBOL_PREFIX
} // namespace internal
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h
index 8f99c1ae85..2c709e0a30 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h
@@ -37,8 +37,6 @@ class PlatformEmbeddedFileWriterAIX : public PlatformEmbeddedFileWriterBase {
void DeclareFunctionBegin(const char* name, uint32_t size) override;
void DeclareFunctionEnd(const char* name) override;
- int HexLiteral(uint64_t value) override;
-
void Comment(const char* string) override;
void FilePrologue() override;
@@ -48,7 +46,6 @@ class PlatformEmbeddedFileWriterAIX : public PlatformEmbeddedFileWriterBase {
int IndentedDataDirective(DataDirective directive) override;
DataDirective ByteChunkDataDirective() const override;
- int WriteByteChunk(const uint8_t* data) override;
private:
void DeclareSymbolGlobal(const char* name);
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
index 7a04a9dfab..1cd402d8ba 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
@@ -24,6 +24,10 @@ DataDirective PointerSizeDirective() {
}
}
+int PlatformEmbeddedFileWriterBase::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
int DataDirectiveSize(DataDirective directive) {
switch (directive) {
case kByte:
@@ -39,24 +43,37 @@ int DataDirectiveSize(DataDirective directive) {
}
int PlatformEmbeddedFileWriterBase::WriteByteChunk(const uint8_t* data) {
- DCHECK_EQ(ByteChunkDataDirective(), kOcta);
-
- static constexpr size_t kSize = kInt64Size;
-
- uint64_t part1, part2;
- // Use memcpy for the reads since {data} is not guaranteed to be aligned.
+ size_t kSize = DataDirectiveSize(ByteChunkDataDirective());
+ size_t kHalfSize = kSize / 2;
+ uint64_t high = 0, low = 0;
+
+ switch (kSize) {
+ case 1:
+ low = *data;
+ break;
+ case 4:
+ low = *reinterpret_cast<const uint32_t*>(data);
+ break;
+ case 8:
+ low = *reinterpret_cast<const uint64_t*>(data);
+ break;
+ case 16:
#ifdef V8_TARGET_BIG_ENDIAN
- memcpy(&part1, data, kSize);
- memcpy(&part2, data + kSize, kSize);
+ memcpy(&high, data, kHalfSize);
+ memcpy(&low, data + kHalfSize, kHalfSize);
#else
- memcpy(&part1, data + kSize, kSize);
- memcpy(&part2, data, kSize);
+ memcpy(&high, data + kHalfSize, kHalfSize);
+ memcpy(&low, data, kHalfSize);
#endif // V8_TARGET_BIG_ENDIAN
+ break;
+ default:
+ UNREACHABLE();
+ }
- if (part1 != 0) {
- return fprintf(fp(), "0x%" PRIx64 "%016" PRIx64, part1, part2);
+ if (high != 0) {
+ return fprintf(fp(), "0x%" PRIx64 "%016" PRIx64, high, low);
} else {
- return fprintf(fp(), "0x%" PRIx64, part2);
+ return fprintf(fp(), "0x%" PRIx64, low);
}
}
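A worked example of the output above, assuming a little-endian host: with a 4-byte chunk (the .long directive used by the AIX and MIPS writers) the high half stays zero and only the low word is printed; with the default 16-byte .octa chunk the two halves are combined into one literal, the low half zero-padded to 16 hex digits and leading zeros of the high half dropped, matching the "0x%" PRIx64 "%016" PRIx64 format string.
  // 4-byte chunk:  {0x78, 0x56, 0x34, 0x12}                  ->  "0x12345678"
  // 16-byte chunk: high = 0x0123456789abcdef,
  //                low  = 0x1122334455667788                 ->  "0x123456789abcdef1122334455667788"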
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
index eab5ca8ec6..b8709cb932 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
@@ -67,7 +67,7 @@ class PlatformEmbeddedFileWriterBase {
virtual void DeclareFunctionEnd(const char* name) = 0;
// Returns the number of printed characters.
- virtual int HexLiteral(uint64_t value) = 0;
+ virtual int HexLiteral(uint64_t value);
virtual void Comment(const char* string) = 0;
virtual void Newline() { fprintf(fp_, "\n"); }
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index f1d6efc767..7e779ec6e5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -114,10 +114,6 @@ void PlatformEmbeddedFileWriterGeneric::DeclareFunctionBegin(const char* name,
void PlatformEmbeddedFileWriterGeneric::DeclareFunctionEnd(const char* name) {}
-int PlatformEmbeddedFileWriterGeneric::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
void PlatformEmbeddedFileWriterGeneric::FilePrologue() {
// TODO(v8:10026): Add ELF note required for BTI.
}
@@ -146,6 +142,18 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
return fprintf(fp_, " %s ", DirectiveAsString(directive));
}
+DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
+ const {
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ // MIPS uses a fixed 4 byte instruction set, using .long
+ // to prevent any unnecessary padding.
+ return kLong;
+#else
+ // Other ISAs defer to the base class implementation.
+ return PlatformEmbeddedFileWriterBase::ByteChunkDataDirective();
+#endif
+}
+
#undef SYMBOL_PREFIX
} // namespace internal
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h
index 1f899cbb5c..4d8284f31f 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h
@@ -39,8 +39,6 @@ class PlatformEmbeddedFileWriterGeneric
void DeclareFunctionBegin(const char* name, uint32_t size) override;
void DeclareFunctionEnd(const char* name) override;
- int HexLiteral(uint64_t value) override;
-
void Comment(const char* string) override;
void FilePrologue() override;
@@ -49,6 +47,8 @@ class PlatformEmbeddedFileWriterGeneric
int IndentedDataDirective(DataDirective directive) override;
+ DataDirective ByteChunkDataDirective() const override;
+
private:
void DeclareSymbolGlobal(const char* name);
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 234f8a1f48..a094a81ee2 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -89,10 +89,6 @@ void PlatformEmbeddedFileWriterMac::DeclareFunctionBegin(const char* name,
void PlatformEmbeddedFileWriterMac::DeclareFunctionEnd(const char* name) {}
-int PlatformEmbeddedFileWriterMac::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
void PlatformEmbeddedFileWriterMac::FilePrologue() {}
void PlatformEmbeddedFileWriterMac::DeclareExternalFilename(
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
index 76780d75f3..79f8fdf587 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
@@ -37,8 +37,6 @@ class PlatformEmbeddedFileWriterMac : public PlatformEmbeddedFileWriterBase {
void DeclareFunctionBegin(const char* name, uint32_t size) override;
void DeclareFunctionEnd(const char* name) override;
- int HexLiteral(uint64_t value) override;
-
void Comment(const char* string) override;
void FilePrologue() override;
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index d0d9414707..c0d3846e9e 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -13,8 +13,8 @@
#include "src/codegen/source-position-table.h"
#include "src/flags/flags.h"
#include "src/sanitizer/msan.h"
+#include "src/snapshot/context-serializer.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
-#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 6b02f22e65..2de08846d4 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -41,16 +41,15 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
Object root;
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
- FullObjectSlot(&root));
+ VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
DeserializeDeferredObjects();
FlushICache();
LinkAllocationSites();
LogNewMapEvents();
result = handle(HeapObject::cast(root), isolate);
+ Rehash();
allocator()->RegisterDeserializedObjectsForBlackAllocation();
}
- Rehash();
CommitPostProcessedObjects();
return scope.CloseAndEscape(result);
}
@@ -91,8 +90,7 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
}
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
- // Serializer writes backing store ref in |backing_store| field.
- size_t store_index = reinterpret_cast<size_t>(buffer->backing_store());
+ uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index a2ad1a0279..c1c96666ca 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -29,8 +29,8 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
// Read-only object cache is not yet populated.
DCHECK(!ro_heap->read_only_object_cache_is_initialized());
- // Partial snapshot cache is not yet populated.
- DCHECK(isolate->partial_snapshot_cache()->empty());
+ // Startup object cache is not yet populated.
+ DCHECK(isolate->startup_object_cache()->empty());
// Builtins are not yet created.
DCHECK(!isolate->builtins()->is_initialized());
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 41e6188154..9bc8f105d1 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -16,8 +16,9 @@
namespace v8 {
namespace internal {
-ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate)
- : RootsSerializer(isolate, RootIndex::kFirstReadOnlyRoot) {
+ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate,
+ Snapshot::SerializerFlags flags)
+ : RootsSerializer(isolate, flags, RootIndex::kFirstReadOnlyRoot) {
STATIC_ASSERT(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
@@ -50,7 +51,8 @@ void ReadOnlySerializer::SerializeReadOnlyRoots() {
// No active threads.
CHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
- CHECK(isolate()->handle_scope_implementer()->blocks()->empty());
+ CHECK_IMPLIES(!allow_active_isolate_for_testing(),
+ isolate()->handle_scope_implementer()->blocks()->empty());
ReadOnlyRoots(isolate()).Iterate(this);
}
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index c73c397647..f30b2c30ba 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -17,7 +17,7 @@ class SnapshotByteSink;
class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
public:
- explicit ReadOnlySerializer(Isolate* isolate);
+ ReadOnlySerializer(Isolate* isolate, Snapshot::SerializerFlags flags);
~ReadOnlySerializer() override;
void SerializeReadOnlyRoots();
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index f354dec158..6a8f2bb05e 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -13,8 +13,9 @@ namespace v8 {
namespace internal {
RootsSerializer::RootsSerializer(Isolate* isolate,
+ Snapshot::SerializerFlags flags,
RootIndex first_root_to_be_serialized)
- : Serializer(isolate),
+ : Serializer(isolate, flags),
first_root_to_be_serialized_(first_root_to_be_serialized),
can_be_rehashed_(true) {
for (size_t i = 0; i < static_cast<size_t>(first_root_to_be_serialized);
@@ -47,7 +48,7 @@ void RootsSerializer::VisitRootPointers(Root root, const char* description,
// - Only root list elements that have been fully serialized can be
// referenced using kRootArray bytecodes.
for (FullObjectSlot current = start; current < end; ++current) {
- SerializeRootObject(*current);
+ SerializeRootObject(current);
size_t root_index = current - roots_table.begin();
root_has_been_serialized_.set(root_index);
}
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
index cfb59dd75e..be41d7220f 100644
--- a/deps/v8/src/snapshot/roots-serializer.h
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -24,7 +24,8 @@ class RootsSerializer : public Serializer {
public:
// The serializer expects that all roots before |first_root_to_be_serialized|
// are already serialized.
- RootsSerializer(Isolate* isolate, RootIndex first_root_to_be_serialized);
+ RootsSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
+ RootIndex first_root_to_be_serialized);
bool can_be_rehashed() const { return can_be_rehashed_; }
bool root_has_been_serialized(RootIndex root_index) const {
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
index 0d15c5a91b..51264961cd 100644
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ b/deps/v8/src/snapshot/serializer-allocator.h
@@ -5,7 +5,8 @@
#ifndef V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/references.h"
+#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/serializer-deserializer.cc b/deps/v8/src/snapshot/serializer-deserializer.cc
new file mode 100644
index 0000000000..37fb2636ce
--- /dev/null
+++ b/deps/v8/src/snapshot/serializer-deserializer.cc
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/serializer-deserializer.h"
+
+#include "src/objects/foreign-inl.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// The startup object cache is terminated by undefined. We visit the context
+// snapshot...
+// - during deserialization to populate it.
+// - during normal GC to keep its content alive.
+// - not during serialization. The context serializer adds to it explicitly.
+DISABLE_CFI_PERF
+void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
+ std::vector<Object>* cache = isolate->startup_object_cache();
+ for (size_t i = 0;; ++i) {
+ // Extend the array so it is ready to receive a value when deserializing.
+ if (cache->size() <= i) cache->push_back(Smi::zero());
+ // During deserialization, the visitor populates the startup object cache
+ // and eventually terminates the cache with undefined.
+ visitor->VisitRootPointer(Root::kStartupObjectCache, nullptr,
+ FullObjectSlot(&cache->at(i)));
+ if (cache->at(i).IsUndefined(isolate)) break;
+ }
+}
+
+bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
+ // ArrayBuffer instances are serialized by first re-assigning an index
+ // to the backing store field, then serializing the object, and then
+ // storing the actual backing store address again (and the same for the
+ // ArrayBufferExtension). If serialization of the object itself is deferred,
+ // the real backing store address is written into the snapshot, which cannot
+ // be processed when deserializing.
+ return !o.IsString() && !o.IsScript() && !o.IsJSTypedArray() &&
+ !o.IsJSArrayBuffer();
+}
+
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+ Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos) {
+ // Restore wiped accessor infos.
+ for (AccessorInfo info : accessor_infos) {
+ Foreign::cast(info.js_getter())
+ .set_foreign_address(isolate, info.redirected_getter());
+ }
+}
+
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+ Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos) {
+ for (CallHandlerInfo info : call_handler_infos) {
+ Foreign::cast(info.js_callback())
+ .set_foreign_address(isolate, info.redirected_callback());
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-deserializer.h
index 3636da3aa4..d9d62d89f0 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -1,18 +1,12 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
+// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_SERIALIZER_COMMON_H_
-#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
+#ifndef V8_SNAPSHOT_SERIALIZER_DESERIALIZER_H_
+#define V8_SNAPSHOT_SERIALIZER_DESERIALIZER_H_
-#include "src/base/bits.h"
-#include "src/base/memory.h"
-#include "src/codegen/external-reference-table.h"
-#include "src/common/globals.h"
#include "src/objects/visitors.h"
-#include "src/sanitizer/msan.h"
#include "src/snapshot/references.h"
-#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -20,95 +14,57 @@ namespace internal {
class CallHandlerInfo;
class Isolate;
-class ExternalReferenceEncoder {
+// The Serializer/Deserializer class is a common superclass for Serializer and
+// Deserializer which is used to store common constants and methods used by
+// both.
+class SerializerDeserializer : public RootVisitor {
public:
- class Value {
- public:
- explicit Value(uint32_t raw) : value_(raw) {}
- Value() : value_(0) {}
- static uint32_t Encode(uint32_t index, bool is_from_api) {
- return Index::encode(index) | IsFromAPI::encode(is_from_api);
- }
-
- bool is_from_api() const { return IsFromAPI::decode(value_); }
- uint32_t index() const { return Index::decode(value_); }
-
- private:
- using Index = base::BitField<uint32_t, 0, 31>;
- using IsFromAPI = base::BitField<bool, 31, 1>;
- uint32_t value_;
- };
-
- explicit ExternalReferenceEncoder(Isolate* isolate);
- ~ExternalReferenceEncoder(); // NOLINT (modernize-use-equals-default)
-
- Value Encode(Address key);
- Maybe<Value> TryEncode(Address key);
-
- const char* NameOfAddress(Isolate* isolate, Address address) const;
-
- private:
- AddressToIndexHashMap* map_;
-
-#ifdef DEBUG
- std::vector<int> count_;
- const intptr_t* api_references_;
-#endif // DEBUG
-
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
-};
+ static void Iterate(Isolate* isolate, RootVisitor* visitor);
-class HotObjectsList {
- public:
- HotObjectsList() : index_(0) {}
+ protected:
+ class HotObjectsList {
+ public:
+ HotObjectsList() = default;
- void Add(HeapObject object) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- circular_queue_[index_] = object;
- index_ = (index_ + 1) & kSizeMask;
- }
+ void Add(HeapObject object) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ circular_queue_[index_] = object;
+ index_ = (index_ + 1) & kSizeMask;
+ }
- HeapObject Get(int index) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- DCHECK(!circular_queue_[index].is_null());
- return circular_queue_[index];
- }
+ HeapObject Get(int index) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ DCHECK(!circular_queue_[index].is_null());
+ return circular_queue_[index];
+ }
- static const int kNotFound = -1;
+ static const int kNotFound = -1;
- int Find(HeapObject object) {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- for (int i = 0; i < kSize; i++) {
- if (circular_queue_[i] == object) return i;
+ int Find(HeapObject object) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
+ for (int i = 0; i < kSize; i++) {
+ if (circular_queue_[i] == object) return i;
+ }
+ return kNotFound;
}
- return kNotFound;
- }
- static const int kSize = 8;
+ static const int kSize = 8;
- private:
- static_assert(base::bits::IsPowerOfTwo(kSize), "kSize must be power of two");
- static const int kSizeMask = kSize - 1;
- HeapObject circular_queue_[kSize];
- int index_;
-
- DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
-};
+ private:
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
+ static const int kSizeMask = kSize - 1;
+ HeapObject circular_queue_[kSize];
+ int index_ = 0;
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer : public RootVisitor {
- public:
- static void Iterate(Isolate* isolate, RootVisitor* visitor);
+ DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
+ };
- protected:
static bool CanBeDeferred(HeapObject o);
void RestoreExternalReferenceRedirectors(
- const std::vector<AccessorInfo>& accessor_infos);
+ Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos);
void RestoreExternalReferenceRedirectors(
- const std::vector<CallHandlerInfo>& call_handler_infos);
+ Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos);
static const int kNumberOfPreallocatedSpaces =
static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
@@ -119,8 +75,7 @@ class SerializerDeserializer : public RootVisitor {
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x06) V(0x07) V(0x0e) V(0x0f) \
- /* Free range 0x26..0x2f */ \
- V(0x26) V(0x27) \
+ /* Free range 0x28..0x2f */ \
V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
/* Free range 0x30..0x3f */ \
V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
@@ -180,11 +135,11 @@ class SerializerDeserializer : public RootVisitor {
kBackref = 0x08,
//
- // ---------- byte code range 0x10..0x25 ----------
+ // ---------- byte code range 0x10..0x27 ----------
//
- // Object in the partial snapshot cache.
- kPartialSnapshotCache = 0x10,
+ // Object in the startup object cache.
+ kStartupObjectCache = 0x10,
// Root array item.
kRootArray,
// Object provided in the attached list.
@@ -218,6 +173,12 @@ class SerializerDeserializer : public RootVisitor {
kApiReference,
// External reference referenced by id.
kExternalReference,
+ // Same as the two bytecodes above, but for serializing sandboxed external
+ // pointer values.
+ // TODO(v8:10391): Remove them once all ExternalPointer usages are
+ // sandbox-ready.
+ kSandboxedApiReference,
+ kSandboxedExternalReference,
// Internal reference of a code objects in code stream.
kInternalReference,
// In-place weak references.
@@ -235,7 +196,6 @@ class SerializerDeserializer : public RootVisitor {
// 0x60..0x7f
kFixedRawData = 0x60,
- kOnePointerRawData = kFixedRawData,
kFixedRawDataStart = kFixedRawData - 1,
//
@@ -294,73 +254,7 @@ class SerializerDeserializer : public RootVisitor {
HotObjectsList hot_objects_;
};
-class SerializedData {
- public:
- class Reservation {
- public:
- Reservation() : reservation_(0) {}
- explicit Reservation(uint32_t size)
- : reservation_(ChunkSizeBits::encode(size)) {}
-
- uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
- bool is_last() const { return IsLastChunkBits::decode(reservation_); }
-
- void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
-
- private:
- uint32_t reservation_;
- };
-
- SerializedData(byte* data, int size)
- : data_(data), size_(size), owns_data_(false) {}
- SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
- SerializedData(SerializedData&& other) V8_NOEXCEPT
- : data_(other.data_),
- size_(other.size_),
- owns_data_(other.owns_data_) {
- // Ensure |other| will not attempt to destroy our data in destructor.
- other.owns_data_ = false;
- }
-
- virtual ~SerializedData() {
- if (owns_data_) DeleteArray<byte>(data_);
- }
-
- uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
-
- using ChunkSizeBits = base::BitField<uint32_t, 0, 31>;
- using IsLastChunkBits = base::BitField<bool, 31, 1>;
-
- static constexpr uint32_t kMagicNumberOffset = 0;
- static constexpr uint32_t kMagicNumber =
- 0xC0DE0000 ^ ExternalReferenceTable::kSize;
-
- protected:
- void SetHeaderValue(uint32_t offset, uint32_t value) {
- base::WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset,
- value);
- }
-
- uint32_t GetHeaderValue(uint32_t offset) const {
- return base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(data_) + offset);
- }
-
- void AllocateData(uint32_t size);
-
- void SetMagicNumber() { SetHeaderValue(kMagicNumberOffset, kMagicNumber); }
-
- byte* data_;
- uint32_t size_;
- bool owns_data_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SerializedData);
-};
-
-V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload);
-
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_SERIALIZER_COMMON_H_
+#endif // V8_SNAPSHOT_SERIALIZER_DESERIALIZER_H_
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 9ad8d091cd..d443ff67a1 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h" // For Space::identity().
+#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/code.h"
@@ -14,42 +15,22 @@
#include "src/objects/map.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
-#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-Serializer::Serializer(Isolate* isolate)
+Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
: isolate_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
+ flags_(flags),
allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
for (int space = 0; space < kNumberOfSpaces; ++space) {
- instance_type_count_[space] = NewArray<int>(kInstanceTypes);
- instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
- for (int i = 0; i < kInstanceTypes; i++) {
- instance_type_count_[space][i] = 0;
- instance_type_size_[space][i] = 0;
- }
- }
- } else {
- for (int space = 0; space < kNumberOfSpaces; ++space) {
- instance_type_count_[space] = nullptr;
- instance_type_size_[space] = nullptr;
- }
- }
-#endif // OBJECT_PRINT
-}
-
-Serializer::~Serializer() {
- if (code_address_map_ != nullptr) delete code_address_map_;
-#ifdef OBJECT_PRINT
- for (int space = 0; space < kNumberOfSpaces; ++space) {
- if (instance_type_count_[space] != nullptr) {
- DeleteArray(instance_type_count_[space]);
- DeleteArray(instance_type_size_[space]);
+ // Value-initialized to 0.
+ instance_type_count_[space] = std::make_unique<int[]>(kInstanceTypes);
+ instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
}
}
#endif // OBJECT_PRINT
@@ -102,15 +83,16 @@ bool Serializer::MustBeDeferred(HeapObject object) { return false; }
void Serializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
for (FullObjectSlot current = start; current < end; ++current) {
- SerializeRootObject(*current);
+ SerializeRootObject(current);
}
}
-void Serializer::SerializeRootObject(Object object) {
- if (object.IsSmi()) {
- PutSmi(Smi::cast(object));
+void Serializer::SerializeRootObject(FullObjectSlot slot) {
+ Object o = *slot;
+ if (o.IsSmi()) {
+ PutSmiRoot(slot);
} else {
- SerializeObject(HeapObject::cast(object));
+ SerializeObject(HeapObject::cast(o));
}
}
@@ -209,12 +191,21 @@ void Serializer::PutRoot(RootIndex root, HeapObject object) {
}
}
-void Serializer::PutSmi(Smi smi) {
- sink_.Put(kOnePointerRawData, "Smi");
- Tagged_t raw_value = static_cast<Tagged_t>(smi.ptr());
- byte bytes[kTaggedSize];
- memcpy(bytes, &raw_value, kTaggedSize);
- for (int i = 0; i < kTaggedSize; i++) sink_.Put(bytes[i], "Byte");
+void Serializer::PutSmiRoot(FullObjectSlot slot) {
+ // Serializing a smi root in compressed pointer builds will serialize the
+ // full object slot (of kSystemPointerSize) to avoid complications during
+ // deserialization (endianness or smi sequences).
+ STATIC_ASSERT(decltype(slot)::kSlotDataSize == sizeof(Address));
+ STATIC_ASSERT(decltype(slot)::kSlotDataSize == kSystemPointerSize);
+ static constexpr int bytes_to_output = decltype(slot)::kSlotDataSize;
+ static constexpr int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
+ sink_.PutSection(kFixedRawDataStart + size_in_tagged, "Smi");
+
+ Address raw_value = Smi::cast(*slot).ptr();
+ const byte* raw_value_as_bytes = reinterpret_cast<const byte*>(&raw_value);
+ for (size_t i = 0; i < bytes_to_output; i++) {
+ sink_.Put(raw_value_as_bytes[i], "Byte");
+ }
}
void Serializer::PutBackReference(HeapObject object,
@@ -283,7 +274,7 @@ void Serializer::Pad(int padding_offset) {
void Serializer::InitializeCodeAddressMap() {
isolate_->InitializeLoggingAndCounters();
- code_address_map_ = new CodeAddressMap(isolate_);
+ code_address_map_ = std::make_unique<CodeAddressMap>(isolate_);
}
Code Serializer::CopyCode(Code code) {
@@ -363,7 +354,8 @@ uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
JSTypedArray typed_array = JSTypedArray::cast(object_);
if (typed_array.is_on_heap()) {
- typed_array.RemoveExternalPointerCompensationForSerialization();
+ typed_array.RemoveExternalPointerCompensationForSerialization(
+ serializer_->isolate());
} else {
if (!typed_array.WasDetached()) {
// Explicitly serialize the backing store now.
@@ -379,13 +371,9 @@ void Serializer::ObjectSerializer::SerializeJSTypedArray() {
reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset);
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- // To properly share the buffer, we set the backing store ref as an
- // off-heap offset from nullptr. On deserialization we re-set data
- // pointer to proper value.
- typed_array.SetOffHeapDataPtr(nullptr, ref);
- DCHECK_EQ(ref, reinterpret_cast<Address>(typed_array.DataPtr()));
+ typed_array.SetExternalBackingStoreRefForSerialization(ref);
} else {
- typed_array.SetOffHeapDataPtr(nullptr, 0);
+ typed_array.SetExternalBackingStoreRefForSerialization(0);
}
}
SerializeObject();
@@ -402,10 +390,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- // To properly share the buffer, we set the backing store ref as an
- // a backing store address. On deserialization we re-set data pointer
- // to proper value.
- buffer.set_backing_store(reinterpret_cast<void*>(static_cast<size_t>(ref)));
+ buffer.SetBackingStoreRefForSerialization(ref);
// Ensure deterministic output by setting extension to null during
// serialization.
@@ -414,7 +399,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
- buffer.set_backing_store(backing_store);
+ buffer.set_backing_store(serializer_->isolate(), backing_store);
buffer.set_extension(extension);
}
@@ -428,9 +413,9 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
&reference)) {
DCHECK(reference.is_from_api());
- string.set_uint32_as_resource(reference.index());
+ string.set_uint32_as_resource(serializer_->isolate(), reference.index());
SerializeObject();
- string.set_address_as_resource(resource);
+ string.set_address_as_resource(serializer_->isolate(), resource);
} else {
SerializeExternalStringAsSequentialString();
}
@@ -584,11 +569,16 @@ SnapshotSpace GetSnapshotSpace(HeapObject object) {
// Large code objects are not supported and cannot be expressed by
// SnapshotSpace.
DCHECK_NE(heap_space, CODE_LO_SPACE);
- // Young generation large objects are tenured.
- if (heap_space == NEW_LO_SPACE) {
- return SnapshotSpace::kLargeObject;
- } else {
- return static_cast<SnapshotSpace>(heap_space);
+ switch (heap_space) {
+ // Young generation objects are tenured, as objects that have survived
+ // until snapshot building probably deserve to be considered 'old'.
+ case NEW_SPACE:
+ return SnapshotSpace::kOld;
+ case NEW_LO_SPACE:
+ return SnapshotSpace::kLargeObject;
+
+ default:
+ return static_cast<SnapshotSpace>(heap_space);
}
}
}
@@ -728,32 +718,64 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
- Address* p) {
- auto encoded_reference =
- serializer_->EncodeExternalReference(host.foreign_address());
- if (encoded_reference.is_from_api()) {
- sink_->Put(kApiReference, "ApiRef");
+void Serializer::ObjectSerializer::OutputExternalReference(Address target,
+ int target_size,
+ bool sandboxify) {
+ DCHECK_LE(target_size, sizeof(target)); // Must fit in Address.
+ ExternalReferenceEncoder::Value encoded_reference;
+ bool encoded_successfully;
+
+ if (serializer_->allow_unknown_external_references_for_testing()) {
+ encoded_successfully =
+ serializer_->TryEncodeExternalReference(target).To(&encoded_reference);
} else {
- sink_->Put(kExternalReference, "ExternalRef");
+ encoded_reference = serializer_->EncodeExternalReference(target);
+ encoded_successfully = true;
+ }
+
+ if (!encoded_successfully) {
+ // In this case the serialized snapshot will not be used in a different
+ // Isolate and thus the target address will not change between
+ // serialization and deserialization. We can serialize seen external
+ // references verbatim.
+ CHECK(serializer_->allow_unknown_external_references_for_testing());
+ CHECK(IsAligned(target_size, kObjectAlignment));
+ CHECK_LE(target_size, kNumberOfFixedRawData * kTaggedSize);
+ int size_in_tagged = target_size >> kTaggedSizeLog2;
+ sink_->PutSection(kFixedRawDataStart + size_in_tagged, "FixedRawData");
+ sink_->PutRaw(reinterpret_cast<byte*>(&target), target_size, "Bytes");
+ } else if (encoded_reference.is_from_api()) {
+ if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
+ sink_->Put(kSandboxedApiReference, "SandboxedApiRef");
+ } else {
+ sink_->Put(kApiReference, "ApiRef");
+ }
+ sink_->PutInt(encoded_reference.index(), "reference index");
+ } else {
+ if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
+ sink_->Put(kSandboxedExternalReference, "SandboxedExternalRef");
+ } else {
+ sink_->Put(kExternalReference, "ExternalRef");
+ }
+ sink_->PutInt(encoded_reference.index(), "reference index");
}
- sink_->PutInt(encoded_reference.index(), "reference index");
- bytes_processed_so_far_ += kSystemPointerSize;
+ bytes_processed_so_far_ += target_size;
+}
+
+void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
+ Address* p) {
+ // "Sandboxify" external reference.
+ OutputExternalReference(host.foreign_address(), kExternalPointerSize, true);
}
void Serializer::ObjectSerializer::VisitExternalReference(Code host,
RelocInfo* rinfo) {
Address target = rinfo->target_external_reference();
- auto encoded_reference = serializer_->EncodeExternalReference(target);
- if (encoded_reference.is_from_api()) {
- DCHECK(!rinfo->IsCodedSpecially());
- sink_->Put(kApiReference, "ApiRef");
- } else {
- sink_->Put(kExternalReference, "ExternalRef");
- }
DCHECK_NE(target, kNullAddress); // Code does not reference null.
- sink_->PutInt(encoded_reference.index(), "reference index");
- bytes_processed_so_far_ += rinfo->target_address_size();
+ DCHECK_IMPLIES(serializer_->EncodeExternalReference(target).is_from_api(),
+ !rinfo->IsCodedSpecially());
+ // Don't "sandboxify" external references embedded in the code.
+ OutputExternalReference(target, rinfo->target_address_size(), false);
}
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
@@ -834,8 +856,8 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
DCHECK(to_skip == bytes_to_output);
if (IsAligned(bytes_to_output, kObjectAlignment) &&
bytes_to_output <= kNumberOfFixedRawData * kTaggedSize) {
- int size_in_words = bytes_to_output >> kTaggedSizeLog2;
- sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
+ int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
+ sink_->PutSection(kFixedRawDataStart + size_in_tagged, "FixedRawData");
} else {
sink_->Put(kVariableRawData, "VariableRawData");
sink_->PutInt(bytes_to_output, "length");
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index de65a92013..be748de562 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -7,13 +7,15 @@
#include <map>
+#include "src/codegen/external-reference-encoder.h"
#include "src/execution/isolate.h"
#include "src/logging/log.h"
#include "src/objects/objects.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-allocator.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -157,8 +159,7 @@ class ObjectCacheIndexMap {
class Serializer : public SerializerDeserializer {
public:
- explicit Serializer(Isolate* isolate);
- ~Serializer() override;
+ Serializer(Isolate* isolate, Snapshot::SerializerFlags flags);
std::vector<SerializedData::Reservation> EncodeReservations() const {
return allocator_.EncodeReservations();
@@ -198,10 +199,10 @@ class Serializer : public SerializerDeserializer {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
- void SerializeRootObject(Object object);
+ void SerializeRootObject(FullObjectSlot slot);
void PutRoot(RootIndex root_index, HeapObject object);
- void PutSmi(Smi smi);
+ void PutSmiRoot(FullObjectSlot slot);
void PutBackReference(HeapObject object, SerializerReference reference);
void PutAttachedReference(SerializerReference reference);
// Emit alignment prefix if necessary, return required padding space in bytes.
@@ -224,6 +225,10 @@ class Serializer : public SerializerDeserializer {
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
+ Maybe<ExternalReferenceEncoder::Value> TryEncodeExternalReference(
+ Address addr) {
+ return external_reference_encoder_.TryEncode(addr);
+ }
// GetInt reads 4 bytes at once, requiring padding at the end.
// Use padding_offset to specify the space you want to use after padding.
@@ -260,21 +265,29 @@ class Serializer : public SerializerDeserializer {
SnapshotByteSink sink_; // Used directly by subclasses.
+ bool allow_unknown_external_references_for_testing() const {
+ return (flags_ & Snapshot::kAllowUnknownExternalReferencesForTesting) != 0;
+ }
+ bool allow_active_isolate_for_testing() const {
+ return (flags_ & Snapshot::kAllowActiveIsolateForTesting) != 0;
+ }
+
private:
Isolate* isolate_;
SerializerReferenceMap reference_map_;
ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
- CodeAddressMap* code_address_map_ = nullptr;
+ std::unique_ptr<CodeAddressMap> code_address_map_;
std::vector<byte> code_buffer_;
std::vector<HeapObject> deferred_objects_; // To handle stack overflow.
int recursion_depth_ = 0;
+ const Snapshot::SerializerFlags flags_;
SerializerAllocator allocator_;
#ifdef OBJECT_PRINT
- static const int kInstanceTypes = LAST_TYPE + 1;
- int* instance_type_count_[kNumberOfSpaces];
- size_t* instance_type_size_[kNumberOfSpaces];
+ static constexpr int kInstanceTypes = LAST_TYPE + 1;
+ std::unique_ptr<int[]> instance_type_count_[kNumberOfSpaces];
+ std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSpaces];
#endif // OBJECT_PRINT
#ifdef DEBUG
@@ -327,6 +340,8 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
// This function outputs or skips the raw data between the last pointer and
// up to the current position.
void SerializeContent(Map map, int size);
+ void OutputExternalReference(Address target, int target_size,
+ bool sandboxify);
void OutputRawData(Address up_to);
void OutputCode(int size);
uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
diff --git a/deps/v8/src/snapshot/snapshot-compression.cc b/deps/v8/src/snapshot/snapshot-compression.cc
index dea16bfa34..09ac2eecda 100644
--- a/deps/v8/src/snapshot/snapshot-compression.cc
+++ b/deps/v8/src/snapshot/snapshot-compression.cc
@@ -4,7 +4,9 @@
#include "src/snapshot/snapshot-compression.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/utils/memcopy.h"
+#include "src/utils/utils.h"
#include "third_party/zlib/google/compression_utils_portable.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/snapshot-compression.h b/deps/v8/src/snapshot/snapshot-compression.h
index 59c21feb74..fe637bd1a6 100644
--- a/deps/v8/src/snapshot/snapshot-compression.h
+++ b/deps/v8/src/snapshot/snapshot-compression.h
@@ -5,9 +5,7 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_COMPRESSION_H_
#define V8_SNAPSHOT_SNAPSHOT_COMPRESSION_H_
-#include "src/snapshot/serializer-common.h"
-#include "src/snapshot/serializer.h"
-#include "src/snapshot/snapshot.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/utils/vector.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/snapshot-data.cc b/deps/v8/src/snapshot/snapshot-data.cc
new file mode 100644
index 0000000000..b6dccf2af5
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-data.cc
@@ -0,0 +1,80 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/snapshot-data.h"
+
+#include "src/common/assert-scope.h"
+#include "src/snapshot/serializer.h"
+
+#ifdef V8_SNAPSHOT_COMPRESSION
+#include "src/snapshot/snapshot-compression.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+void SerializedData::AllocateData(uint32_t size) {
+ DCHECK(!owns_data_);
+ data_ = NewArray<byte>(size);
+ size_ = size;
+ owns_data_ = true;
+}
+
+// static
+constexpr uint32_t SerializedData::kMagicNumber;
+
+SnapshotData::SnapshotData(const Serializer* serializer) {
+ DisallowHeapAllocation no_gc;
+ std::vector<Reservation> reservations = serializer->EncodeReservations();
+ const std::vector<byte>* payload = serializer->Payload();
+
+ // Calculate sizes.
+ uint32_t reservation_size =
+ static_cast<uint32_t>(reservations.size()) * kUInt32Size;
+ uint32_t payload_offset = kHeaderSize + reservation_size;
+ uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+ uint32_t size =
+ padded_payload_offset + static_cast<uint32_t>(payload->size());
+
+ // Allocate backing store and create result data.
+ AllocateData(size);
+
+ // Zero out pre-payload data. Part of that is only used for padding.
+ memset(data_, 0, padded_payload_offset);
+
+ // Set header values.
+ SetMagicNumber();
+ SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
+ SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
+
+ // Copy reservation chunk sizes.
+ CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.data()),
+ reservation_size);
+
+ // Copy serialized data.
+ CopyBytes(data_ + padded_payload_offset, payload->data(),
+ static_cast<size_t>(payload->size()));
+}
+
+std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
+ uint32_t size = GetHeaderValue(kNumReservationsOffset);
+ std::vector<SerializedData::Reservation> reservations(size);
+ memcpy(reservations.data(), data_ + kHeaderSize,
+ size * sizeof(SerializedData::Reservation));
+ return reservations;
+}
+
+Vector<const byte> SnapshotData::Payload() const {
+ uint32_t reservations_size =
+ GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
+ uint32_t padded_payload_offset =
+ POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
+ const byte* payload = data_ + padded_payload_offset;
+ uint32_t length = GetHeaderValue(kPayloadLengthOffset);
+ DCHECK_EQ(data_ + size_, payload + length);
+ return Vector<const byte>(payload, length);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-data.h b/deps/v8/src/snapshot/snapshot-data.h
new file mode 100644
index 0000000000..b8a9133e7f
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-data.h
@@ -0,0 +1,129 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SNAPSHOT_DATA_H_
+#define V8_SNAPSHOT_SNAPSHOT_DATA_H_
+
+#include "src/base/bit-field.h"
+#include "src/base/memory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Isolate;
+class Serializer;
+
+class SerializedData {
+ public:
+ class Reservation {
+ public:
+ Reservation() : reservation_(0) {}
+ explicit Reservation(uint32_t size)
+ : reservation_(ChunkSizeBits::encode(size)) {}
+
+ uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
+ bool is_last() const { return IsLastChunkBits::decode(reservation_); }
+
+ void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
+
+ private:
+ uint32_t reservation_;
+ };
+
+ SerializedData(byte* data, int size)
+ : data_(data), size_(size), owns_data_(false) {}
+ SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
+ SerializedData(SerializedData&& other) V8_NOEXCEPT
+ : data_(other.data_),
+ size_(other.size_),
+ owns_data_(other.owns_data_) {
+ // Ensure |other| will not attempt to destroy our data in destructor.
+ other.owns_data_ = false;
+ }
+
+ virtual ~SerializedData() {
+ if (owns_data_) DeleteArray<byte>(data_);
+ }
+
+ uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
+
+ using ChunkSizeBits = base::BitField<uint32_t, 0, 31>;
+ using IsLastChunkBits = base::BitField<bool, 31, 1>;
+
+ static constexpr uint32_t kMagicNumberOffset = 0;
+ static constexpr uint32_t kMagicNumber =
+ 0xC0DE0000 ^ ExternalReferenceTable::kSize;
+
+ protected:
+ void SetHeaderValue(uint32_t offset, uint32_t value) {
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset,
+ value);
+ }
+
+ uint32_t GetHeaderValue(uint32_t offset) const {
+ return base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(data_) + offset);
+ }
+
+ void AllocateData(uint32_t size);
+
+ void SetMagicNumber() { SetHeaderValue(kMagicNumberOffset, kMagicNumber); }
+
+ byte* data_;
+ uint32_t size_;
+ bool owns_data_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SerializedData);
+};
+
+// Wrapper around reservation sizes and the serialization payload.
+class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
+ public:
+ // Used when producing.
+ explicit SnapshotData(const Serializer* serializer);
+
+ // Used when consuming.
+ explicit SnapshotData(const Vector<const byte> snapshot)
+ : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
+ }
+
+ std::vector<Reservation> Reservations() const;
+ virtual Vector<const byte> Payload() const;
+
+ Vector<const byte> RawData() const {
+ return Vector<const byte>(data_, size_);
+ }
+
+ protected:
+ // Empty constructor used by SnapshotCompression so it can manually allocate
+ // memory.
+ SnapshotData() : SerializedData() {}
+ friend class SnapshotCompression;
+
+ // Resize used by SnapshotCompression so it can shrink the compressed
+ // SnapshotData.
+ void Resize(uint32_t size) { size_ = size; }
+
+ // The data header consists of uint32_t-sized entries:
+ // [0] magic number and (internal) external reference count
+ // [1] number of reservation size entries
+ // [2] payload length
+ // ... reservations
+ // ... serialized payload
+ static const uint32_t kNumReservationsOffset =
+ kMagicNumberOffset + kUInt32Size;
+ static const uint32_t kPayloadLengthOffset =
+ kNumReservationsOffset + kUInt32Size;
+ static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_SNAPSHOT_DATA_H_
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index adcfa8df61..eb4427a75f 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -8,7 +8,7 @@
#include <utility>
#include "src/base/logging.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot-utils.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -63,24 +63,6 @@ class SnapshotByteSource final {
return answer;
}
- int GetIntSlow() {
- // Unlike GetInt, this reads only up to the end of the blob, even if less
- // than 4 bytes are remaining.
- // TODO(jgruber): Remove once the use in MakeFromScriptsSource is gone.
- DCHECK(position_ < length_);
- uint32_t answer = data_[position_];
- if (position_ + 1 < length_) answer |= data_[position_ + 1] << 8;
- if (position_ + 2 < length_) answer |= data_[position_ + 2] << 16;
- if (position_ + 3 < length_) answer |= data_[position_ + 3] << 24;
- int bytes = (answer & 3) + 1;
- Advance(bytes);
- uint32_t mask = 0xffffffffu;
- mask >>= 32 - (bytes << 3);
- answer &= mask;
- answer >>= 2;
- return answer;
- }
-
// Returns length.
int GetBlob(const byte** data);
diff --git a/deps/v8/src/snapshot/snapshot-utils.cc b/deps/v8/src/snapshot/snapshot-utils.cc
new file mode 100644
index 0000000000..88e8e794c2
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-utils.cc
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/snapshot-utils.h"
+
+#include "src/sanitizer/msan.h"
+#include "third_party/zlib/zlib.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+ // Computing the checksum includes padding bytes for objects like strings.
+ // Mark every object as initialized in the code serializer.
+ MSAN_MEMORY_IS_INITIALIZED(payload.begin(), payload.length());
+#endif // MEMORY_SANITIZER
+ // Priming the adler32 call so it can see what CPU features are available.
+ adler32(0, NULL, 0);
+ return static_cast<uint32_t>(adler32(0, payload.begin(), payload.length()));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-utils.h b/deps/v8/src/snapshot/snapshot-utils.h
new file mode 100644
index 0000000000..045813b139
--- /dev/null
+++ b/deps/v8/src/snapshot/snapshot-utils.h
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SNAPSHOT_UTILS_H_
+#define V8_SNAPSHOT_SNAPSHOT_UTILS_H_
+
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_SNAPSHOT_UTILS_H_
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot.cc
index 8e80b0b0b0..6c129a846a 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -7,10 +7,17 @@
#include "src/snapshot/snapshot.h"
#include "src/base/platform/platform.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
-#include "src/snapshot/partial-deserializer.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/snapshot/context-deserializer.h"
+#include "src/snapshot/context-serializer.h"
#include "src/snapshot/read-only-deserializer.h"
+#include "src/snapshot/read-only-serializer.h"
+#include "src/snapshot/snapshot-utils.h"
#include "src/snapshot/startup-deserializer.h"
+#include "src/snapshot/startup-serializer.h"
#include "src/utils/memcopy.h"
#include "src/utils/version.h"
@@ -21,6 +28,82 @@
namespace v8 {
namespace internal {
+namespace {
+
+class SnapshotImpl : public AllStatic {
+ public:
+ static v8::StartupData CreateSnapshotBlob(
+ const SnapshotData* startup_snapshot_in,
+ const SnapshotData* read_only_snapshot_in,
+ const std::vector<SnapshotData*>& context_snapshots_in,
+ bool can_be_rehashed);
+
+ static uint32_t ExtractNumContexts(const v8::StartupData* data);
+ static uint32_t ExtractContextOffset(const v8::StartupData* data,
+ uint32_t index);
+ static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
+ static Vector<const byte> ExtractReadOnlyData(const v8::StartupData* data);
+ static Vector<const byte> ExtractContextData(const v8::StartupData* data,
+ uint32_t index);
+
+ static uint32_t GetHeaderValue(const v8::StartupData* data, uint32_t offset) {
+ return base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(data->data) + offset);
+ }
+ static void SetHeaderValue(char* data, uint32_t offset, uint32_t value) {
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset,
+ value);
+ }
+
+ static void CheckVersion(const v8::StartupData* data);
+
+ // Snapshot blob layout:
+ // [0] number of contexts N
+ // [1] rehashability
+ // [2] checksum
+ // [3] (64 bytes) version string
+ // [4] offset to readonly
+ // [5] offset to context 0
+ // [6] offset to context 1
+ // ...
+ // ... offset to context N - 1
+ // ... startup snapshot data
+ // ... read-only snapshot data
+ // ... context 0 snapshot data
+ // ... context 1 snapshot data
+
+ static const uint32_t kNumberOfContextsOffset = 0;
+ // TODO(yangguo): generalize rehashing, and remove this flag.
+ static const uint32_t kRehashabilityOffset =
+ kNumberOfContextsOffset + kUInt32Size;
+ static const uint32_t kChecksumOffset = kRehashabilityOffset + kUInt32Size;
+ static const uint32_t kVersionStringOffset = kChecksumOffset + kUInt32Size;
+ static const uint32_t kVersionStringLength = 64;
+ static const uint32_t kReadOnlyOffsetOffset =
+ kVersionStringOffset + kVersionStringLength;
+ static const uint32_t kFirstContextOffsetOffset =
+ kReadOnlyOffsetOffset + kUInt32Size;
+
+ static Vector<const byte> ChecksummedContent(const v8::StartupData* data) {
+ STATIC_ASSERT(kVersionStringOffset == kChecksumOffset + kUInt32Size);
+ const uint32_t kChecksumStart = kVersionStringOffset;
+ return Vector<const byte>(
+ reinterpret_cast<const byte*>(data->data + kChecksumStart),
+ data->raw_size - kChecksumStart);
+ }
+
+ static uint32_t StartupSnapshotOffset(int num_contexts) {
+ return POINTER_SIZE_ALIGN(kFirstContextOffsetOffset +
+ num_contexts * kInt32Size);
+ }
+
+ static uint32_t ContextSnapshotOffsetOffset(int index) {
+ return kFirstContextOffsetOffset + index * kInt32Size;
+ }
+};
+
+} // namespace
+
SnapshotData MaybeDecompress(const Vector<const byte>& snapshot_data) {
#ifdef V8_SNAPSHOT_COMPRESSION
return SnapshotCompression::Decompress(snapshot_data);
@@ -31,7 +114,7 @@ SnapshotData MaybeDecompress(const Vector<const byte>& snapshot_data) {
#ifdef DEBUG
bool Snapshot::SnapshotIsValid(const v8::StartupData* snapshot_blob) {
- return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
+ return SnapshotImpl::ExtractNumContexts(snapshot_blob) > 0;
}
#endif // DEBUG
@@ -40,7 +123,8 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
const v8::StartupData* blob = isolate->snapshot_blob();
if (blob == nullptr) return false;
if (blob->data == nullptr) return false;
- size_t num_contexts = static_cast<size_t>(ExtractNumContexts(blob));
+ size_t num_contexts =
+ static_cast<size_t>(SnapshotImpl::ExtractNumContexts(blob));
return index < num_contexts;
}
@@ -52,10 +136,10 @@ bool Snapshot::Initialize(Isolate* isolate) {
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
- CheckVersion(blob);
+ SnapshotImpl::CheckVersion(blob);
CHECK(VerifyChecksum(blob));
- Vector<const byte> startup_data = ExtractStartupData(blob);
- Vector<const byte> read_only_data = ExtractReadOnlyData(blob);
+ Vector<const byte> startup_data = SnapshotImpl::ExtractStartupData(blob);
+ Vector<const byte> read_only_data = SnapshotImpl::ExtractReadOnlyData(blob);
SnapshotData startup_snapshot_data(MaybeDecompress(startup_data));
SnapshotData read_only_snapshot_data(MaybeDecompress(read_only_data));
@@ -85,11 +169,11 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
const v8::StartupData* blob = isolate->snapshot_blob();
bool can_rehash = ExtractRehashability(blob);
- Vector<const byte> context_data =
- ExtractContextData(blob, static_cast<uint32_t>(context_index));
+ Vector<const byte> context_data = SnapshotImpl::ExtractContextData(
+ blob, static_cast<uint32_t>(context_index));
SnapshotData snapshot_data(MaybeDecompress(context_data));
- MaybeHandle<Context> maybe_result = PartialDeserializer::DeserializeContext(
+ MaybeHandle<Context> maybe_result = ContextDeserializer::DeserializeContext(
isolate, &snapshot_data, can_rehash, global_proxy,
embedder_fields_deserializer);
@@ -105,6 +189,125 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return result;
}
+// static
+void Snapshot::ClearReconstructableDataForSerialization(
+ Isolate* isolate, bool clear_recompilable_data) {
+ // Clear SFIs and JSRegExps.
+
+ if (clear_recompilable_data) {
+ HandleScope scope(isolate);
+ std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
+ { // Heap allocation is disallowed within this scope.
+ i::HeapObjectIterator it(isolate->heap());
+ for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
+ if (o.IsSharedFunctionInfo()) {
+ i::SharedFunctionInfo shared = i::SharedFunctionInfo::cast(o);
+ if (shared.script().IsScript() &&
+ Script::cast(shared.script()).type() == Script::TYPE_EXTENSION) {
+ continue; // Don't clear extensions, they cannot be recompiled.
+ }
+ if (shared.CanDiscardCompiled()) {
+ sfis_to_clear.emplace_back(shared, isolate);
+ }
+ } else if (o.IsJSRegExp()) {
+ i::JSRegExp regexp = i::JSRegExp::cast(o);
+ if (regexp.HasCompiledCode()) {
+ regexp.DiscardCompiledCodeForSerialization();
+ }
+ }
+ }
+ }
+
+ // Must happen after heap iteration since SFI::DiscardCompiled may allocate.
+ for (i::Handle<i::SharedFunctionInfo> shared : sfis_to_clear) {
+ i::SharedFunctionInfo::DiscardCompiled(isolate, shared);
+ }
+ }
+
+ // Clear JSFunctions.
+
+ i::HeapObjectIterator it(isolate->heap());
+ for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
+ if (!o.IsJSFunction()) continue;
+
+ i::JSFunction fun = i::JSFunction::cast(o);
+ fun.CompleteInobjectSlackTrackingIfActive();
+
+ i::SharedFunctionInfo shared = fun.shared();
+ if (shared.script().IsScript() &&
+ Script::cast(shared.script()).type() == Script::TYPE_EXTENSION) {
+ continue; // Don't clear extensions, they cannot be recompiled.
+ }
+
+ // Also, clear out feedback vectors, or any optimized code.
+ // Note that checking for fun.IsOptimized() || fun.IsInterpreted() is
+ // not sufficient because the function can have a feedback vector even
+ // if it is not compiled (e.g. when the bytecode was flushed). On the
+ // other hand, only checking for the feedback vector is not sufficient
+ // because there can be multiple functions sharing the same feedback
+ // vector. So we need all these checks.
+ if (fun.IsOptimized() || fun.IsInterpreted() ||
+ !fun.raw_feedback_cell().value().IsUndefined()) {
+ fun.raw_feedback_cell().set_value(
+ i::ReadOnlyRoots(isolate).undefined_value());
+ fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
+ }
+#ifdef DEBUG
+ if (clear_recompilable_data) {
+ DCHECK(fun.shared().HasWasmExportedFunctionData() ||
+ fun.shared().HasBuiltinId() || fun.shared().IsApiFunction() ||
+ fun.shared().HasUncompiledDataWithoutPreparseData());
+ }
+#endif // DEBUG
+ }
+}
+
+// static
+void Snapshot::SerializeDeserializeAndVerifyForTesting(
+ Isolate* isolate, Handle<Context> default_context) {
+ StartupData serialized_data;
+ std::unique_ptr<const char[]> auto_delete_serialized_data;
+
+ isolate->heap()->CollectAllAvailableGarbage(
+ i::GarbageCollectionReason::kSnapshotCreator);
+
+ // Test serialization.
+ {
+ DisallowHeapAllocation no_gc;
+
+ Snapshot::SerializerFlags flags(
+ Snapshot::kAllowUnknownExternalReferencesForTesting |
+ Snapshot::kAllowActiveIsolateForTesting);
+ serialized_data = Snapshot::Create(isolate, *default_context, no_gc, flags);
+ auto_delete_serialized_data.reset(serialized_data.data);
+ }
+
+ // Test deserialization.
+ Isolate* new_isolate = Isolate::New();
+ {
+ // Set serializer_enabled() to not install extensions and experimental
+ // natives on the new isolate.
+ // TODO(v8:10416): This should be a separate setting on the isolate.
+ new_isolate->enable_serializer();
+ new_isolate->Enter();
+ new_isolate->set_snapshot_blob(&serialized_data);
+ new_isolate->set_array_buffer_allocator(
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+ CHECK(Snapshot::Initialize(new_isolate));
+
+ HandleScope scope(new_isolate);
+ Handle<Context> new_native_context =
+ new_isolate->bootstrapper()->CreateEnvironmentForTesting();
+ CHECK(new_native_context->IsNativeContext());
+
+#ifdef VERIFY_HEAP
+ new_isolate->heap()->Verify();
+#endif // VERIFY_HEAP
+ }
+ new_isolate->Exit();
+ Isolate::Delete(new_isolate);
+}
+
void ProfileDeserialization(
const SnapshotData* read_only_snapshot,
const SnapshotData* startup_snapshot,
@@ -129,7 +332,70 @@ void ProfileDeserialization(
}
}
-v8::StartupData Snapshot::CreateSnapshotBlob(
+// static
+constexpr Snapshot::SerializerFlags Snapshot::kDefaultSerializerFlags;
+
+// static
+v8::StartupData Snapshot::Create(
+ Isolate* isolate, std::vector<Context>* contexts,
+ const std::vector<SerializeInternalFieldsCallback>&
+ embedder_fields_serializers,
+ const DisallowHeapAllocation& no_gc, SerializerFlags flags) {
+ DCHECK_EQ(contexts->size(), embedder_fields_serializers.size());
+ DCHECK_GT(contexts->size(), 0);
+
+ ReadOnlySerializer read_only_serializer(isolate, flags);
+ read_only_serializer.SerializeReadOnlyRoots();
+
+ StartupSerializer startup_serializer(isolate, flags, &read_only_serializer);
+ startup_serializer.SerializeStrongReferences(no_gc);
+
+ // Serialize each context with a new serializer.
+ const int num_contexts = static_cast<int>(contexts->size());
+ std::vector<SnapshotData*> context_snapshots;
+ context_snapshots.reserve(num_contexts);
+
+ // TODO(v8:6593): generalize rehashing, and remove this flag.
+ bool can_be_rehashed = true;
+
+ for (int i = 0; i < num_contexts; i++) {
+ ContextSerializer context_serializer(isolate, flags, &startup_serializer,
+ embedder_fields_serializers[i]);
+ context_serializer.Serialize(&contexts->at(i), no_gc);
+ can_be_rehashed = can_be_rehashed && context_serializer.can_be_rehashed();
+ context_snapshots.push_back(new SnapshotData(&context_serializer));
+ }
+
+ startup_serializer.SerializeWeakReferencesAndDeferred();
+ can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
+
+ startup_serializer.CheckNoDirtyFinalizationRegistries();
+
+ read_only_serializer.FinalizeSerialization();
+ can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
+
+ SnapshotData read_only_snapshot(&read_only_serializer);
+ SnapshotData startup_snapshot(&startup_serializer);
+ v8::StartupData result =
+ SnapshotImpl::CreateSnapshotBlob(&startup_snapshot, &read_only_snapshot,
+ context_snapshots, can_be_rehashed);
+
+ for (const SnapshotData* ptr : context_snapshots) delete ptr;
+
+ CHECK(Snapshot::VerifyChecksum(&result));
+ return result;
+}
+
+// static
+v8::StartupData Snapshot::Create(Isolate* isolate, Context default_context,
+ const DisallowHeapAllocation& no_gc,
+ SerializerFlags flags) {
+ std::vector<Context> contexts{default_context};
+ std::vector<SerializeInternalFieldsCallback> callbacks{{}};
+ return Snapshot::Create(isolate, &contexts, callbacks, no_gc, flags);
+}
+
+v8::StartupData SnapshotImpl::CreateSnapshotBlob(
const SnapshotData* startup_snapshot_in,
const SnapshotData* read_only_snapshot_in,
const std::vector<SnapshotData*>& context_snapshots_in,
@@ -163,7 +429,8 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
#endif
uint32_t num_contexts = static_cast<uint32_t>(context_snapshots->size());
- uint32_t startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
+ uint32_t startup_snapshot_offset =
+ SnapshotImpl::StartupSnapshotOffset(num_contexts);
uint32_t total_length = startup_snapshot_offset;
total_length += static_cast<uint32_t>(startup_snapshot->RawData().length());
total_length += static_cast<uint32_t>(read_only_snapshot->RawData().length());
@@ -176,15 +443,18 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
char* data = new char[total_length];
// Zero out pre-payload data. Part of that is only used for padding.
- memset(data, 0, StartupSnapshotOffset(num_contexts));
+ memset(data, 0, SnapshotImpl::StartupSnapshotOffset(num_contexts));
- SetHeaderValue(data, kNumberOfContextsOffset, num_contexts);
- SetHeaderValue(data, kRehashabilityOffset, can_be_rehashed ? 1 : 0);
+ SnapshotImpl::SetHeaderValue(data, SnapshotImpl::kNumberOfContextsOffset,
+ num_contexts);
+ SnapshotImpl::SetHeaderValue(data, SnapshotImpl::kRehashabilityOffset,
+ can_be_rehashed ? 1 : 0);
// Write version string into snapshot data.
- memset(data + kVersionStringOffset, 0, kVersionStringLength);
- Version::GetString(
- Vector<char>(data + kVersionStringOffset, kVersionStringLength));
+ memset(data + SnapshotImpl::kVersionStringOffset, 0,
+ SnapshotImpl::kVersionStringLength);
+ Version::GetString(Vector<char>(data + SnapshotImpl::kVersionStringOffset,
+ SnapshotImpl::kVersionStringLength));
// Startup snapshot (isolate-specific data).
uint32_t payload_offset = startup_snapshot_offset;
@@ -201,7 +471,8 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_offset += payload_length;
// Read-only.
- SetHeaderValue(data, kReadOnlyOffsetOffset, payload_offset);
+ SnapshotImpl::SetHeaderValue(data, SnapshotImpl::kReadOnlyOffsetOffset,
+ payload_offset);
payload_length = read_only_snapshot->RawData().length();
CopyBytes(
data + payload_offset,
@@ -212,9 +483,10 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
}
payload_offset += payload_length;
- // Partial snapshots (context-specific data).
+ // Context snapshots (context-specific data).
for (uint32_t i = 0; i < num_contexts; i++) {
- SetHeaderValue(data, ContextSnapshotOffsetOffset(i), payload_offset);
+ SnapshotImpl::SetHeaderValue(
+ data, SnapshotImpl::ContextSnapshotOffsetOffset(i), payload_offset);
SnapshotData* context_snapshot = (*context_snapshots)[i];
payload_length = context_snapshot->RawData().length();
CopyBytes(
@@ -233,12 +505,14 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
DCHECK_EQ(total_length, payload_offset);
v8::StartupData result = {data, static_cast<int>(total_length)};
- SetHeaderValue(data, kChecksumOffset, Checksum(ChecksummedContent(&result)));
+ SnapshotImpl::SetHeaderValue(
+ data, SnapshotImpl::kChecksumOffset,
+ Checksum(SnapshotImpl::ChecksummedContent(&result)));
return result;
}
-uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
+uint32_t SnapshotImpl::ExtractNumContexts(const v8::StartupData* data) {
CHECK_LT(kNumberOfContextsOffset, data->raw_size);
uint32_t num_contexts = GetHeaderValue(data, kNumberOfContextsOffset);
return num_contexts;
@@ -247,8 +521,9 @@ uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
bool Snapshot::VerifyChecksum(const v8::StartupData* data) {
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
- uint32_t expected = GetHeaderValue(data, kChecksumOffset);
- uint32_t result = Checksum(ChecksummedContent(data));
+ uint32_t expected =
+ SnapshotImpl::GetHeaderValue(data, SnapshotImpl::kChecksumOffset);
+ uint32_t result = Checksum(SnapshotImpl::ChecksummedContent(data));
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
PrintF("[Verifying snapshot checksum took %0.3f ms]\n", ms);
@@ -256,8 +531,8 @@ bool Snapshot::VerifyChecksum(const v8::StartupData* data) {
return result == expected;
}
-uint32_t Snapshot::ExtractContextOffset(const v8::StartupData* data,
- uint32_t index) {
+uint32_t SnapshotImpl::ExtractContextOffset(const v8::StartupData* data,
+ uint32_t index) {
// Extract the offset of the context at a given index from the StartupData,
// and check that it is within bounds.
uint32_t context_offset =
@@ -267,8 +542,10 @@ uint32_t Snapshot::ExtractContextOffset(const v8::StartupData* data,
}
bool Snapshot::ExtractRehashability(const v8::StartupData* data) {
- CHECK_LT(kRehashabilityOffset, static_cast<uint32_t>(data->raw_size));
- uint32_t rehashability = GetHeaderValue(data, kRehashabilityOffset);
+ CHECK_LT(SnapshotImpl::kRehashabilityOffset,
+ static_cast<uint32_t>(data->raw_size));
+ uint32_t rehashability =
+ SnapshotImpl::GetHeaderValue(data, SnapshotImpl::kRehashabilityOffset);
CHECK_IMPLIES(rehashability != 0, rehashability == 1);
return rehashability != 0;
}
@@ -285,23 +562,25 @@ Vector<const byte> ExtractData(const v8::StartupData* snapshot,
}
} // namespace
-Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
- DCHECK(SnapshotIsValid(data));
+Vector<const byte> SnapshotImpl::ExtractStartupData(
+ const v8::StartupData* data) {
+ DCHECK(Snapshot::SnapshotIsValid(data));
uint32_t num_contexts = ExtractNumContexts(data);
return ExtractData(data, StartupSnapshotOffset(num_contexts),
GetHeaderValue(data, kReadOnlyOffsetOffset));
}
-Vector<const byte> Snapshot::ExtractReadOnlyData(const v8::StartupData* data) {
- DCHECK(SnapshotIsValid(data));
+Vector<const byte> SnapshotImpl::ExtractReadOnlyData(
+ const v8::StartupData* data) {
+ DCHECK(Snapshot::SnapshotIsValid(data));
return ExtractData(data, GetHeaderValue(data, kReadOnlyOffsetOffset),
GetHeaderValue(data, ContextSnapshotOffsetOffset(0)));
}
-Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
- uint32_t index) {
+Vector<const byte> SnapshotImpl::ExtractContextData(const v8::StartupData* data,
+ uint32_t index) {
uint32_t num_contexts = ExtractNumContexts(data);
CHECK_LT(index, num_contexts);
@@ -320,7 +599,7 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
return Vector<const byte>(context_data, context_length);
}
-void Snapshot::CheckVersion(const v8::StartupData* data) {
+void SnapshotImpl::CheckVersion(const v8::StartupData* data) {
char version[kVersionStringLength];
memset(version, 0, kVersionStringLength);
CHECK_LT(kVersionStringOffset + kVersionStringLength,
@@ -339,58 +618,6 @@ void Snapshot::CheckVersion(const v8::StartupData* data) {
}
}
-SnapshotData::SnapshotData(const Serializer* serializer) {
- DisallowHeapAllocation no_gc;
- std::vector<Reservation> reservations = serializer->EncodeReservations();
- const std::vector<byte>* payload = serializer->Payload();
-
- // Calculate sizes.
- uint32_t reservation_size =
- static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t payload_offset = kHeaderSize + reservation_size;
- uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- uint32_t size =
- padded_payload_offset + static_cast<uint32_t>(payload->size());
-
- // Allocate backing store and create result data.
- AllocateData(size);
-
- // Zero out pre-payload data. Part of that is only used for padding.
- memset(data_, 0, padded_payload_offset);
-
- // Set header values.
- SetMagicNumber();
- SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
- SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
-
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.data()),
- reservation_size);
-
- // Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->data(),
- static_cast<size_t>(payload->size()));
-}
-
-std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
- uint32_t size = GetHeaderValue(kNumReservationsOffset);
- std::vector<SerializedData::Reservation> reservations(size);
- memcpy(reservations.data(), data_ + kHeaderSize,
- size * sizeof(SerializedData::Reservation));
- return reservations;
-}
-
-Vector<const byte> SnapshotData::Payload() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- uint32_t padded_payload_offset =
- POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
- const byte* payload = data_ + padded_payload_offset;
- uint32_t length = GetHeaderValue(kPayloadLengthOffset);
- DCHECK_EQ(data_ + size_, payload + length);
- return Vector<const byte>(payload, length);
-}
-
namespace {
bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
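The hunks above route every blob-header read and write through SnapshotImpl::GetHeaderValue/SetHeaderValue, which store each header field as a little-endian uint32 at a fixed byte offset inside the StartupData blob. What follows is a minimal standalone sketch of that header-field pattern, not V8 code: the WriteLE/ReadLE helpers and the three-field layout are illustrative assumptions only.

#include <cstdint>
#include <iostream>
#include <vector>

namespace {

// Store the four bytes of |value| at |offset| in little-endian order.
void WriteLE(char* data, uint32_t offset, uint32_t value) {
  for (int i = 0; i < 4; ++i) {
    data[offset + i] = static_cast<char>((value >> (8 * i)) & 0xFF);
  }
}

// Read back a little-endian uint32 from |offset|.
uint32_t ReadLE(const char* data, uint32_t offset) {
  uint32_t value = 0;
  for (int i = 0; i < 4; ++i) {
    value |= static_cast<uint32_t>(static_cast<unsigned char>(data[offset + i]))
             << (8 * i);
  }
  return value;
}

}  // namespace

int main() {
  // Hypothetical 3-field header: [0] context count, [4] rehashability, [8] checksum.
  std::vector<char> blob(12, 0);
  WriteLE(blob.data(), 0, 2);           // two context snapshots
  WriteLE(blob.data(), 4, 1);           // rehashable
  WriteLE(blob.data(), 8, 0xCAFEF00Du);  // placeholder checksum
  std::cout << "contexts=" << ReadLE(blob.data(), 0)
            << " rehashable=" << ReadLE(blob.data(), 4) << "\n";
  return 0;
}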
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index d9f05c59a8..e0ea02681c 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -1,168 +1,103 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
-#include "src/snapshot/partial-serializer.h"
-#include "src/snapshot/startup-serializer.h"
-
-#include "src/utils/utils.h"
+#include "include/v8.h" // For StartupData.
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
-// Forward declarations.
+class Context;
class Isolate;
-class PartialSerializer;
-class SnapshotCompression;
-class StartupSerializer;
-
-// Wrapper around reservation sizes and the serialization payload.
-class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
- public:
- // Used when producing.
- explicit SnapshotData(const Serializer* serializer);
-
- // Used when consuming.
- explicit SnapshotData(const Vector<const byte> snapshot)
- : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
- }
-
- std::vector<Reservation> Reservations() const;
- virtual Vector<const byte> Payload() const;
-
- Vector<const byte> RawData() const {
- return Vector<const byte>(data_, size_);
- }
-
- protected:
- // Empty constructor used by SnapshotCompression so it can manually allocate
- // memory.
- SnapshotData() : SerializedData() {}
- friend class SnapshotCompression;
-
- // Resize used by SnapshotCompression so it can shrink the compressed
- // SnapshotData.
- void Resize(uint32_t size) { size_ = size; }
-
- // The data header consists of uint32_t-sized entries:
- // [0] magic number and (internal) external reference count
- // [1] number of reservation size entries
- // [2] payload length
- // ... reservations
- // ... serialized payload
- static const uint32_t kNumReservationsOffset =
- kMagicNumberOffset + kUInt32Size;
- static const uint32_t kPayloadLengthOffset =
- kNumReservationsOffset + kUInt32Size;
- static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
-};
+class SnapshotData;
+class JSGlobalProxy;
class Snapshot : public AllStatic {
public:
- // ---------------- Deserialization ----------------
+ // ---------------- Serialization -------------------------------------------
+
+ enum SerializerFlag {
+ // If set, serializes unknown external references as verbatim data. This
+ // usually leads to invalid state if the snapshot is deserialized in a
+ // different isolate or a different process.
+ // If unset, all external references must be known to the encoder.
+ kAllowUnknownExternalReferencesForTesting = 1 << 0,
+ // If set, the serializer enters a more permissive mode which allows
+ // serialization of a currently active, running isolate. This has multiple
+ // effects; for example, open handles are allowed, microtasks may exist,
+ // etc. Note that in this mode, the serializer is allowed to skip
+ // visitation of certain problematic areas even if they are non-empty. The
+ // resulting snapshot is not guaranteed to result in a runnable context
+ // after deserialization.
+ // If unset, we assert that these previously mentioned areas are empty.
+ kAllowActiveIsolateForTesting = 1 << 1,
+ };
+ using SerializerFlags = base::Flags<SerializerFlag>;
+ V8_EXPORT_PRIVATE static constexpr SerializerFlags kDefaultSerializerFlags =
+ {};
+
+ // In preparation for serialization, clear data from the given isolate's heap
+ // that 1. can be reconstructed and 2. is not suitable for serialization. The
+ // `clear_recompilable_data` flag controls whether compiled objects are
+ // cleared from shared function infos and regexp objects.
+ V8_EXPORT_PRIVATE static void ClearReconstructableDataForSerialization(
+ Isolate* isolate, bool clear_recompilable_data);
+
+ // Serializes the given isolate and contexts. Each context may have an
+ // associated callback to serialize internal fields. The default context must
+ // be passed at index 0.
+ static v8::StartupData Create(
+ Isolate* isolate, std::vector<Context>* contexts,
+ const std::vector<SerializeInternalFieldsCallback>&
+ embedder_fields_serializers,
+ const DisallowHeapAllocation& no_gc,
+ SerializerFlags flags = kDefaultSerializerFlags);
+
+ // Convenience helper for the above when only serializing a single context.
+ static v8::StartupData Create(
+ Isolate* isolate, Context default_context,
+ const DisallowHeapAllocation& no_gc,
+ SerializerFlags flags = kDefaultSerializerFlags);
+
+ // ---------------- Deserialization -----------------------------------------
// Initialize the Isolate from the internal snapshot. Returns false if no
// snapshot could be found.
static bool Initialize(Isolate* isolate);
- // Create a new context using the internal partial snapshot.
+ // Create a new context using the internal context snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
- // ---------------- Helper methods ----------------
+ // ---------------- Testing -------------------------------------------------
+
+ // This function is used to stress the snapshot component. It serializes the
+ // current isolate and context into a snapshot, deserializes the snapshot into
+ // a new isolate and context, and finally runs VerifyHeap on the fresh
+ // isolate.
+ V8_EXPORT_PRIVATE static void SerializeDeserializeAndVerifyForTesting(
+ Isolate* isolate, Handle<Context> default_context);
+
+ // ---------------- Helper methods ------------------------------------------
static bool HasContextSnapshot(Isolate* isolate, size_t index);
static bool EmbedsScript(Isolate* isolate);
+ V8_EXPORT_PRIVATE static bool VerifyChecksum(const v8::StartupData* data);
+ static bool ExtractRehashability(const v8::StartupData* data);
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
- V8_EXPORT_PRIVATE static bool VerifyChecksum(const v8::StartupData* data);
-
- // ---------------- Serialization ----------------
-
- static v8::StartupData CreateSnapshotBlob(
- const SnapshotData* startup_snapshot_in,
- const SnapshotData* read_only_snapshot_in,
- const std::vector<SnapshotData*>& context_snapshots_in,
- bool can_be_rehashed);
-
#ifdef DEBUG
static bool SnapshotIsValid(const v8::StartupData* snapshot_blob);
#endif // DEBUG
-
- static bool ExtractRehashability(const v8::StartupData* data);
-
- private:
- static uint32_t ExtractNumContexts(const v8::StartupData* data);
- static uint32_t ExtractContextOffset(const v8::StartupData* data,
- uint32_t index);
- static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
- static Vector<const byte> ExtractReadOnlyData(const v8::StartupData* data);
- static Vector<const byte> ExtractContextData(const v8::StartupData* data,
- uint32_t index);
-
- static uint32_t GetHeaderValue(const v8::StartupData* data, uint32_t offset) {
- return base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(data->data) + offset);
- }
- static void SetHeaderValue(char* data, uint32_t offset, uint32_t value) {
- base::WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset,
- value);
- }
-
- static void CheckVersion(const v8::StartupData* data);
-
- // Snapshot blob layout:
- // [0] number of contexts N
- // [1] rehashability
- // [2] checksum
- // [3] (128 bytes) version string
- // [4] offset to readonly
- // [5] offset to context 0
- // [6] offset to context 1
- // ...
- // ... offset to context N - 1
- // ... startup snapshot data
- // ... read-only snapshot data
- // ... context 0 snapshot data
- // ... context 1 snapshot data
-
- static const uint32_t kNumberOfContextsOffset = 0;
- // TODO(yangguo): generalize rehashing, and remove this flag.
- static const uint32_t kRehashabilityOffset =
- kNumberOfContextsOffset + kUInt32Size;
- static const uint32_t kChecksumOffset = kRehashabilityOffset + kUInt32Size;
- static const uint32_t kVersionStringOffset = kChecksumOffset + kUInt32Size;
- static const uint32_t kVersionStringLength = 64;
- static const uint32_t kReadOnlyOffsetOffset =
- kVersionStringOffset + kVersionStringLength;
- static const uint32_t kFirstContextOffsetOffset =
- kReadOnlyOffsetOffset + kUInt32Size;
-
- static Vector<const byte> ChecksummedContent(const v8::StartupData* data) {
- STATIC_ASSERT(kVersionStringOffset == kChecksumOffset + kUInt32Size);
- const uint32_t kChecksumStart = kVersionStringOffset;
- return Vector<const byte>(
- reinterpret_cast<const byte*>(data->data + kChecksumStart),
- data->raw_size - kChecksumStart);
- }
-
- static uint32_t StartupSnapshotOffset(int num_contexts) {
- return POINTER_SIZE_ALIGN(kFirstContextOffsetOffset +
- num_contexts * kInt32Size);
- }
-
- static uint32_t ContextSnapshotOffsetOffset(int index) {
- return kFirstContextOffsetOffset + index * kInt32Size;
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
// Convenience wrapper around snapshot data blob creation used e.g. by tests and
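The rewritten snapshot.h above introduces Snapshot::SerializerFlags as base::Flags<SerializerFlag>, so callers can combine the two testing-only relaxations as a bitmask and pass the result to Snapshot::Create. Below is a standalone sketch of that enum-backed flag-set idea, not V8 code: the MiniFlags helper is an illustrative stand-in for base::Flags, which offers considerably more.

#include <cstdint>
#include <iostream>

enum SerializerFlag : uint32_t {
  kAllowUnknownExternalReferencesForTesting = 1u << 0,
  kAllowActiveIsolateForTesting = 1u << 1,
};

class MiniFlags {
 public:
  constexpr MiniFlags() = default;
  constexpr MiniFlags(SerializerFlag f) : bits_(f) {}
  constexpr MiniFlags operator|(MiniFlags other) const {
    return MiniFlags(bits_ | other.bits_);
  }
  constexpr bool Has(SerializerFlag f) const { return (bits_ & f) != 0; }

 private:
  constexpr explicit MiniFlags(uint32_t bits) : bits_(bits) {}
  uint32_t bits_ = 0;
};

int main() {
  // Default flags: neither testing relaxation is enabled.
  constexpr MiniFlags kDefault{};
  // A stress test might opt into both relaxations.
  constexpr MiniFlags for_testing =
      MiniFlags(kAllowActiveIsolateForTesting) |
      MiniFlags(kAllowUnknownExternalReferencesForTesting);
  std::cout << std::boolalpha
            << kDefault.Has(kAllowActiveIsolateForTesting) << " "
            << for_testing.Has(kAllowActiveIsolateForTesting) << "\n";
  return 0;
}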
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 7ef6ac6168..095009b4e8 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -8,6 +8,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/execution/v8threads.h"
#include "src/heap/heap-inl.h"
+#include "src/logging/log.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -24,20 +25,23 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active handles.
DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
- // Partial snapshot cache is not yet populated.
- DCHECK(isolate->partial_snapshot_cache()->empty());
+ // Startup object cache is not yet populated.
+ DCHECK(isolate->startup_object_cache()->empty());
// Builtins are not yet created.
DCHECK(!isolate->builtins()->is_initialized());
{
DisallowHeapAllocation no_gc;
isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
+ isolate->heap()->IterateRoots(
+ this,
+ base::EnumSet<SkipRoot>{SkipRoot::kUnserializable, SkipRoot::kWeak});
Iterate(isolate, this);
- isolate->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
+ isolate->heap()->IterateWeakRoots(
+ this, base::EnumSet<SkipRoot>{SkipRoot::kUnserializable});
DeserializeDeferredObjects();
- RestoreExternalReferenceRedirectors(accessor_infos());
- RestoreExternalReferenceRedirectors(call_handler_infos());
+ RestoreExternalReferenceRedirectors(isolate, accessor_infos());
+ RestoreExternalReferenceRedirectors(isolate, call_handler_infos());
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 141fcfb23f..0820b044c6 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -19,17 +19,61 @@
namespace v8 {
namespace internal {
+namespace {
+
+// The isolate roots may not point at context-specific objects during
+// serialization.
+class SanitizeIsolateScope final {
+ public:
+ SanitizeIsolateScope(Isolate* isolate, bool allow_active_isolate_for_testing,
+ const DisallowHeapAllocation& no_gc)
+ : isolate_(isolate),
+ feedback_vectors_for_profiling_tools_(
+ isolate->heap()->feedback_vectors_for_profiling_tools()),
+ detached_contexts_(isolate->heap()->detached_contexts()) {
+#ifdef DEBUG
+ if (!allow_active_isolate_for_testing) {
+ // These should already be empty when creating a real snapshot.
+ DCHECK_EQ(feedback_vectors_for_profiling_tools_,
+ ReadOnlyRoots(isolate).undefined_value());
+ DCHECK_EQ(detached_contexts_,
+ ReadOnlyRoots(isolate).empty_weak_array_list());
+ }
+#endif
+
+ isolate->SetFeedbackVectorsForProfilingTools(
+ ReadOnlyRoots(isolate).undefined_value());
+ isolate->heap()->SetDetachedContexts(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
+ }
+
+ ~SanitizeIsolateScope() {
+ // Restore saved fields.
+ isolate_->SetFeedbackVectorsForProfilingTools(
+ feedback_vectors_for_profiling_tools_);
+ isolate_->heap()->SetDetachedContexts(detached_contexts_);
+ }
+
+ private:
+ Isolate* isolate_;
+ const Object feedback_vectors_for_profiling_tools_;
+ const WeakArrayList detached_contexts_;
+};
+
+} // namespace
+
StartupSerializer::StartupSerializer(Isolate* isolate,
+ Snapshot::SerializerFlags flags,
ReadOnlySerializer* read_only_serializer)
- : RootsSerializer(isolate, RootIndex::kFirstStrongRoot),
+ : RootsSerializer(isolate, flags, RootIndex::kFirstStrongRoot),
read_only_serializer_(read_only_serializer) {
allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
InitializeCodeAddressMap();
}
StartupSerializer::~StartupSerializer() {
- RestoreExternalReferenceRedirectors(accessor_infos_);
- RestoreExternalReferenceRedirectors(call_handler_infos_);
+ RestoreExternalReferenceRedirectors(isolate(), accessor_infos_);
+ RestoreExternalReferenceRedirectors(isolate(), call_handler_infos_);
OutputStatistics("StartupSerializer");
}
@@ -96,13 +140,17 @@ void StartupSerializer::SerializeObject(HeapObject obj) {
if (use_simulator && obj.IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
AccessorInfo info = AccessorInfo::cast(obj);
- Address original_address = Foreign::cast(info.getter()).foreign_address();
- Foreign::cast(info.js_getter()).set_foreign_address(original_address);
+ Address original_address =
+ Foreign::cast(info.getter()).foreign_address(isolate());
+ Foreign::cast(info.js_getter())
+ .set_foreign_address(isolate(), original_address);
accessor_infos_.push_back(info);
} else if (use_simulator && obj.IsCallHandlerInfo()) {
CallHandlerInfo info = CallHandlerInfo::cast(obj);
- Address original_address = Foreign::cast(info.callback()).foreign_address();
- Foreign::cast(info.js_callback()).set_foreign_address(original_address);
+ Address original_address =
+ Foreign::cast(info.callback()).foreign_address(isolate());
+ Foreign::cast(info.js_callback())
+ .set_foreign_address(isolate(), original_address);
call_handler_infos_.push_back(info);
} else if (obj.IsScript() && Script::cast(obj).IsUserJavaScript()) {
Script::cast(obj).set_context_data(
@@ -125,28 +173,36 @@ void StartupSerializer::SerializeObject(HeapObject obj) {
}
void StartupSerializer::SerializeWeakReferencesAndDeferred() {
- // This comes right after serialization of the partial snapshot, where we
- // add entries to the partial snapshot cache of the startup snapshot. Add
- // one entry with 'undefined' to terminate the partial snapshot cache.
+ // This comes right after serialization of the context snapshot, where we
+ // add entries to the startup object cache of the startup snapshot. Add
+ // one entry with 'undefined' to terminate the startup object cache.
Object undefined = ReadOnlyRoots(isolate()).undefined_value();
- VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ VisitRootPointer(Root::kStartupObjectCache, nullptr,
FullObjectSlot(&undefined));
- isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
+ isolate()->heap()->IterateWeakRoots(
+ this, base::EnumSet<SkipRoot>{SkipRoot::kUnserializable});
SerializeDeferredObjects();
Pad();
}
-void StartupSerializer::SerializeStrongReferences() {
+void StartupSerializer::SerializeStrongReferences(
+ const DisallowHeapAllocation& no_gc) {
Isolate* isolate = this->isolate();
// No active threads.
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
- CHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ CHECK_IMPLIES(!allow_active_isolate_for_testing(),
+ isolate->handle_scope_implementer()->blocks()->empty());
+
+ SanitizeIsolateScope sanitize_isolate(
+ isolate, allow_active_isolate_for_testing(), no_gc);
// Visit smi roots and immortal immovables first to make sure they end up in
// the first page.
isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
+ isolate->heap()->IterateRoots(
+ this,
+ base::EnumSet<SkipRoot>{SkipRoot::kUnserializable, SkipRoot::kWeak});
}
SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
@@ -163,11 +219,11 @@ bool StartupSerializer::SerializeUsingReadOnlyObjectCache(
return read_only_serializer_->SerializeUsingReadOnlyObjectCache(sink, obj);
}
-void StartupSerializer::SerializeUsingPartialSnapshotCache(
- SnapshotByteSink* sink, HeapObject obj) {
+void StartupSerializer::SerializeUsingStartupObjectCache(SnapshotByteSink* sink,
+ HeapObject obj) {
int cache_index = SerializeInObjectCache(obj);
- sink->Put(kPartialSnapshotCache, "PartialSnapshotCache");
- sink->PutInt(cache_index, "partial_snapshot_cache_index");
+ sink->Put(kStartupObjectCache, "StartupObjectCache");
+ sink->PutInt(cache_index, "startup_object_cache_index");
}
void StartupSerializer::CheckNoDirtyFinalizationRegistries() {
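SanitizeIsolateScope above is a save/clear/restore RAII guard: context-specific heap fields are stashed in the constructor, replaced with empty sentinel values for the duration of serialization, and restored in the destructor. The following standalone sketch shows the same pattern; it is not V8 code, and FakeHeapState with its string sentinel is a stand-in for the real feedback-vector and detached-context fields.

#include <iostream>
#include <string>

struct FakeHeapState {
  std::string detached_contexts = "three detached contexts";
};

class SanitizeScope {
 public:
  explicit SanitizeScope(FakeHeapState* state)
      : state_(state), saved_(state->detached_contexts) {
    // Clear the field so the serializer never sees context-specific data.
    state_->detached_contexts = "<empty>";
  }
  ~SanitizeScope() { state_->detached_contexts = saved_; }  // restore on exit

 private:
  FakeHeapState* state_;
  std::string saved_;
};

int main() {
  FakeHeapState heap;
  {
    SanitizeScope sanitize(&heap);
    std::cout << "during serialization: " << heap.detached_contexts << "\n";
  }
  std::cout << "after serialization:  " << heap.detached_contexts << "\n";
  return 0;
}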
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 50c023852f..e09c3e4857 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -18,15 +18,16 @@ class ReadOnlySerializer;
class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
public:
- StartupSerializer(Isolate* isolate, ReadOnlySerializer* read_only_serializer);
+ StartupSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
+ ReadOnlySerializer* read_only_serializer);
~StartupSerializer() override;
// Serialize the current state of the heap. The order is:
// 1) Strong roots
// 2) Builtins and bytecode handlers
- // 3) Partial snapshot cache
+ // 3) Startup object cache
// 4) Weak references (e.g. the string table)
- void SerializeStrongReferences();
+ void SerializeStrongReferences(const DisallowHeapAllocation& no_gc);
void SerializeWeakReferencesAndDeferred();
// If |obj| can be serialized in the read-only snapshot then add it to the
@@ -36,10 +37,9 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
HeapObject obj);
- // Adds |obj| to the partial snapshot object cache if not already present and
- // emits a PartialSnapshotCache bytecode into |sink|.
- void SerializeUsingPartialSnapshotCache(SnapshotByteSink* sink,
- HeapObject obj);
+ // Adds |obj| to the startup object cache if not already present and

+ // emits a StartupObjectCache bytecode into |sink|.
+ void SerializeUsingStartupObjectCache(SnapshotByteSink* sink, HeapObject obj);
// The per-heap dirty FinalizationRegistry list is weak and not serialized. No
// JSFinalizationRegistries should be used during startup.
diff --git a/deps/v8/src/strings/uri.cc b/deps/v8/src/strings/uri.cc
index de4e339b39..466c3616a1 100644
--- a/deps/v8/src/strings/uri.cc
+++ b/deps/v8/src/strings/uri.cc
@@ -420,7 +420,7 @@ static MaybeHandle<String> UnescapePrivate(Isolate* isolate,
int index;
{
DisallowHeapAllocation no_allocation;
- StringSearch<uint8_t, Char> search(isolate, StaticCharVector("%"));
+ StringSearch<uint8_t, Char> search(isolate, StaticOneByteVector("%"));
index = search.Search(source->GetCharVector<Char>(no_allocation), 0);
if (index < 0) return source;
}
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 93e0622aee..76c047c7ee 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -93,7 +93,7 @@ namespace torque {
AST_STATEMENT_NODE_KIND_LIST(V) \
AST_DECLARATION_NODE_KIND_LIST(V) \
V(Identifier) \
- V(LabelBlock) \
+ V(TryHandler) \
V(ClassBody)
struct AstNode {
@@ -204,7 +204,7 @@ struct EnumDescription {
class Ast {
public:
- Ast() {}
+ Ast() = default;
std::vector<Declaration*>& declarations() { return declarations_; }
const std::vector<Declaration*>& declarations() const {
@@ -780,14 +780,17 @@ struct ForLoopStatement : Statement {
Statement* body;
};
-struct LabelBlock : AstNode {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
- LabelBlock(SourcePosition pos, Identifier* label,
+struct TryHandler : AstNode {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(TryHandler)
+ enum class HandlerKind { kCatch, kLabel };
+ TryHandler(SourcePosition pos, HandlerKind handler_kind, Identifier* label,
const ParameterList& parameters, Statement* body)
: AstNode(kKind, pos),
+ handler_kind(handler_kind),
label(label),
parameters(parameters),
body(std::move(body)) {}
+ HandlerKind handler_kind;
Identifier* label;
ParameterList parameters;
Statement* body;
@@ -802,15 +805,13 @@ struct StatementExpression : Expression {
struct TryLabelExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelExpression)
- TryLabelExpression(SourcePosition pos, bool catch_exceptions,
- Expression* try_expression, LabelBlock* label_block)
+ TryLabelExpression(SourcePosition pos, Expression* try_expression,
+ TryHandler* label_block)
: Expression(kKind, pos),
- catch_exceptions(catch_exceptions),
try_expression(try_expression),
label_block(label_block) {}
- bool catch_exceptions;
Expression* try_expression;
- LabelBlock* label_block;
+ TryHandler* label_block;
};
struct BlockStatement : Statement {
@@ -1175,8 +1176,7 @@ struct ClassBody : AstNode {
struct ClassDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration)
ClassDeclaration(SourcePosition pos, Identifier* name, ClassFlags flags,
- base::Optional<TypeExpression*> super,
- base::Optional<std::string> generates,
+ TypeExpression* super, base::Optional<std::string> generates,
std::vector<Declaration*> methods,
std::vector<ClassFieldExpression> fields,
InstanceTypeConstraints instance_type_constraints)
@@ -1188,7 +1188,7 @@ struct ClassDeclaration : TypeDeclaration {
fields(std::move(fields)),
instance_type_constraints(std::move(instance_type_constraints)) {}
ClassFlags flags;
- base::Optional<TypeExpression*> super;
+ TypeExpression* super;
base::Optional<std::string> generates;
std::vector<Declaration*> methods;
std::vector<ClassFieldExpression> fields;
diff --git a/deps/v8/src/torque/class-debug-reader-generator.cc b/deps/v8/src/torque/class-debug-reader-generator.cc
index 52646dff26..b89ec85d80 100644
--- a/deps/v8/src/torque/class-debug-reader-generator.cc
+++ b/deps/v8/src/torque/class-debug-reader-generator.cc
@@ -53,7 +53,7 @@ class ValueTypeFieldIterator {
if (const auto type_wrapped_in_smi =
Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
type = *type_wrapped_in_smi;
- bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
+ bitfield_start_offset = TargetArchitecture::SmiTagAndShiftSize();
}
if (const BitFieldStructType* bit_field_struct_type =
BitFieldStructType::DynamicCast(type)) {
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 9eba568ac9..616e7a23ac 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -24,6 +24,7 @@ static const char* const BOOL_TYPE_STRING = "bool";
static const char* const VOID_TYPE_STRING = "void";
static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
+static const char* const NO_CONTEXT_TYPE_STRING = "NoContext";
static const char* const NATIVE_CONTEXT_TYPE_STRING = "NativeContext";
static const char* const JS_FUNCTION_TYPE_STRING = "JSFunction";
static const char* const MAP_TYPE_STRING = "Map";
@@ -38,6 +39,7 @@ static const char* const UNINITIALIZED_TYPE_STRING = "Uninitialized";
static const char* const UNINITIALIZED_HEAP_OBJECT_TYPE_STRING =
"UninitializedHeapObject";
static const char* const RAWPTR_TYPE_STRING = "RawPtr";
+static const char* const EXTERNALPTR_TYPE_STRING = "ExternalPointer";
static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
static const char* const STRING_TYPE_STRING = "String";
static const char* const NUMBER_TYPE_STRING = "Number";
@@ -126,7 +128,6 @@ enum class ClassFlag {
kIsShape = 1 << 5,
kHasSameInstanceTypeAsParent = 1 << 6,
kGenerateCppClassDefinitions = 1 << 7,
- kHasIndexedField = 1 << 8,
kHighestInstanceTypeWithinParent = 1 << 9,
kLowestInstanceTypeWithinParent = 1 << 10,
kUndefinedLayout = 1 << 11,
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 9716ccbad4..45ed7f3af4 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -5,6 +5,7 @@
#include "src/torque/csa-generator.h"
#include "src/common/globals.h"
+#include "src/torque/global-context.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -511,8 +512,14 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
LowerType(instruction.builtin->signature().return_type);
if (instruction.is_tailcall) {
out() << " CodeStubAssembler(state_).TailCallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out(), arguments);
+ << instruction.builtin->ExternalName();
+ if (!instruction.builtin->signature().HasContextParameter()) {
+ // Add dummy context parameter to satisfy the TailCallBuiltin signature.
+ out() << ", TNode<Object>()";
+ }
+ for (const std::string& argument : arguments) {
+ out() << ", " << argument;
+ }
out() << ");\n";
} else {
std::string result_name;
@@ -524,25 +531,24 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
std::string catch_name =
PreCallableExceptionPreparation(instruction.catch_block);
Stack<std::string> pre_call_stack = *stack;
- if (result_types.size() == 1) {
- std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
- stack->Push(result_name);
- out() << " " << result_name << " = ";
- if (generated_type != "Object") out() << "TORQUE_CAST(";
- out() << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out(), arguments);
- if (generated_type != "Object") out() << ")";
- out() << ");\n";
- } else {
- DCHECK_EQ(0, result_types.size());
- // TODO(tebbi): Actually, builtins have to return a value, so we should
- // not have to handle this case.
- out() << " CodeStubAssembler(state_).CallBuiltin(Builtins::k"
- << instruction.builtin->ExternalName() << ", ";
- PrintCommaSeparatedList(out(), arguments);
- out() << ");\n";
+
+ DCHECK_EQ(1, result_types.size());
+ std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
+ stack->Push(result_name);
+ out() << " " << result_name << " = ";
+ if (generated_type != "Object") out() << "TORQUE_CAST(";
+ out() << "CodeStubAssembler(state_).CallBuiltin(Builtins::k"
+ << instruction.builtin->ExternalName();
+ if (!instruction.builtin->signature().HasContextParameter()) {
+ // Add dummy context parameter to satisfy the CallBuiltin signature.
+ out() << ", TNode<Object>()";
}
+ for (const std::string& argument : arguments) {
+ out() << ", " << argument;
+ }
+ if (generated_type != "Object") out() << ")";
+ out() << ");\n";
+
PostCallableExceptionPreparation(
catch_name,
result_types.size() == 0 ? TypeOracle::GetVoidType() : result_types[0],
@@ -554,8 +560,8 @@ void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
void CSAGenerator::EmitInstruction(
const CallBuiltinPointerInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> function_and_arguments =
- stack->PopMany(1 + instruction.argc);
+ std::vector<std::string> arguments = stack->PopMany(instruction.argc);
+ std::string function = stack->Pop();
std::vector<const Type*> result_types =
LowerType(instruction.type->return_type());
if (result_types.size() != 1) {
@@ -575,8 +581,15 @@ void CSAGenerator::EmitInstruction(
"CallableFor(ca_."
"isolate(),"
"ExampleBuiltinForTorqueFunctionPointerType("
- << instruction.type->function_pointer_type_id() << ")).descriptor(), ";
- PrintCommaSeparatedList(out(), function_and_arguments);
+ << instruction.type->function_pointer_type_id() << ")).descriptor(), "
+ << function;
+ if (!instruction.type->HasContextParameter()) {
+ // Add dummy context parameter to satisfy the CallBuiltinPointer signature.
+ out() << ", TNode<Object>()";
+ }
+ for (const std::string& argument : arguments) {
+ out() << ", " << argument;
+ }
out() << ")";
if (generated_type != "Object") out() << ")";
out() << ";\n";
@@ -858,13 +871,20 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
}
namespace {
-std::string GetBitFieldSpecialization(const BitFieldStructType* container,
+std::string GetBitFieldSpecialization(const Type* container,
const BitField& field) {
+ auto smi_tagged_type =
+ Type::MatchUnaryGeneric(container, TypeOracle::GetSmiTaggedGeneric());
+ std::string container_type = smi_tagged_type
+ ? "uintptr_t"
+ : container->GetConstexprGeneratedTypeName();
+ int offset = smi_tagged_type
+ ? field.offset + TargetArchitecture::SmiTagAndShiftSize()
+ : field.offset;
std::stringstream stream;
stream << "base::BitField<"
<< field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
- << field.offset << ", " << field.num_bits << ", "
- << container->GetConstexprGeneratedTypeName() << ">";
+ << offset << ", " << field.num_bits << ", " << container_type << ">";
return stream.str();
}
} // namespace
@@ -877,23 +897,36 @@ void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
std::string bit_field_struct = stack->Pop();
stack->Push(result_name);
- const BitFieldStructType* source_type = instruction.bit_field_struct_type;
- const Type* result_type = instruction.bit_field.name_and_type.type;
- bool source_uintptr = source_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
- bool result_uintptr = result_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
- std::string source_word_type = source_uintptr ? "WordT" : "Word32T";
+ const Type* struct_type = instruction.bit_field_struct_type;
+ const Type* field_type = instruction.bit_field.name_and_type.type;
+ auto smi_tagged_type =
+ Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
+ bool struct_is_pointer_size =
+ IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
+ DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
+ bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
+ DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
+ std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
std::string decoder =
- source_uintptr
- ? (result_uintptr ? "DecodeWord" : "DecodeWord32FromWord")
- : (result_uintptr ? "DecodeWordFromWord32" : "DecodeWord32");
+ struct_is_pointer_size
+ ? (field_is_pointer_size ? "DecodeWord" : "DecodeWord32FromWord")
+ : (field_is_pointer_size ? "DecodeWordFromWord32" : "DecodeWord32");
- decls() << " " << result_type->GetGeneratedTypeName() << " " << result_name
+ decls() << " " << field_type->GetGeneratedTypeName() << " " << result_name
<< ";\n";
+
+ if (smi_tagged_type) {
+ // If the container is a SMI, then UncheckedCast is insufficient and we must
+ // use a bit cast.
+ bit_field_struct =
+ "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
+ }
+
out() << " " << result_name << " = ca_.UncheckedCast<"
- << result_type->GetGeneratedTNodeTypeName()
+ << field_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_)." << decoder << "<"
- << GetBitFieldSpecialization(source_type, instruction.bit_field)
- << ">(ca_.UncheckedCast<" << source_word_type << ">("
+ << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+ << ">(ca_.UncheckedCast<" << struct_word_type << ">("
<< bit_field_struct << ")));\n";
}
@@ -906,25 +939,47 @@ void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
std::string bit_field_struct = stack->Pop();
stack->Push(result_name);
- const BitFieldStructType* struct_type = instruction.bit_field_struct_type;
+ const Type* struct_type = instruction.bit_field_struct_type;
const Type* field_type = instruction.bit_field.name_and_type.type;
- bool struct_uintptr = struct_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
- bool field_uintptr = field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
- std::string struct_word_type = struct_uintptr ? "WordT" : "Word32T";
- std::string field_word_type = field_uintptr ? "UintPtrT" : "Uint32T";
+ auto smi_tagged_type =
+ Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
+ bool struct_is_pointer_size =
+ IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
+ DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
+ bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
+ DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
+ std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
+ std::string field_word_type = field_is_pointer_size ? "UintPtrT" : "Uint32T";
std::string encoder =
- struct_uintptr ? (field_uintptr ? "UpdateWord" : "UpdateWord32InWord")
- : (field_uintptr ? "UpdateWordInWord32" : "UpdateWord32");
+ struct_is_pointer_size
+ ? (field_is_pointer_size ? "UpdateWord" : "UpdateWord32InWord")
+ : (field_is_pointer_size ? "UpdateWordInWord32" : "UpdateWord32");
decls() << " " << struct_type->GetGeneratedTypeName() << " " << result_name
<< ";\n";
+
+ if (smi_tagged_type) {
+ // If the container is a SMI, then UncheckedCast is insufficient and we must
+ // use a bit cast.
+ bit_field_struct =
+ "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
+ }
+
+ std::string result_expression =
+ "CodeStubAssembler(state_)." + encoder + "<" +
+ GetBitFieldSpecialization(struct_type, instruction.bit_field) +
+ ">(ca_.UncheckedCast<" + struct_word_type + ">(" + bit_field_struct +
+ "), ca_.UncheckedCast<" + field_word_type + ">(" + value + ")" +
+ (instruction.starts_as_zero ? ", true" : "") + ")";
+
+ if (smi_tagged_type) {
+ result_expression =
+ "ca_.BitcastWordToTaggedSigned(" + result_expression + ")";
+ }
+
out() << " " << result_name << " = ca_.UncheckedCast<"
- << struct_type->GetGeneratedTNodeTypeName()
- << ">(CodeStubAssembler(state_)." << encoder << "<"
- << GetBitFieldSpecialization(struct_type, instruction.bit_field)
- << ">(ca_.UncheckedCast<" << struct_word_type << ">("
- << bit_field_struct << "), ca_.UncheckedCast<" << field_word_type
- << ">(" << value << ")));\n";
+ << struct_type->GetGeneratedTNodeTypeName() << ">(" << result_expression
+ << ");\n";
}
// static
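GetBitFieldSpecialization above adds TargetArchitecture::SmiTagAndShiftSize() to a field's offset whenever the container is SmiTagged<T>: the payload of a Smi sits above the tag and shift bits, so bitfields have to be decoded from the full word at a shifted position. A standalone sketch of that offset adjustment follows; it is not V8 code, and the single tag bit, zero shift, and simplified BitField template are assumptions for illustration.

#include <cstdint>
#include <iostream>

constexpr int kSmiTagAndShiftSize = 1;  // assumed: one tag bit, no shift

// Encode a small payload as a "Smi": payload bits above the tag bit, tag = 0.
constexpr uintptr_t MakeSmi(uintptr_t payload) {
  return payload << kSmiTagAndShiftSize;
}

// A bitfield of |bits| bits that starts at |offset| within the *payload*.
// The mask and shift include the tag/shift size, mirroring the generated
// base::BitField specialization for SmiTagged containers.
template <int offset, int bits>
struct BitField {
  static constexpr uintptr_t kMask = ((uintptr_t{1} << bits) - 1)
                                     << (offset + kSmiTagAndShiftSize);
  static constexpr uintptr_t decode(uintptr_t smi) {
    return (smi & kMask) >> (offset + kSmiTagAndShiftSize);
  }
};

int main() {
  // Payload 0b101101: low 3 bits = 0b101 (5), next 3 bits = 0b101 (5).
  uintptr_t smi = MakeSmi(0b101101);
  std::cout << BitField<0, 3>::decode(smi) << " "
            << BitField<3, 3>::decode(smi) << "\n";
  return 0;
}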
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 5e3c8bbcb2..99b7bdddcd 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -103,6 +103,10 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
*signature.return_type, ".");
}
+ if (signature.return_type == TypeOracle::GetVoidType()) {
+ Error("Builtins cannot have return type void.");
+ }
+
return Declarations::CreateBuiltin(std::move(external_name),
std::move(readable_name), kind,
std::move(signature), body);
@@ -122,11 +126,12 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
"Missing parameters for runtime function, at least the context "
"parameter is required.");
}
- if (!(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
+ if (!(signature.parameter_types.types[0] == TypeOracle::GetContextType() ||
+ signature.parameter_types.types[0] == TypeOracle::GetNoContextType())) {
ReportError(
"first parameter to runtime functions has to be the context and have "
- "type Context, but found type ",
- signature.parameter_types.types[0]);
+ "type Context or NoContext, but found type ",
+ *signature.parameter_types.types[0]);
}
if (!(signature.return_type->IsSubtypeOf(TypeOracle::GetObjectType()) ||
signature.return_type == TypeOracle::GetVoidType() ||
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 81b75bdd7a..77bcd2ab9c 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -52,8 +52,7 @@ enum class ParseResultHolderBase::TypeId {
kDeclarationPtr,
kTypeExpressionPtr,
kOptionalTypeExpressionPtr,
- kLabelBlockPtr,
- kOptionalLabelBlockPtr,
+ kTryHandlerPtr,
kNameAndTypeExpression,
kImplicitParameters,
kOptionalImplicitParameters,
@@ -82,7 +81,7 @@ enum class ParseResultHolderBase::TypeId {
kOptionalTypeList,
kLabelAndTypes,
kStdVectorOfLabelAndTypes,
- kStdVectorOfLabelBlockPtr,
+ kStdVectorOfTryHandlerPtr,
kOptionalStatementPtr,
kOptionalExpressionPtr,
kTypeswitchCase,
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index e236de5a93..35ddb1d2e2 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -24,7 +24,12 @@ GlobalContext::GlobalContext(Ast ast)
TargetArchitecture::TargetArchitecture(bool force_32bit)
: tagged_size_(force_32bit ? sizeof(int32_t) : kTaggedSize),
- raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize) {}
+ raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize),
+ smi_tag_and_shift_size_(
+ kSmiTagSize + (force_32bit ? SmiTagging<kApiInt32Size>::kSmiShiftSize
+ : kSmiShiftSize)),
+ external_ptr_size_(force_32bit ? sizeof(int32_t) : kExternalPointerSize) {
+}
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index bbfbb686ef..6182762a6a 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -67,6 +67,14 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return Get().generated_per_file_[file];
}
+ static void SetInstanceTypesInitialized() {
+ DCHECK(!Get().instance_types_initialized_);
+ Get().instance_types_initialized_ = true;
+ }
+ static bool IsInstanceTypesInitialized() {
+ return Get().instance_types_initialized_;
+ }
+
private:
bool collect_language_server_data_;
bool force_assert_statements_;
@@ -76,6 +84,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::set<std::string> cpp_includes_;
std::map<SourceId, PerFileStreams> generated_per_file_;
std::map<std::string, size_t> fresh_ids_;
+ bool instance_types_initialized_ = false;
friend class LanguageServerData;
};
@@ -91,12 +100,16 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> {
static size_t TaggedSize() { return Get().tagged_size_; }
static size_t RawPtrSize() { return Get().raw_ptr_size_; }
+ static size_t ExternalPointerSize() { return Get().external_ptr_size_; }
static size_t MaxHeapAlignment() { return TaggedSize(); }
static bool ArePointersCompressed() { return TaggedSize() < RawPtrSize(); }
+ static int SmiTagAndShiftSize() { return Get().smi_tag_and_shift_size_; }
private:
const size_t tagged_size_;
const size_t raw_ptr_size_;
+ const int smi_tag_and_shift_size_;
+ const size_t external_ptr_size_;
};
} // namespace torque
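TargetArchitecture above centralizes the per-target tagged, raw-pointer, external-pointer, and Smi tag/shift sizes in one contextual class; ArePointersCompressed() simply reports that a tagged slot is narrower than a raw pointer. The standalone sketch below mirrors that size bookkeeping; it is not V8 code, and the 4/8-byte figures are illustrative defaults rather than values read from a real build configuration.

#include <cstddef>
#include <iostream>

struct TargetArch {
  size_t tagged_size;
  size_t raw_ptr_size;
  // Pointer compression is in effect when tagged slots are narrower than
  // raw pointers, matching TargetArchitecture::ArePointersCompressed().
  bool ArePointersCompressed() const { return tagged_size < raw_ptr_size; }
};

int main() {
  TargetArch compressed{4, 8};    // 64-bit build with pointer compression
  TargetArch uncompressed{8, 8};  // plain 64-bit build
  std::cout << std::boolalpha << compressed.ArePointersCompressed() << " "
            << uncompressed.ArePointersCompressed() << "\n";
  return 0;
}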
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 820576e02a..bee31b4d32 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -530,17 +530,8 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
} else {
DCHECK(builtin->IsStub());
- // Context
- const bool context_is_implicit = signature.implicit_count > 0;
- std::string parameter0 =
- AddParameter(0, builtin, &parameters, &parameter_types,
- &parameter_bindings, context_is_implicit);
- source_out() << " TNode<Context> " << parameter0
- << " = UncheckedCast<Context>(Parameter("
- << "Descriptor::kContext));\n";
- source_out() << " USE(" << parameter0 << ");\n";
-
- for (size_t i = 1; i < signature.parameter_names.size(); ++i) {
+ bool has_context_parameter = signature.HasContextParameter();
+ for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
const Type* type = signature.types()[i];
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
@@ -548,8 +539,14 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
source_out() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
<< "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::ParameterIndex<" << (i - 1)
- << ">()));\n";
+ << ">(Parameter(";
+ if (i == 0 && has_context_parameter) {
+ source_out() << "Descriptor::kContext";
+ } else {
+ source_out() << "Descriptor::ParameterIndex<"
+ << (has_context_parameter ? i - 1 : i) << ">()";
+ }
+ source_out() << "));\n";
source_out() << " USE(" << var << ");\n";
}
}
@@ -1192,30 +1189,6 @@ VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
}
-void ImplementationVisitor::CheckInitializersWellformed(
- const std::string& aggregate_name,
- const std::vector<Field>& aggregate_fields,
- const std::vector<NameAndExpression>& initializers,
- bool ignore_first_field) {
- size_t fields_offset = ignore_first_field ? 1 : 0;
- size_t fields_size = aggregate_fields.size() - fields_offset;
- for (size_t i = 0; i < std::min(fields_size, initializers.size()); i++) {
- const std::string& field_name =
- aggregate_fields[i + fields_offset].name_and_type.name;
- Identifier* found_name = initializers[i].name;
- if (field_name != found_name->value) {
- Error("Expected field name \"", field_name, "\" instead of \"",
- found_name->value, "\"")
- .Position(found_name->pos)
- .Throw();
- }
- }
- if (fields_size != initializers.size()) {
- ReportError("expected ", fields_size, " initializers for ", aggregate_name,
- " found ", initializers.size());
- }
-}
-
InitializerResults ImplementationVisitor::VisitInitializerResults(
const ClassType* class_type,
const std::vector<NameAndExpression>& initializers) {
@@ -1924,21 +1897,57 @@ VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
}
// Compute and check struct type from given struct name and argument types
- const StructType* struct_type = TypeVisitor::ComputeTypeForStructExpression(
+ const Type* type = TypeVisitor::ComputeTypeForStructExpression(
expr->type, term_argument_types);
- CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
- initializers);
+ if (const auto* struct_type = StructType::DynamicCast(type)) {
+ CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
+ initializers);
- // Implicitly convert values and thereby build the struct on the stack
- StackRange struct_range = assembler().TopRange(0);
- auto& fields = struct_type->fields();
- for (size_t i = 0; i < values.size(); i++) {
- values[i] =
- GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
- struct_range.Extend(values[i].stack_range());
+ // Implicitly convert values and thereby build the struct on the stack
+ StackRange struct_range = assembler().TopRange(0);
+ auto& fields = struct_type->fields();
+ for (size_t i = 0; i < values.size(); i++) {
+ values[i] =
+ GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
+ struct_range.Extend(values[i].stack_range());
+ }
+
+ return stack_scope.Yield(VisitResult(struct_type, struct_range));
+ } else {
+ const auto* bitfield_struct_type = BitFieldStructType::cast(type);
+ CheckInitializersWellformed(bitfield_struct_type->name(),
+ bitfield_struct_type->fields(), initializers);
+
+ // Create a zero and cast it to the desired bitfield struct type.
+ VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
+ result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
+ result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
+ result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
+ {bitfield_struct_type});
+
+ // Set each field in the result. If these fields are constexpr, then all of
+ // this initialization will end up reduced to a single value during TurboFan
+ // optimization.
+ auto& fields = bitfield_struct_type->fields();
+ for (size_t i = 0; i < values.size(); i++) {
+ values[i] =
+ GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
+ result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
+ values[i], /*starts_as_zero=*/true);
+ }
+
+ return stack_scope.Yield(result);
}
+}
- return stack_scope.Yield(VisitResult(struct_type, struct_range));
+VisitResult ImplementationVisitor::GenerateSetBitField(
+ const Type* bitfield_struct_type, const BitField& bitfield,
+ VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
+ GenerateCopy(bitfield_struct);
+ GenerateCopy(value);
+ assembler().Emit(
+ StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
+ return VisitResult(bitfield_struct_type, assembler().TopRange(1));
}
LocationReference ImplementationVisitor::GetLocationReference(
@@ -2002,6 +2011,20 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
const BitField& field = bitfield_struct->LookupField(fieldname);
return LocationReference::BitFieldAccess(reference, field);
}
+ if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
+ reference.ReferencedType(), TypeOracle::GetSmiTaggedGeneric())) {
+ const BitFieldStructType* bitfield_struct =
+ BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
+ if (bitfield_struct == nullptr) {
+ ReportError(
+ "When a value of type SmiTagged<T> is used in a field access "
+ "expression, T is expected to be a bitfield struct type. Instead, T "
+ "is ",
+ **type_wrapped_in_smi);
+ }
+ const BitField& field = bitfield_struct->LookupField(fieldname);
+ return LocationReference::BitFieldAccess(reference, field);
+ }
if (reference.IsHeapReference()) {
VisitResult ref = reference.heap_reference();
bool is_const;
@@ -2190,9 +2213,8 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
// First fetch the bitfield struct, then get the bits out of it.
VisitResult bit_field_struct =
GenerateFetchFromLocation(reference.bit_field_struct_location());
- assembler().Emit(LoadBitFieldInstruction{
- BitFieldStructType::cast(bit_field_struct.type()),
- reference.bit_field()});
+ assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
+ reference.bit_field()});
return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
} else {
if (reference.IsHeapSlice()) {
@@ -2269,17 +2291,14 @@ void ImplementationVisitor::GenerateAssignToLocation(
GenerateFetchFromLocation(reference.bit_field_struct_location());
VisitResult converted_value =
GenerateImplicitConvert(reference.ReferencedType(), assignment_value);
- GenerateCopy(bit_field_struct);
- GenerateCopy(converted_value);
- assembler().Emit(StoreBitFieldInstruction{
- BitFieldStructType::cast(bit_field_struct.type()),
- reference.bit_field()});
- GenerateAssignToLocation(
- reference.bit_field_struct_location(),
- VisitResult(bit_field_struct.type(), assembler().TopRange(1)));
+ VisitResult updated_bit_field_struct =
+ GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
+ bit_field_struct, converted_value);
+ GenerateAssignToLocation(reference.bit_field_struct_location(),
+ updated_bit_field_struct);
} else {
DCHECK(reference.IsTemporary());
- ReportError("cannot assign to temporary ",
+ ReportError("cannot assign to const-bound or temporary ",
reference.temporary_description());
}
}
@@ -2669,7 +2688,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
LocationReference target = GetLocationReference(expr->target);
if (!target.IsVariableAccess()) {
VisitResult result = GenerateFetchFromLocation(target);
- target = LocationReference::Temporary(result, "method target result");
+ target = LocationReference::Temporary(result, "this parameter");
}
const AggregateType* target_type =
AggregateType::DynamicCast(target.ReferencedType());
@@ -2958,13 +2977,15 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
<< builtin->ExternalName();
std::string descriptor_name = builtin->ExternalName() + "Descriptor";
- constexpr size_t kFirstNonContextParameter = 1;
+ bool has_context_parameter = builtin->signature().HasContextParameter();
+ size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
size_t parameter_count =
builtin->parameter_names().size() - kFirstNonContextParameter;
- interface_descriptors << "class " << descriptor_name
- << " : public TorqueInterfaceDescriptor<"
- << parameter_count << "> {\n";
+ interface_descriptors
+ << "class " << descriptor_name
+ << " : public TorqueInterfaceDescriptor<" << parameter_count << ", "
+ << (has_context_parameter ? "true" : "false") << "> {\n";
interface_descriptors << " DECLARE_DESCRIPTOR_WITH_BASE("
<< descriptor_name
<< ", TorqueInterfaceDescriptor)\n";
@@ -3112,10 +3133,10 @@ class FieldOffsetsGenerator {
// In the presence of indexed fields, we already emitted kHeaderSize before
// the indexed field.
- if (!type_->IsShape() && !type_->HasIndexedField()) {
+ if (!type_->IsShape() && !header_size_emitted_) {
WriteMarker("kHeaderSize");
}
- if (type_->HasStaticSize()) {
+ if (!type_->IsAbstract() && type_->HasStaticSize()) {
WriteMarker("kSize");
}
}
@@ -3247,12 +3268,28 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
}
}
- header << "#define TORQUE_BODY_DESCRIPTOR_LIST_GENERATOR(V, _)\\\n";
+ header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
for (const ClassType* type : TypeOracle::GetClasses()) {
- if (type->ShouldGenerateBodyDescriptor()) {
+ if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
std::string type_name =
CapifyStringWithUnderscores(type->name()) + "_TYPE";
- header << "V(_, " << type_name << ", " << type->name() << ")\\\n";
+ header << "V(" << type_name << "," << type->name() << ")\\\n";
+ }
+ }
+ header << "\n";
+
+ header << "#define TORQUE_DATA_ONLY_VISITOR_ID_LIST(V)\\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->ShouldGenerateBodyDescriptor() && type->HasNoPointerSlots()) {
+ header << "V(" << type->name() << ")\\\n";
+ }
+ }
+ header << "\n";
+
+ header << "#define TORQUE_POINTER_VISITOR_ID_LIST(V)\\\n";
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->ShouldGenerateBodyDescriptor() && !type->HasNoPointerSlots()) {
+ header << "V(" << type->name() << ")\\\n";
}
}
header << "\n";
@@ -3356,7 +3393,7 @@ class CppClassGenerator {
void GenerateFieldAccessor(const Field& f);
void GenerateFieldAccessorForUntagged(const Field& f);
void GenerateFieldAccessorForSmi(const Field& f);
- void GenerateFieldAccessorForObject(const Field& f);
+ void GenerateFieldAccessorForTagged(const Field& f);
void GenerateClassCasts();
@@ -3473,26 +3510,18 @@ void CppClassGenerator::GenerateClass() {
hdr_ << " size += " << index_name_and_type.name << " * "
<< field_size << ";\n";
}
+ if (type_->size().Alignment() < TargetArchitecture::TaggedSize()) {
+ hdr_ << " size = OBJECT_POINTER_ALIGN(size);\n";
+ }
}
hdr_ << " return size;\n";
hdr_ << " }\n\n";
- hdr_ << " V8_INLINE static constexpr int32_t SizeFor(D o) {\n";
+ hdr_ << " V8_INLINE int32_t AllocatedSize() {\n";
hdr_ << " return SizeFor(";
first = true;
for (auto field : *index_fields) {
if (!first) hdr_ << ", ";
- // Subclasses of FixedArrayBase need to use the synchronized length
- // accessor to be consistent (theoretically, FixedArrayBase classes
- // can concurrently change size e.g. through left-trimming, although
- // in practice this won't happen for Torque-generated classes) as well as
- // explicitly convert to a Smi, since the C++-side accessors are
- // int-based.
- if (field.aggregate == TypeOracle::GetFixedArrayBaseType() &&
- field.name_and_type.name == "length") {
- hdr_ << "o.synchronized_length()";
- } else {
- hdr_ << "o." << field.name_and_type.name << "()";
- }
+ hdr_ << "this->" << field.name_and_type.name << "()";
first = false;
}
hdr_ << ");\n }\n";
@@ -3532,13 +3561,72 @@ void CppClassGenerator::GenerateClassConstructors() {
hdr_ << "protected:\n";
hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
+ hdr_ << " // Special-purpose constructor for subclasses that have fast "
+ "paths where\n";
+ hdr_ << " // their ptr() is a Smi.\n";
+ hdr_ << " inline explicit " << gen_name_
+ << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi);\n";
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
inl_ << " : P(ptr) {\n";
inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n";
inl_ << "}\n";
+
+ inl_ << "template<class D, class P>\n";
+ inl_ << "inline " << gen_name_T_ << "::" << gen_name_
+ << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
+ inl_ << " : P(ptr, allow_smi) {\n";
+ inl_ << " SLOW_DCHECK((allow_smi == "
+ "HeapObject::AllowInlineSmiStorage::kAllowBeingASmi && "
+ << "this->IsSmi()) || this->Is" << name_ << "());\n";
+ inl_ << "}\n";
+}
+
+namespace {
+std::string GenerateRuntimeTypeCheck(const Type* type,
+ const std::string& value) {
+ bool maybe_object = !type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
+ std::stringstream type_check;
+ bool at_start = true;
+ // If weak pointers are allowed, then start by checking for a cleared value.
+ if (maybe_object) {
+ type_check << value << ".IsCleared()";
+ at_start = false;
+ }
+ for (const RuntimeType& runtime_type : type->GetRuntimeTypes()) {
+ if (!at_start) type_check << " || ";
+ at_start = false;
+ if (maybe_object) {
+ bool strong = runtime_type.weak_ref_to.empty();
+ if (strong && runtime_type.type == "MaybeObject") {
+ // Rather than a generic Weak<T>, this is a basic type Tagged or
+ // WeakHeapObject. We can't validate anything more about the type of
+ // the object pointed to, so just check that it's weak.
+ type_check << value << ".IsWeak()";
+ } else {
+ type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
+ << value << ".GetHeapObjectOrSmi().Is"
+ << (strong ? runtime_type.type : runtime_type.weak_ref_to)
+ << "())";
+ }
+ } else {
+ type_check << value << ".Is" << runtime_type.type << "()";
+ }
+ }
+ return type_check.str();
+}
+
+void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
+ const ClassType* type, const Field& f) {
+ os << " DCHECK_GE(" << index << ", 0);\n";
+ if (base::Optional<NameAndType> array_length =
+ ExtractSimpleFieldArraySize(*type, *f.index)) {
+ os << " DCHECK_LT(" << index << ", this->" << array_length->name
+ << "());\n";
+ }
}
+} // namespace
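
For illustration, the check expression that the new GenerateRuntimeTypeCheck helper builds for a field that may hold, say, Undefined or Weak<Map> can be reproduced with a stripped-down, std-only sketch (simplified RuntimeType struct, the MaybeObject special case omitted; nothing here is V8 API):

// Sketch only: mirrors the "||"-joining in GenerateRuntimeTypeCheck above.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct RuntimeType {
  std::string type;         // e.g. "Undefined"
  std::string weak_ref_to;  // non-empty for Weak<T> references
};

std::string BuildTypeCheck(const std::vector<RuntimeType>& types,
                           const std::string& value, bool maybe_object) {
  std::stringstream check;
  bool at_start = true;
  if (maybe_object) {  // weak fields may also hold a cleared value
    check << value << ".IsCleared()";
    at_start = false;
  }
  for (const RuntimeType& rt : types) {
    if (!at_start) check << " || ";
    at_start = false;
    if (maybe_object) {
      bool strong = rt.weak_ref_to.empty();
      check << "(" << (strong ? "!" : "") << value << ".IsWeak() && " << value
            << ".GetHeapObjectOrSmi().Is"
            << (strong ? rt.type : rt.weak_ref_to) << "())";
    } else {
      check << value << ".Is" << rt.type << "()";
    }
  }
  return check.str();
}

int main() {
  // Hypothetical field of type Undefined|Weak<Map>:
  std::cout << BuildTypeCheck({{"Undefined", ""}, {"MaybeObject", "Map"}},
                              "value", /*maybe_object=*/true)
            << "\n";
}

Running this prints value.IsCleared() || (!value.IsWeak() && value.GetHeapObjectOrSmi().IsUndefined()) || (value.IsWeak() && value.GetHeapObjectOrSmi().IsMap()), which is the shape of the DCHECK/SLOW_DCHECK conditions the generated accessors receive.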
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
@@ -3548,14 +3636,19 @@ void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
// TODO(danno): Support generation of struct accessors
if (f.name_and_type.type->IsStructType()) return;
+ // TODO(v8:10391) Generate accessors for external pointers
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
+ return;
+ }
+
if (!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
return GenerateFieldAccessorForUntagged(f);
}
if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
return GenerateFieldAccessorForSmi(f);
}
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetObjectType())) {
- return GenerateFieldAccessorForObject(f);
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return GenerateFieldAccessorForTagged(f);
}
Error("Generation of field accessor for ", type_->name(),
@@ -3599,6 +3692,7 @@ void CppClassGenerator::GenerateFieldAccessorForUntagged(const Field& f) {
}
inl_ << ") const {\n";
if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
size_t field_size;
std::string size_string;
std::tie(field_size, size_string) = f.GetFieldSizeInformation();
@@ -3617,6 +3711,7 @@ void CppClassGenerator::GenerateFieldAccessorForUntagged(const Field& f) {
}
inl_ << type << " value) {\n";
if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
size_t field_size;
std::string size_string;
std::tie(field_size, size_string) = f.GetFieldSizeInformation();
@@ -3653,6 +3748,7 @@ void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
}
inl_ << ") const {\n";
if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
inl_ << " return this->template ReadField<Smi>(offset).value();\n";
inl_ << "}\n";
@@ -3669,6 +3765,7 @@ void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
}
inl_ << type << " value) {\n";
if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
inl_ << " WRITE_FIELD(*this, offset, Smi::FromInt(value));\n";
} else {
@@ -3677,18 +3774,16 @@ void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
inl_ << "}\n\n";
}
-void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
+void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
const Type* field_type = f.name_and_type.type;
- DCHECK(field_type->IsSubtypeOf(TypeOracle::GetObjectType()));
+ DCHECK(field_type->IsSubtypeOf(TypeOracle::GetTaggedType()));
const std::string& name = f.name_and_type.name;
- const std::string offset = "k" + CamelifyString(name) + "Offset";
- base::Optional<const ClassType*> class_type = field_type->ClassSupertype();
-
- std::string type =
- class_type ? (*class_type)->GetGeneratedTNodeTypeName() : "Object";
+ std::string offset = "k" + CamelifyString(name) + "Offset";
+ bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
+ std::string type = field_type->GetRuntimeType();
// Generate declarations in header.
- if (!class_type && field_type != TypeOracle::GetObjectType()) {
+ if (!field_type->IsClassType() && field_type != TypeOracle::GetObjectType()) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
}
@@ -3699,11 +3794,7 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
hdr_ << " inline void set_" << name << "(" << (f.index ? "int i, " : "")
<< type << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
- std::string type_check;
- for (const RuntimeType& runtime_type : field_type->GetRuntimeTypes()) {
- if (!type_check.empty()) type_check += " || ";
- type_check += "value.Is" + runtime_type.type + "()";
- }
+ std::string type_check = GenerateRuntimeTypeCheck(field_type, "value");
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
@@ -3719,28 +3810,25 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
<< "(const Isolate* isolate" << (f.index ? ", int i" : "")
<< ") const {\n";
- if (class_type) {
- if (f.index) {
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " return " << type
- << "::cast(RELAXED_READ_FIELD(*this, offset));\n";
- } else {
- inl_ << " return TaggedField<" << type << ", " << offset
- << ">::load(isolate, *this);\n";
- }
+ // TODO(tebbi): The distinction between relaxed and non-relaxed accesses here
+ // is pretty arbitrary and just tries to preserve what was there before.
+ // It currently doesn't really make a difference due to concurrent marking
+ // turning all loads and stores to be relaxed. We should probably drop the
+ // distinction at some point, even though in principle non-relaxed operations
+ // would give us TSAN protection.
+ if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
+ inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
+ inl_ << " auto value = TaggedField<" << type
+ << ">::Relaxed_Load(isolate, *this, offset);\n";
} else {
- // TODO(tebbi): load value as HeapObject when possible
- if (f.index) {
- inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " Object value = Object::cast(RELAXED_READ_FIELD(*this, "
- "offset));\n";
- } else {
- inl_ << " Object value = TaggedField<Object, " << offset
- << ">::load(isolate, *this);\n";
- }
+ inl_ << " auto value = TaggedField<" << type << ", " << offset
+ << ">::load(isolate, *this);\n";
+ }
+ if (!type_check.empty()) {
inl_ << " DCHECK(" << type_check << ");\n";
- inl_ << " return value;\n";
}
+ inl_ << " return value;\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
@@ -3749,30 +3837,37 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
inl_ << "int i, ";
}
inl_ << type << " value, WriteBarrierMode mode) {\n";
- inl_ << " SLOW_DCHECK(" << type_check << ");\n";
+ if (!type_check.empty()) {
+ inl_ << " SLOW_DCHECK(" << type_check << ");\n";
+ }
if (f.index) {
+ GenerateBoundsDCheck(inl_, "i", type_, f);
+ const char* write_macro =
+ strong_pointer ? "WRITE_FIELD" : "RELAXED_WRITE_WEAK_FIELD";
inl_ << " int offset = " << offset << " + i * kTaggedSize;\n";
- inl_ << " WRITE_FIELD(*this, offset, value);\n";
+ offset = "offset";
+ inl_ << " " << write_macro << "(*this, offset, value);\n";
} else {
- inl_ << " WRITE_FIELD(*this, " << offset << ", value);\n";
+ const char* write_macro =
+ strong_pointer ? "RELAXED_WRITE_FIELD" : "RELAXED_WRITE_WEAK_FIELD";
+ inl_ << " " << write_macro << "(*this, " << offset << ", value);\n";
}
- inl_ << " CONDITIONAL_WRITE_BARRIER(*this, " << offset
- << ", value, mode);\n";
+ const char* write_barrier = strong_pointer ? "CONDITIONAL_WRITE_BARRIER"
+ : "CONDITIONAL_WEAK_WRITE_BARRIER";
+ inl_ << " " << write_barrier << "(*this, " << offset << ", value, mode);\n";
inl_ << "}\n\n";
}
void EmitClassDefinitionHeadersIncludes(const std::string& basename,
std::stringstream& header,
std::stringstream& inline_header) {
- header << "#include \"src/objects/fixed-array.h\"\n";
header << "#include \"src/objects/objects.h\"\n";
+ header << "#include \"src/objects/heap-object.h\"\n";
header << "#include \"src/objects/smi.h\"\n";
header << "#include \"torque-generated/field-offsets-tq.h\"\n";
header << "#include <type_traits>\n\n";
inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n";
- inline_header << "#include "
- "\"torque-generated/objects-body-descriptors-tq-inl.h\"\n\n";
inline_header << "#include \"src/objects/js-promise.h\"\n";
inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
inline_header << "#include \"src/objects/module.h\"\n";
@@ -3811,14 +3906,19 @@ void ImplementationVisitor::GenerateClassDefinitions(
std::stringstream inline_external_header;
std::stringstream internal_header;
std::stringstream inline_internal_header;
+ std::stringstream exported_header;
+ std::stringstream inline_exported_header;
std::stringstream implementation;
std::stringstream factory_header;
std::stringstream factory_impl;
std::string basename = "class-definitions-tq";
std::string internal_basename = "internal-" + basename;
+ std::string exported_basename = "exported-" + basename;
std::string file_basename = output_directory + "/" + basename;
std::string internal_file_basename =
output_directory + "/" + internal_basename;
+ std::string exported_file_basename =
+ output_directory + "/" + exported_basename;
std::string factory_basename = "factory-tq";
std::string factory_file_basename = output_directory + "/" + factory_basename;
@@ -3834,8 +3934,24 @@ void ImplementationVisitor::GenerateClassDefinitions(
IncludeGuardScope internal_inline_header_guard(
inline_internal_header, internal_basename + "-inl.h");
- external_header
+ IncludeGuardScope exported_header_guard(exported_header,
+ exported_basename + ".h");
+
+ IncludeGuardScope exported_inline_header_guard(
+ inline_exported_header, exported_basename + "-inl.h");
+
+ internal_header << "#include \"torque-generated/class-definitions-tq.h\"\n";
+ internal_header << "#include \"src/objects/fixed-array.h\"\n";
+ inline_internal_header
<< "#include \"torque-generated/internal-class-definitions-tq.h\"\n";
+ inline_internal_header
+ << "#include \"torque-generated/class-definitions-tq-inl.h\"\n";
+
+ exported_header << "#include \"src/objects/fixed-array.h\"\n";
+ exported_header << "#include \"torque-generated/class-definitions-tq.h\"\n";
+ inline_exported_header
+ << "#include \"torque-generated/exported-class-definitions-tq.h\"\n";
+ inline_exported_header << "#include \"src/objects/fixed-array-inl.h\"\n";
EmitClassDefinitionHeadersIncludes(basename, external_header,
inline_external_header);
@@ -3850,6 +3966,10 @@ void ImplementationVisitor::GenerateClassDefinitions(
IncludeObjectMacrosScope internal_inline_header_macros(
inline_internal_header);
+ IncludeObjectMacrosScope exported_header_macros(exported_header);
+ IncludeObjectMacrosScope exported_inline_header_macros(
+ inline_exported_header);
+
NamespaceScope header_namespaces(external_header, {"v8", "internal"});
NamespaceScope inline_header_namespaces(inline_external_header,
{"v8", "internal"});
@@ -3857,6 +3977,10 @@ void ImplementationVisitor::GenerateClassDefinitions(
{"v8", "internal"});
NamespaceScope internal_inline_header_namespaces(inline_internal_header,
{"v8", "internal"});
+ NamespaceScope exported_header_namespaces(exported_header,
+ {"v8", "internal"});
+ NamespaceScope exported_inline_header_namespaces(inline_exported_header,
+ {"v8", "internal"});
EmitClassDefinitionHeadersForwardDeclarations(external_header);
EmitClassDefinitionHeadersForwardDeclarations(internal_header);
@@ -3869,44 +3993,37 @@ void ImplementationVisitor::GenerateClassDefinitions(
factory_impl
<< "#include "
"\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
+ factory_impl
+ << "#include "
+ "\"torque-generated/exported-class-definitions-tq-inl.h\"\n\n";
NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
factory_impl << "\n";
implementation
<< "#include \"torque-generated/class-definitions-tq.h\"\n\n";
implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
- implementation << "#include \"src/objects/arguments-inl.h\"\n";
- implementation << "#include \"src/objects/js-collection-inl.h\"\n";
- implementation << "#include \"src/objects/embedder-data-array-inl.h\"\n";
- implementation << "#include \"src/objects/js-generator-inl.h\"\n";
- implementation << "#include \"src/objects/js-regexp-inl.h\"\n";
- implementation << "#include \"src/objects/js-weak-refs-inl.h\"\n";
implementation
- << "#include \"src/objects/js-regexp-string-iterator-inl.h\"\n";
- implementation << "#include \"src/objects/literal-objects-inl.h\"\n";
- implementation << "#include \"src/objects/microtask-inl.h\"\n";
- implementation << "#include \"src/objects/module-inl.h\"\n";
- implementation << "#include \"src/objects/promise-inl.h\"\n";
- implementation
- << "#include \"src/objects/property-descriptor-object-inl.h\"\n";
- implementation << "#include \"src/objects/stack-frame-info-inl.h\"\n";
- implementation << "#include \"src/objects/struct-inl.h\"\n";
- implementation << "#include \"src/objects/template-objects-inl.h\"\n\n";
+ << "#include \"src/objects/class-definitions-tq-deps-inl.h\"\n\n";
implementation
<< "#include "
"\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
+ implementation
+ << "#include "
+ "\"torque-generated/exported-class-definitions-tq-inl.h\"\n\n";
NamespaceScope implementation_namespaces(implementation,
{"v8", "internal"});
std::set<const StructType*, TypeLess> structs_used_in_classes;
for (const ClassType* type : TypeOracle::GetClasses()) {
- std::stringstream& header = (type->IsExtern() || type->ShouldExport())
- ? external_header
- : internal_header;
+ std::stringstream& header =
+ type->IsExtern()
+ ? external_header
+ : type->ShouldExport() ? exported_header : internal_header;
std::stringstream& inline_header =
- (type->IsExtern() || type->ShouldExport()) ? inline_external_header
- : inline_internal_header;
+ type->IsExtern() ? inline_external_header
+ : type->ShouldExport() ? inline_exported_header
+ : inline_internal_header;
if (type->GenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
@@ -4002,6 +4119,8 @@ void ImplementationVisitor::GenerateClassDefinitions(
WriteFile(file_basename + ".cc", implementation.str());
WriteFile(internal_file_basename + ".h", internal_header.str());
WriteFile(internal_file_basename + "-inl.h", inline_internal_header.str());
+ WriteFile(exported_file_basename + ".h", exported_header.str());
+ WriteFile(exported_file_basename + "-inl.h", inline_exported_header.str());
WriteFile(factory_file_basename + ".inc", factory_header.str());
WriteFile(factory_file_basename + ".cc", factory_impl.str());
}
@@ -4053,6 +4172,8 @@ void ImplementationVisitor::GeneratePrintDefinitions(
impl << "#include <iosfwd>\n\n";
impl << "#include "
"\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
+ impl << "#include "
+ "\"torque-generated/exported-class-definitions-tq-inl.h\"\n";
impl << "#include \"src/objects/struct-inl.h\"\n\n";
impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
@@ -4080,140 +4201,174 @@ void ImplementationVisitor::GeneratePrintDefinitions(
WriteFile(output_directory + "/" + file_name, new_contents);
}
+base::Optional<std::string> MatchSimpleBodyDescriptor(const ClassType* type) {
+ std::vector<ObjectSlotKind> slots = type->ComputeHeaderSlotKinds();
+ if (!type->HasStaticSize()) {
+ slots.push_back(*type->ComputeArraySlotKind());
+ }
+
+ // Skip the map slot.
+ size_t i = 1;
+ while (i < slots.size() && slots[i] == ObjectSlotKind::kNoPointer) ++i;
+ if (i == slots.size()) return "DataOnlyBodyDescriptor";
+ bool has_weak_pointers = false;
+ size_t start_index = i;
+ for (; i < slots.size(); ++i) {
+ if (slots[i] == ObjectSlotKind::kStrongPointer) {
+ continue;
+ } else if (slots[i] == ObjectSlotKind::kMaybeObjectPointer) {
+ has_weak_pointers = true;
+ } else if (slots[i] == ObjectSlotKind::kNoPointer) {
+ break;
+ } else {
+ return base::nullopt;
+ }
+ }
+ size_t end_index = i;
+ for (; i < slots.size(); ++i) {
+ if (slots[i] != ObjectSlotKind::kNoPointer) return base::nullopt;
+ }
+ size_t start_offset = start_index * TargetArchitecture::TaggedSize();
+ size_t end_offset = end_index * TargetArchitecture::TaggedSize();
+ // We pick a suffix-range body descriptor even in cases where the object size
+ // is fixed, to reduce the amount of code executed for object visitation.
+ if (end_index == slots.size()) {
+ return ToString("SuffixRange", has_weak_pointers ? "Weak" : "",
+ "BodyDescriptor<", start_offset, ">");
+ }
+ if (!has_weak_pointers) {
+ return ToString("FixedRangeDescriptor<", start_offset, ", ", end_offset,
+ ", ", *type->size().SingleValue(), ">");
+ }
+ return base::nullopt;
+}
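
The MatchSimpleBodyDescriptor logic above scans the per-slot kinds for a single run of pointers after the map slot. A hedged, standalone restatement of that scan (placeholder SlotKind enum, assumed 8-byte tagged size, and the object-size parameter of FixedRangeDescriptor omitted):

// Standalone sketch of the slot scan in MatchSimpleBodyDescriptor above.
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

enum class SlotKind { kNoPointer, kStrongPointer, kMaybeObjectPointer, kCustomWeakPointer };

std::optional<std::string> MatchSimple(const std::vector<SlotKind>& slots) {
  constexpr size_t kTaggedSize = 8;
  size_t i = 1;  // slot 0 is the map and is skipped, as in the generator above
  while (i < slots.size() && slots[i] == SlotKind::kNoPointer) ++i;
  if (i == slots.size()) return "DataOnlyBodyDescriptor";
  bool has_weak = false;
  size_t start = i;
  for (; i < slots.size(); ++i) {
    if (slots[i] == SlotKind::kStrongPointer) continue;
    if (slots[i] == SlotKind::kMaybeObjectPointer) { has_weak = true; continue; }
    if (slots[i] == SlotKind::kNoPointer) break;
    return std::nullopt;  // custom weak pointers need a hand-written descriptor
  }
  size_t end = i;
  for (; i < slots.size(); ++i)
    if (slots[i] != SlotKind::kNoPointer) return std::nullopt;
  if (end == slots.size())  // pointers run to the end of the object
    return (has_weak ? std::string("SuffixRangeWeakBodyDescriptor<")
                     : std::string("SuffixRangeBodyDescriptor<")) +
           std::to_string(start * kTaggedSize) + ">";
  if (!has_weak)
    return "FixedRangeDescriptor<" + std::to_string(start * kTaggedSize) +
           ", " + std::to_string(end * kTaggedSize) + ">";
  return std::nullopt;
}

int main() {
  // Map slot, two strong pointer slots, then raw data: a fixed pointer range.
  std::cout << MatchSimple({SlotKind::kStrongPointer, SlotKind::kStrongPointer,
                            SlotKind::kStrongPointer, SlotKind::kNoPointer})
                   .value_or("<none>")
            << "\n";  // prints FixedRangeDescriptor<8, 24>
}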
+
void ImplementationVisitor::GenerateBodyDescriptors(
const std::string& output_directory) {
- std::string file_name = "objects-body-descriptors-tq-inl";
+ std::string file_name = "objects-body-descriptors-tq-inl.inc";
std::stringstream h_contents;
- {
- IncludeGuardScope include_guard(h_contents, file_name + ".h");
-
- h_contents << "\n#include \"src/objects/objects-body-descriptors.h\"\n";
- h_contents << "\n#include \"torque-generated/class-definitions-tq.h\"\n";
- h_contents
- << "\n#include \"torque-generated/internal-class-definitions-tq.h\"\n";
- h_contents << "\n#include "
- "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
-
- NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
for (const ClassType* type : TypeOracle::GetClasses()) {
std::string name = type->name();
if (!type->ShouldGenerateBodyDescriptor()) continue;
- const ClassType* super_class = type->GetSuperClass();
- std::string super_name = super_class->name();
- h_contents << "class " << name
- << "::BodyDescriptor final : public BodyDescriptorBase {\n";
-
- h_contents << " public:\n";
-
- h_contents << " static bool IsValidSlot(Map map, HeapObject obj, int "
- "offset) {\n";
-
- if (super_class == TypeOracle::GetHeapObjectType() ||
- super_class == TypeOracle::GetFixedArrayBaseType()) {
- h_contents << " if (offset < " << super_name
- << "::kHeaderSize) return true;\n";
+ bool has_array_fields = !type->HasStaticSize();
+ std::vector<ObjectSlotKind> header_slot_kinds =
+ type->ComputeHeaderSlotKinds();
+ base::Optional<ObjectSlotKind> array_slot_kind =
+ type->ComputeArraySlotKind();
+ DCHECK_EQ(has_array_fields, array_slot_kind.has_value());
+
+ h_contents << "class " << name << "::BodyDescriptor final : public ";
+ if (auto descriptor_name = MatchSimpleBodyDescriptor(type)) {
+ h_contents << *descriptor_name << " {\n";
+ h_contents << " public:\n";
} else {
- h_contents << " if (" << super_name
- << "::BodyDescriptor::IsValidSlot(map, obj, offset)) return "
- "true;\n";
- }
+ h_contents << "BodyDescriptorBase {\n";
+ h_contents << " public:\n";
- h_contents << " return offset >= " << name
- << "::kStartOfStrongFieldsOffset"
- << " && offset < " << name << ""
- << "::kEndOfStrongFieldsOffset;\n";
- h_contents << " }\n\n";
-
- h_contents << " template <typename ObjectVisitor>\n";
- h_contents << " static inline void IterateBody(Map map, HeapObject obj, "
- "int object_size, ObjectVisitor* v) {\n";
-
- // There may be MaybeObjects embedded in the strong pointer section, which
- // are not suppored.
- for (auto& f : type->fields()) {
- for (const Type* t : LowerType(f.name_and_type.type)) {
- if (t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
- !t->IsSubtypeOf(TypeOracle::GetObjectType())) {
- Error("Cannot generate body descriptor for field ",
- f.name_and_type.name, " of class ", name, " because ", *t,
- " can contain tagged weak pointers.");
- }
+ h_contents << " static bool IsValidSlot(Map map, HeapObject obj, int "
+ "offset) {\n";
+ if (has_array_fields) {
+ h_contents << " if (offset < kHeaderSize) {\n";
}
- }
+ h_contents << " bool valid_slots[] = {";
+ for (ObjectSlotKind slot : header_slot_kinds) {
+ h_contents << (slot != ObjectSlotKind::kNoPointer ? "1" : "0") << ",";
+ }
+ h_contents << "};\n"
+ << " return valid_slots[static_cast<unsigned "
+ "int>(offset)/kTaggedSize];\n";
+ if (has_array_fields) {
+ h_contents << " }\n";
+ bool array_is_tagged = *array_slot_kind != ObjectSlotKind::kNoPointer;
+ h_contents << " return " << (array_is_tagged ? "true" : "false")
+ << ";\n";
+ }
+ h_contents << " }\n\n";
- if (super_class != TypeOracle::GetHeapObjectType() &&
- super_class != TypeOracle::GetFixedArrayBaseType()) {
+ h_contents << " template <typename ObjectVisitor>\n";
h_contents
- << " " << super_name
- << "::BodyDescriptor::IterateBody(map, obj, object_size, v);\n";
- }
-
- h_contents << " if (" << name
- << "::kStartOfStrongFieldsOffset != " << name
- << "::kEndOfStrongFieldsOffset) {\n";
- h_contents << " IteratePointers(obj, " << name
- << "::kStartOfStrongFieldsOffset, " << name
- << "::kEndOfStrongFieldsOffset, v);\n";
- h_contents << " }\n";
-
- h_contents << " if (" << name
- << "::kStartOfWeakFieldsOffset != " << name
- << "::kEndOfWeakFieldsOffset) {\n";
- h_contents << " IterateCustomWeakPointers(obj, " << name
- << "::kStartOfWeakFieldsOffset, " << name
- << "::kEndOfWeakFieldsOffset, v);\n";
- h_contents << " }\n";
-
- // Since all of the index fields are at the end of the object and must
- // only be Tagged values, emit only a single IteratePointers from the
- // beginning of the first indexed field to the end of the object.
- bool first_index_seen = false;
- for (const Field& field : type->ComputeAllFields()) {
- if (field.index && !first_index_seen) {
- std::string indexed_field_name =
- CamelifyString(field.name_and_type.name);
- if (field.name_and_type.type->IsSubtypeOf(
- TypeOracle::GetObjectType())) {
- h_contents << " BodyDescriptorBase::IteratePointers(obj, "
- << name << "::k" << indexed_field_name << "Offset, "
- << name << "::SizeFor(" << name << "::cast(obj)), v);\n";
+ << " static inline void IterateBody(Map map, HeapObject obj, "
+ "int object_size, ObjectVisitor* v) {\n";
+
+ std::vector<ObjectSlotKind> slots = std::move(header_slot_kinds);
+ if (has_array_fields) slots.push_back(*array_slot_kind);
+
+ // Skip the map slot.
+ slots.erase(slots.begin());
+ size_t start_offset = TargetArchitecture::TaggedSize();
+
+ size_t end_offset = start_offset;
+ ObjectSlotKind section_kind;
+ for (size_t i = 0; i <= slots.size(); ++i) {
+ base::Optional<ObjectSlotKind> next_section_kind;
+ bool finished_section = false;
+ if (i == 0) {
+ next_section_kind = slots[i];
+ } else if (i < slots.size()) {
+ if (auto combined = Combine(section_kind, slots[i])) {
+ next_section_kind = *combined;
+ } else {
+ next_section_kind = slots[i];
+ finished_section = true;
+ }
} else {
- Error(
- "generating body descriptors for indexed fields not subtype of "
- "Object isn't (yet) supported");
+ finished_section = true;
}
- first_index_seen = true;
- }
- if (first_index_seen) {
- for (const Type* t : LowerType(field.name_and_type.type)) {
- if (!t->IsSubtypeOf(TypeOracle::GetObjectType())) {
- Error("cannot generate class body descriptor for \"",
- type->name(),
- "\", all fields of including and after the first indexed "
- "member must no comprised only of subtypes of Object "
- "(field \"",
- field.name_and_type.name, "\" is not)");
+ if (finished_section) {
+ bool is_array_slot = i == slots.size() && has_array_fields;
+ bool multiple_slots =
+ is_array_slot ||
+ (end_offset - start_offset > TargetArchitecture::TaggedSize());
+ base::Optional<std::string> iterate_command;
+ switch (section_kind) {
+ case ObjectSlotKind::kStrongPointer:
+ iterate_command = "IteratePointer";
+ break;
+ case ObjectSlotKind::kMaybeObjectPointer:
+ iterate_command = "IterateMaybeWeakPointer";
+ break;
+ case ObjectSlotKind::kCustomWeakPointer:
+ iterate_command = "IterateCustomWeakPointer";
+ break;
+ case ObjectSlotKind::kNoPointer:
+ break;
+ }
+ if (iterate_command) {
+ if (multiple_slots) *iterate_command += "s";
+ h_contents << " " << *iterate_command << "(obj, "
+ << start_offset;
+ if (multiple_slots) {
+ h_contents << ", "
+ << (i == slots.size() ? "object_size"
+ : std::to_string(end_offset));
+ }
+ h_contents << ", v);\n";
}
+ start_offset = end_offset;
}
+ if (i < slots.size()) section_kind = *next_section_kind;
+ end_offset += TargetArchitecture::TaggedSize();
}
- }
- h_contents << " }\n\n";
+ h_contents << " }\n\n";
+ }
h_contents
<< " static inline int SizeOf(Map map, HeapObject raw_object) {\n";
- h_contents << " " << name << " object = " << name
- << "::cast(raw_object);\n";
- h_contents << " return " << name << "::SizeFor(object);\n";
+ if (type->size().SingleValue()) {
+ h_contents << " return " << *type->size().SingleValue() << ";\n";
+ } else {
+ h_contents << " return " << name
+ << "::cast(raw_object).AllocatedSize();\n";
+ }
h_contents << " }\n\n";
h_contents << "};\n";
}
- }
- WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
+ WriteFile(output_directory + "/" + file_name, h_contents.str());
}
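
The IterateBody emission above groups consecutive slots into sections and emits one visitation call per section. A simplified standalone sketch of that grouping (identical kinds only; the real Combine() also folds strong pointers into a weak section, the singular/plural Iterate variants are collapsed, and the array tail uses object_size rather than a fixed end offset):

// Sketch of the run-grouping behind the generated IterateBody methods.
#include <cstddef>
#include <iostream>
#include <vector>

enum class SlotKind { kNoPointer, kStrongPointer, kMaybeObjectPointer, kCustomWeakPointer };

void EmitIterateCalls(const std::vector<SlotKind>& slots) {
  constexpr size_t kTaggedSize = 8;
  size_t run_start = 0;
  for (size_t i = 1; i <= slots.size(); ++i) {
    if (i < slots.size() && slots[i] == slots[run_start]) continue;
    size_t start_offset = kTaggedSize * (run_start + 1);  // +1 skips the map slot
    size_t end_offset = kTaggedSize * (i + 1);
    switch (slots[run_start]) {
      case SlotKind::kStrongPointer:
        std::cout << "IteratePointers(obj, " << start_offset << ", "
                  << end_offset << ", v);\n";
        break;
      case SlotKind::kMaybeObjectPointer:
        std::cout << "IterateMaybeWeakPointers(obj, " << start_offset << ", "
                  << end_offset << ", v);\n";
        break;
      case SlotKind::kCustomWeakPointer:
        std::cout << "IterateCustomWeakPointers(obj, " << start_offset << ", "
                  << end_offset << ", v);\n";
        break;
      case SlotKind::kNoPointer:
        break;  // raw data: nothing for the GC to visit
    }
    run_start = i;
  }
}

int main() {
  // Two strong slots, one untagged slot, then a weak slot (map slot excluded).
  EmitIterateCalls({SlotKind::kStrongPointer, SlotKind::kStrongPointer,
                    SlotKind::kNoPointer, SlotKind::kMaybeObjectPointer});
  // Prints IteratePointers(obj, 8, 24, v); and IterateMaybeWeakPointers(obj, 32, 40, v);
}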
namespace {
@@ -4242,9 +4397,8 @@ void GenerateFieldValueVerifier(const std::string& class_name,
// Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<"
- << object_type << ", " << class_name << "::k"
- << CamelifyString(class_field.name_and_type.name)
- << "Offset>::load(o, " << index_offset << ");\n";
+ << object_type << ", " << *class_field.offset << ">::load(o, "
+ << index_offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@@ -4254,34 +4408,8 @@ void GenerateFieldValueVerifier(const std::string& class_name,
// the Object type because it would not check anything beyond what we already
// checked with VerifyPointer.
if (field_type != TypeOracle::GetObjectType()) {
- std::stringstream type_check;
- bool at_start = true;
- // If weak pointers are allowed, then start by checking for a cleared value.
- if (maybe_object) {
- type_check << value << ".IsCleared()";
- at_start = false;
- }
- for (const RuntimeType& runtime_type : field_type->GetRuntimeTypes()) {
- if (!at_start) type_check << " || ";
- at_start = false;
- if (maybe_object) {
- bool strong = runtime_type.weak_ref_to.empty();
- if (strong && runtime_type.type == "MaybeObject") {
- // Rather than a generic Weak<T>, this is a basic type Tagged or
- // WeakHeapObject. We can't validate anything more about the type of
- // the object pointed to, so just check that it's weak.
- type_check << value << ".IsWeak()";
- } else {
- type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
- << value << ".GetHeapObjectOrSmi().Is"
- << (strong ? runtime_type.type : runtime_type.weak_ref_to)
- << "())";
- }
- } else {
- type_check << value << ".Is" << runtime_type.type << "()";
- }
- }
- cc_contents << " CHECK(" << type_check.str() << ");\n";
+ cc_contents << " CHECK(" << GenerateRuntimeTypeCheck(field_type, value)
+ << ");\n";
}
}
@@ -4369,6 +4497,8 @@ void ImplementationVisitor::GenerateClassVerifiers(
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
cc_contents << "#include "
"\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
+ cc_contents << "#include "
+ "\"torque-generated/exported-class-definitions-tq-inl.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -4384,7 +4514,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
const char* verifier_class = "TorqueGeneratedClassVerifiers";
- h_contents << "class " << verifier_class << "{\n";
+ h_contents << "class V8_EXPORT_PRIVATE " << verifier_class << "{\n";
h_contents << " public:\n";
for (const ClassType* type : TypeOracle::GetClasses()) {
@@ -4481,6 +4611,9 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
h_contents << "#include \"torque-generated/csa-types-tq.h\"\n";
h_contents
<< "#include \"torque-generated/internal-class-definitions-tq.h\"\n";
+ h_contents
+ << "#include \"torque-generated/exported-class-definitions-tq.h\"\n";
+ cc_contents << "#include \"src/objects/fixed-array-inl.h\"\n";
cc_contents << "#include \"src/objects/free-space.h\"\n";
cc_contents << "#include \"src/objects/js-regexp-string-iterator.h\"\n";
cc_contents << "#include \"src/objects/ordered-hash-table.h\"\n";
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 0cd58aa922..c980f3d59b 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -432,11 +432,29 @@ class ImplementationVisitor {
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
+ template <typename T>
void CheckInitializersWellformed(
- const std::string& aggregate_name,
- const std::vector<Field>& aggregate_fields,
+ const std::string& aggregate_name, const std::vector<T>& aggregate_fields,
const std::vector<NameAndExpression>& initializers,
- bool ignore_first_field = false);
+ bool ignore_first_field = false) {
+ size_t fields_offset = ignore_first_field ? 1 : 0;
+ size_t fields_size = aggregate_fields.size() - fields_offset;
+ for (size_t i = 0; i < std::min(fields_size, initializers.size()); i++) {
+ const std::string& field_name =
+ aggregate_fields[i + fields_offset].name_and_type.name;
+ Identifier* found_name = initializers[i].name;
+ if (field_name != found_name->value) {
+ Error("Expected field name \"", field_name, "\" instead of \"",
+ found_name->value, "\"")
+ .Position(found_name->pos)
+ .Throw();
+ }
+ }
+ if (fields_size != initializers.size()) {
+ ReportError("expected ", fields_size, " initializers for ",
+ aggregate_name, " found ", initializers.size());
+ }
+ }
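
The templated CheckInitializersWellformed above enforces that initializers appear in declaration order and cover every field. A minimal sketch of the same check over plain strings (hypothetical field names, no Torque AST types or error positions):

// Sketch only: order and arity check as in CheckInitializersWellformed above.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

bool InitializersWellformed(const std::vector<std::string>& fields,
                            const std::vector<std::string>& inits) {
  for (size_t i = 0; i < std::min(fields.size(), inits.size()); ++i) {
    if (fields[i] != inits[i]) {
      std::cerr << "Expected field name \"" << fields[i] << "\" instead of \""
                << inits[i] << "\"\n";
      return false;
    }
  }
  if (fields.size() != inits.size()) {
    std::cerr << "expected " << fields.size() << " initializers, found "
              << inits.size() << "\n";
    return false;
  }
  return true;
}

int main() {
  // Skipping "length" is reported as a name mismatch at position 1.
  InitializersWellformed({"map", "length", "elements"}, {"map", "elements"});
}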
InitializerResults VisitInitializerResults(
const ClassType* class_type,
@@ -713,6 +731,12 @@ class ImplementationVisitor {
StackRange GenerateLabelGoto(LocalLabel* label,
base::Optional<StackRange> arguments = {});
+ VisitResult GenerateSetBitField(const Type* bitfield_struct_type,
+ const BitField& bitfield,
+ VisitResult bitfield_struct,
+ VisitResult value,
+ bool starts_as_zero = false);
+
std::vector<Binding<LocalLabel>*> LabelsFromIdentifiers(
const std::vector<Identifier*>& names);
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index a06c984629..6e708f7fea 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -256,6 +256,9 @@ int SolveInstanceTypeConstraints(
root->start = root->value;
}
root->num_values = root->end - root->start + 1;
+ root->type->InitializeInstanceTypes(
+ root->value == -1 ? base::Optional<int>{} : root->value,
+ std::make_pair(root->start, root->end));
if (root->num_values > 0) {
destination->push_back(std::move(root));
@@ -433,46 +436,60 @@ void ImplementationVisitor::GenerateInstanceTypes(
header << only_declared_range_instance_types.str();
header << "\n";
- header << "// Instance types for non-extern Torque classes.\n";
- header << "#define TORQUE_INSTANCE_TYPES(V) \\\n";
+ std::stringstream torque_internal_class_list;
+ std::stringstream torque_internal_varsize_instance_type_list;
+ std::stringstream torque_internal_fixed_instance_type_list;
+ std::stringstream torque_internal_map_csa_list;
+ std::stringstream torque_internal_map_root_list;
+
for (const ClassType* type : TypeOracle::GetClasses()) {
- if (type->IsExtern()) continue;
- std::string type_name =
+ std::string upper_case_name = type->name();
+ std::string lower_case_name = SnakeifyString(type->name());
+ std::string instance_type_name =
CapifyStringWithUnderscores(type->name()) + "_TYPE";
- header << " V(" << type_name << ") \\\n";
- }
- header << "\n";
- header << "// Map list macros for non-extern Torque classes.\n";
- header << "#define TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(V, _) \\\n";
- for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->IsExtern()) continue;
- if (!type->HasIndexedField()) continue;
- std::string type_name =
- CapifyStringWithUnderscores(type->name()) + "_TYPE";
- std::string variable_name = SnakeifyString(type->name());
- header << " V(_, " << type_name << ", " << type->name() << ", "
- << variable_name << ") \\\n";
+ torque_internal_class_list << " V(" << upper_case_name << ") \\\n";
+
+ if (type->IsAbstract()) continue;
+ torque_internal_map_csa_list << " V(" << upper_case_name << "Map, "
+ << lower_case_name << "_map, "
+ << upper_case_name << "Map) \\\n";
+ torque_internal_map_root_list << " V(Map, " << lower_case_name
+ << "_map, " << upper_case_name
+ << "Map) \\\n";
+ std::stringstream& list =
+ type->HasStaticSize() ? torque_internal_fixed_instance_type_list
+ : torque_internal_varsize_instance_type_list;
+ list << " V(" << instance_type_name << ", " << upper_case_name << ", "
+ << lower_case_name << ") \\\n";
}
+
+ header << "// Non-extern Torque classes.\n";
+ header << "#define TORQUE_INTERNAL_CLASS_LIST(V) \\\n";
+ header << torque_internal_class_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(V, _) \\\n";
- for (const ClassType* type : TypeOracle::GetClasses()) {
- if (type->IsExtern()) continue;
- if (type->HasIndexedField()) continue;
- std::string type_name =
- CapifyStringWithUnderscores(type->name()) + "_TYPE";
- std::string variable_name = SnakeifyString(type->name());
- header << " V(_, " << type_name << ", " << type->name() << ", "
- << variable_name << ") \\\n";
- }
+ header << "#define TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
+ header << torque_internal_varsize_instance_type_list.str();
+ header << "\n";
+ header << "#define TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << torque_internal_fixed_instance_type_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_CLASS_LIST_GENERATOR(V, _) \\\n";
- header << " TORQUE_INTERNAL_VARSIZE_CLASS_LIST_GENERATOR(V, _) \\\n";
- header << " TORQUE_INTERNAL_FIXED_CLASS_LIST_GENERATOR(V, _)\n";
+ header << "#define TORQUE_INTERNAL_INSTANCE_TYPE_LIST(V) \\\n";
+ header << " TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
+ header << " TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << "\n";
+ header << "#define TORQUE_INTERNAL_MAP_CSA_LIST(V) \\\n";
+ header << torque_internal_map_csa_list.str();
+ header << "\n";
+ header << "#define TORQUE_INTERNAL_MAP_ROOT_LIST(V) \\\n";
+ header << torque_internal_map_root_list.str();
header << "\n";
}
std::string output_header_path = output_directory + "/" + file_name;
WriteFile(output_header_path, header.str());
+
+ GlobalContext::SetInstanceTypesInitialized();
}
} // namespace torque
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 2d5eff71da..4609e8c223 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -349,14 +349,13 @@ struct StoreReferenceInstruction : InstructionBase {
// Pops a bitfield struct; pushes a bitfield value extracted from it.
struct LoadBitFieldInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- LoadBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
- BitField bit_field)
+ LoadBitFieldInstruction(const Type* bit_field_struct_type, BitField bit_field)
: bit_field_struct_type(bit_field_struct_type),
bit_field(std::move(bit_field)) {}
DefinitionLocation GetValueDefinition() const;
- const BitFieldStructType* bit_field_struct_type;
+ const Type* bit_field_struct_type;
BitField bit_field;
};
@@ -364,15 +363,18 @@ struct LoadBitFieldInstruction : InstructionBase {
// containing the updated value.
struct StoreBitFieldInstruction : InstructionBase {
TORQUE_INSTRUCTION_BOILERPLATE()
- StoreBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
- BitField bit_field)
+ StoreBitFieldInstruction(const Type* bit_field_struct_type,
+ BitField bit_field, bool starts_as_zero)
: bit_field_struct_type(bit_field_struct_type),
- bit_field(std::move(bit_field)) {}
+ bit_field(std::move(bit_field)),
+ starts_as_zero(starts_as_zero) {}
DefinitionLocation GetValueDefinition() const;
- const BitFieldStructType* bit_field_struct_type;
+ const Type* bit_field_struct_type;
BitField bit_field;
+ // Allows skipping the mask step if we know the starting value is zero.
+ bool starts_as_zero;
};
struct CallIntrinsicInstruction : InstructionBase {
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index ad7d906d54..fd717b2649 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -73,6 +73,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
ImplementationVisitor implementation_visitor;
implementation_visitor.SetDryRun(output_directory.length() == 0);
+ implementation_visitor.GenerateInstanceTypes(output_directory);
implementation_visitor.BeginCSAFiles();
implementation_visitor.VisitAllDeclarables();
@@ -91,7 +92,6 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateBodyDescriptors(output_directory);
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
- implementation_visitor.GenerateInstanceTypes(output_directory);
implementation_visitor.EndCSAFiles();
implementation_visitor.GenerateImplementation(output_directory);
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index e63827db2c..fa496ae6a0 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -86,12 +86,8 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<base::Optional<TypeExpression*>>::id =
ParseResultTypeId::kOptionalTypeExpressionPtr;
template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<LabelBlock*>::id =
- ParseResultTypeId::kLabelBlockPtr;
-template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<base::Optional<LabelBlock*>>::id =
- ParseResultTypeId::kOptionalLabelBlockPtr;
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<TryHandler*>::id =
+ ParseResultTypeId::kTryHandlerPtr;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Expression*>::id =
ParseResultTypeId::kExpressionPtr;
@@ -215,8 +211,8 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kStdVectorOfLabelAndTypes;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<std::vector<LabelBlock*>>::id =
- ParseResultTypeId::kStdVectorOfLabelBlockPtr;
+ ParseResultHolder<std::vector<TryHandler*>>::id =
+ ParseResultTypeId::kStdVectorOfTryHandlerPtr;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<base::Optional<Statement*>>::id =
@@ -321,7 +317,7 @@ Expression* MakeCall(IdentifierExpression* callee,
// used as labels identifiers. All other statements in a call's otherwise
// must create intermediate Labels for the otherwise's statement code.
size_t label_id = 0;
- std::vector<LabelBlock*> temp_labels;
+ std::vector<TryHandler*> temp_labels;
for (auto* statement : otherwise) {
if (auto* e = ExpressionStatement::DynamicCast(statement)) {
if (auto* id = IdentifierExpression::DynamicCast(e->expression)) {
@@ -336,9 +332,10 @@ Expression* MakeCall(IdentifierExpression* callee,
auto label_id = MakeNode<Identifier>(label_name);
label_id->pos = SourcePosition::Invalid();
labels.push_back(label_id);
- auto* label_block =
- MakeNode<LabelBlock>(label_id, ParameterList::Empty(), statement);
- temp_labels.push_back(label_block);
+ auto* handler =
+ MakeNode<TryHandler>(TryHandler::HandlerKind::kLabel, label_id,
+ ParameterList::Empty(), statement);
+ temp_labels.push_back(handler);
}
// Create nested try-label expression for all of the temporary Labels that
@@ -351,7 +348,7 @@ Expression* MakeCall(IdentifierExpression* callee,
}
for (auto* label : temp_labels) {
- result = MakeNode<TryLabelExpression>(false, result, label);
+ result = MakeNode<TryLabelExpression>(result, label);
}
return result;
}
@@ -906,8 +903,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (!IsValidTypeName(name->value)) {
NamingConventionError("Type", name, "UpperCamelCase");
}
- auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
- if (extends && !BasicTypeExpression::DynamicCast(*extends)) {
+ auto extends = child_results->NextAs<TypeExpression*>();
+ if (!BasicTypeExpression::DynamicCast(extends)) {
ReportError("Expected type name in extends clause.");
}
auto generates = child_results->NextAs<base::Optional<std::string>>();
@@ -1410,8 +1407,9 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
BlockStatement* next_block = MakeNode<BlockStatement>();
current_block->statements.push_back(
MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
- false, MakeNode<StatementExpression>(case_block),
- MakeNode<LabelBlock>(MakeNode<Identifier>(kNextCaseLabelName),
+ MakeNode<StatementExpression>(case_block),
+ MakeNode<TryHandler>(TryHandler::HandlerKind::kLabel,
+ MakeNode<Identifier>(kNextCaseLabelName),
ParameterList::Empty(), next_block))));
current_block = next_block;
}
@@ -1512,15 +1510,21 @@ base::Optional<ParseResult> MakeTryLabelExpression(
auto try_block = child_results->NextAs<Statement*>();
CheckNotDeferredStatement(try_block);
Statement* result = try_block;
- auto label_blocks = child_results->NextAs<std::vector<LabelBlock*>>();
- auto catch_block = child_results->NextAs<base::Optional<LabelBlock*>>();
- for (auto block : label_blocks) {
- result = MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
- false, MakeNode<StatementExpression>(result), block));
- }
- if (catch_block) {
+ auto handlers = child_results->NextAs<std::vector<TryHandler*>>();
+ if (handlers.empty()) {
+ Error("Try blocks without catch or label don't make sense.");
+ }
+ for (size_t i = 0; i < handlers.size(); ++i) {
+ if (i != 0 &&
+ handlers[i]->handler_kind == TryHandler::HandlerKind::kCatch) {
+ Error(
+ "A catch handler always has to be first, before any label handler, "
+ "to avoid ambiguity about whether it catches exceptions from "
+ "preceding handlers or not.")
+ .Position(handlers[i]->pos);
+ }
result = MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
- true, MakeNode<StatementExpression>(result), *catch_block));
+ MakeNode<StatementExpression>(result), handlers[i]));
}
return ParseResult{result};
}
@@ -1546,7 +1550,8 @@ base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
}
auto parameters = child_results->NextAs<ParameterList>();
auto body = child_results->NextAs<Statement*>();
- LabelBlock* result = MakeNode<LabelBlock>(label, std::move(parameters), body);
+ TryHandler* result = MakeNode<TryHandler>(TryHandler::HandlerKind::kLabel,
+ label, std::move(parameters), body);
return ParseResult{result};
}
@@ -1561,8 +1566,9 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
parameters.types.push_back(MakeNode<BasicTypeExpression>(
std::vector<std::string>{}, "JSAny", std::vector<TypeExpression*>{}));
parameters.has_varargs = false;
- LabelBlock* result = MakeNode<LabelBlock>(
- MakeNode<Identifier>(kCatchLabelName), std::move(parameters), body);
+ TryHandler* result = MakeNode<TryHandler>(
+ TryHandler::HandlerKind::kCatch, MakeNode<Identifier>(kCatchLabelName),
+ std::move(parameters), body);
return ParseResult{result};
}
@@ -1831,6 +1837,10 @@ struct TorqueGrammar : Grammar {
}
continue;
}
+ if (MatchString("/*", pos)) {
+ while (!MatchString("*/", pos)) ++*pos;
+ continue;
+ }
return true;
}
}
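
The new grammar rule above makes the scanner skip /* ... */ block comments along with other whitespace. A standalone sketch of that skipping with an explicit end-of-input guard (whether MatchString itself guards the end of input is not visible in this hunk):

// Sketch only: block-comment skipping in a hand-written scanner.
#include <cstddef>
#include <iostream>
#include <string>

bool SkipBlockComment(const std::string& src, size_t* pos) {
  if (src.compare(*pos, 2, "/*") != 0) return false;
  *pos += 2;
  while (*pos + 1 < src.size() && src.compare(*pos, 2, "*/") != 0) ++*pos;
  if (*pos + 1 >= src.size()) return false;  // unterminated comment
  *pos += 2;
  return true;
}

int main() {
  std::string src = "/* torque block comment */ const x: int32 = 1;";
  size_t pos = 0;
  if (SkipBlockComment(src, &pos)) std::cout << src.substr(pos) << "\n";
}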
@@ -2247,13 +2257,11 @@ struct TorqueGrammar : Grammar {
List<Statement*>(&statement), Token("}")},
MakeBlockStatement)};
- // Result: LabelBlock*
- Symbol labelBlock = {
+ // Result: TryHandler*
+ Symbol tryHandler = {
Rule({Token("label"), &name,
TryOrDefault<ParameterList>(&parameterListNoVararg), &block},
- MakeLabelBlock)};
-
- Symbol catchBlock = {
+ MakeLabelBlock),
Rule({Token("catch"), Token("("), &identifier, Token(")"), &block},
MakeCatchBlock)};
@@ -2299,13 +2307,16 @@ struct TorqueGrammar : Grammar {
MakeIfStatement),
Rule(
{
- Token("typeswitch"), Token("("), expression, Token(")"),
- Token("{"), NonemptyList<TypeswitchCase>(&typeswitchCase),
+ Token("typeswitch"),
+ Token("("),
+ expression,
+ Token(")"),
+ Token("{"),
+ NonemptyList<TypeswitchCase>(&typeswitchCase),
Token("}"),
},
MakeTypeswitchStatement),
- Rule({Token("try"), &block, List<LabelBlock*>(&labelBlock),
- Optional<LabelBlock*>(&catchBlock)},
+ Rule({Token("try"), &block, List<TryHandler*>(&tryHandler)},
MakeTryLabelExpression),
Rule({OneOf({"assert", "check"}), Token("("), &expressionWithSource,
Token(")"), Token(";")},
@@ -2354,8 +2365,7 @@ struct TorqueGrammar : Grammar {
&externalString, Token(";")},
AsSingletonVector<Declaration*, MakeExternConstDeclaration>()),
Rule({annotations, CheckIf(Token("extern")), CheckIf(Token("transient")),
- OneOf({"class", "shape"}), &name,
- Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
+ OneOf({"class", "shape"}), &name, Token("extends"), &type,
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
&optionalClassBody},
@@ -2426,21 +2436,17 @@ struct TorqueGrammar : Grammar {
Sequence({Token("constexpr"), &externalString})),
Token("{"), NonemptyList<Identifier*>(&name, Token(",")),
CheckIf(Sequence({Token(","), Token("...")})), Token("}")},
- MakeEnumDeclaration)};
+ MakeEnumDeclaration),
+ Rule({Token("namespace"), &identifier, Token("{"), &declarationList,
+ Token("}")},
+ AsSingletonVector<Declaration*, MakeNamespaceDeclaration>())};
// Result: std::vector<Declaration*>
Symbol declarationList = {
Rule({List<std::vector<Declaration*>>(&declaration)}, ConcatList)};
- // Result: std::vector<Declaration*>
- Symbol namespaceDeclaration = {
- Rule({Token("namespace"), &identifier, Token("{"), &declarationList,
- Token("}")},
- AsSingletonVector<Declaration*, MakeNamespaceDeclaration>())};
-
Symbol file = {Rule({&file, Token("import"), &externalString},
ProcessTorqueImportDeclaration),
- Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
Rule({&file, &declaration}, AddGlobalDeclarations), Rule({})};
};
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 114bf043b5..c6939efa53 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -182,6 +182,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(RAWPTR_TYPE_STRING);
}
+ static const Type* GetExternalPointerType() {
+ return Get().GetBuiltinType(EXTERNALPTR_TYPE_STRING);
+ }
+
static const Type* GetMapType() {
return Get().GetBuiltinType(MAP_TYPE_STRING);
}
@@ -300,6 +304,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONTEXT_TYPE_STRING);
}
+ static const Type* GetNoContextType() {
+ return Get().GetBuiltinType(NO_CONTEXT_TYPE_STRING);
+ }
+
static const Type* GetNativeContextType() {
return Get().GetBuiltinType(NATIVE_CONTEXT_TYPE_STRING);
}
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 10ed87d247..5b61baf3a8 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -247,7 +247,7 @@ const ClassType* TypeVisitor::ComputeType(
ClassFlags flags = decl->flags;
bool is_shape = flags & ClassFlag::kIsShape;
std::string generates = decl->name->value;
- const Type* super_type = TypeVisitor::ComputeType(*decl->super);
+ const Type* super_type = TypeVisitor::ComputeType(decl->super);
if (is_shape) {
if (!(flags & ClassFlag::kExtern)) {
ReportError("Shapes must be extern, add \"extern\" to the declaration.");
@@ -266,9 +266,6 @@ const ClassType* TypeVisitor::ComputeType(
// support for type-checks on the C++ side.
generates = super_class->name();
}
- if (!decl->super) {
- ReportError("Extern class must extend another type.");
- }
if (super_type != TypeOracle::GetStrongTaggedType()) {
const ClassType* super_class = ClassType::DynamicCast(super_type);
if (!super_class) {
@@ -455,7 +452,7 @@ void TypeVisitor::VisitStructMethods(
DeclareMethods(struct_type, struct_declaration->methods);
}
-const StructType* TypeVisitor::ComputeTypeForStructExpression(
+const Type* TypeVisitor::ComputeTypeForStructExpression(
TypeExpression* type_expression,
const std::vector<const Type*>& term_argument_types) {
auto* basic = BasicTypeExpression::DynamicCast(type_expression);
@@ -475,11 +472,11 @@ const StructType* TypeVisitor::ComputeTypeForStructExpression(
// Compute types of non-generic structs as usual
if (!(maybe_generic_type && decl)) {
const Type* type = ComputeType(type_expression);
- const StructType* struct_type = StructType::DynamicCast(type);
- if (!struct_type) {
- ReportError(*type, " is not a struct, but used like one");
+ if (!type->IsStructType() && !type->IsBitFieldStructType()) {
+ ReportError(*type,
+ " is not a struct or bitfield struct, but used like one");
}
- return struct_type;
+ return type;
}
auto generic_type = *maybe_generic_type;
diff --git a/deps/v8/src/torque/type-visitor.h b/deps/v8/src/torque/type-visitor.h
index 6bb2bba499..205e842cc7 100644
--- a/deps/v8/src/torque/type-visitor.h
+++ b/deps/v8/src/torque/type-visitor.h
@@ -32,7 +32,9 @@ class TypeVisitor {
static void VisitStructMethods(StructType* struct_type,
const StructDeclaration* struct_declaration);
static Signature MakeSignature(const CallableDeclaration* declaration);
- static const StructType* ComputeTypeForStructExpression(
+ // Can return either StructType or BitFieldStructType, since they can both be
+ // used in struct expressions like `MyStruct{ a: 0, b: foo }`
+ static const Type* ComputeTypeForStructExpression(
TypeExpression* type_expression,
const std::vector<const Type*>& term_argument_types);
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index e08716c98a..a01fd59680 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <iostream>
-
#include "src/torque/types.h"
+#include <cmath>
+#include <iostream>
+
#include "src/base/bits.h"
+#include "src/base/optional.h"
#include "src/torque/ast.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
@@ -471,25 +473,16 @@ void StructType::Finalize() const {
CheckForDuplicateFields();
}
-constexpr ClassFlags ClassType::kInternalFlags;
-
ClassType::ClassType(const Type* parent, Namespace* nspace,
const std::string& name, ClassFlags flags,
const std::string& generates, const ClassDeclaration* decl,
const TypeAlias* alias)
: AggregateType(Kind::kClassType, parent, nspace, name),
size_(ResidueClass::Unknown()),
- flags_(flags & ~(kInternalFlags)),
+ flags_(flags),
generates_(generates),
decl_(decl),
- alias_(alias) {
- DCHECK_EQ(flags & kInternalFlags, 0);
-}
-
-bool ClassType::HasIndexedField() const {
- if (!is_finalized_) Finalize();
- return flags_ & ClassFlag::kHasIndexedField;
-}
+ alias_(alias) {}
std::string ClassType::GetGeneratedTNodeTypeNameImpl() const {
return generates_;
@@ -510,11 +503,6 @@ void ClassType::Finalize() const {
if (is_finalized_) return;
CurrentScope::Scope scope_activator(alias_->ParentScope());
CurrentSourcePosition::Scope position_activator(decl_->pos);
- if (parent()) {
- if (const ClassType* super_class = ClassType::DynamicCast(parent())) {
- if (super_class->HasIndexedField()) flags_ |= ClassFlag::kHasIndexedField;
- }
- }
TypeVisitor::VisitClassFieldsAndMethods(const_cast<ClassType*>(this),
this->decl_);
is_finalized_ = true;
@@ -542,6 +530,122 @@ std::vector<Field> ClassType::ComputeAllFields() const {
return all_fields;
}
+std::vector<Field> ClassType::ComputeHeaderFields() const {
+ std::vector<Field> result;
+ for (Field& field : ComputeAllFields()) {
+ if (field.index) break;
+ DCHECK(*field.offset < header_size());
+ result.push_back(std::move(field));
+ }
+ return result;
+}
+
+std::vector<Field> ClassType::ComputeArrayFields() const {
+ std::vector<Field> result;
+ for (Field& field : ComputeAllFields()) {
+ if (!field.index) {
+ DCHECK(*field.offset < header_size());
+ continue;
+ }
+ result.push_back(std::move(field));
+ }
+ return result;
+}
+
+void ClassType::InitializeInstanceTypes(
+ base::Optional<int> own, base::Optional<std::pair<int, int>> range) const {
+ DCHECK(!own_instance_type_.has_value());
+ DCHECK(!instance_type_range_.has_value());
+ own_instance_type_ = own;
+ instance_type_range_ = range;
+}
+
+base::Optional<int> ClassType::OwnInstanceType() const {
+ DCHECK(GlobalContext::IsInstanceTypesInitialized());
+ return own_instance_type_;
+}
+
+base::Optional<std::pair<int, int>> ClassType::InstanceTypeRange() const {
+ DCHECK(GlobalContext::IsInstanceTypesInitialized());
+ return instance_type_range_;
+}
+
+namespace {
+void ComputeSlotKindsHelper(std::vector<ObjectSlotKind>* slots,
+ size_t start_offset,
+ const std::vector<Field>& fields) {
+ size_t offset = start_offset;
+ for (const Field& field : fields) {
+ size_t field_size = std::get<0>(field.GetFieldSizeInformation());
+ size_t slot_index = offset / TargetArchitecture::TaggedSize();
+ // Rounding-up division to find the number of slots occupied by all the
+ // fields up to and including the current one.
+ size_t used_slots =
+ (offset + field_size + TargetArchitecture::TaggedSize() - 1) /
+ TargetArchitecture::TaggedSize();
+ while (used_slots > slots->size()) {
+ slots->push_back(ObjectSlotKind::kNoPointer);
+ }
+ const Type* type = field.name_and_type.type;
+ if (auto struct_type = type->StructSupertype()) {
+ ComputeSlotKindsHelper(slots, offset, (*struct_type)->fields());
+ } else {
+ ObjectSlotKind kind;
+ if (type->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ if (field.is_weak) {
+ kind = ObjectSlotKind::kCustomWeakPointer;
+ } else {
+ kind = ObjectSlotKind::kStrongPointer;
+ }
+ } else if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ DCHECK(!field.is_weak);
+ kind = ObjectSlotKind::kMaybeObjectPointer;
+ } else {
+ kind = ObjectSlotKind::kNoPointer;
+ }
+ DCHECK(slots->at(slot_index) == ObjectSlotKind::kNoPointer);
+ slots->at(slot_index) = kind;
+ }
+
+ offset += field_size;
+ }
+}
+} // namespace
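
ComputeSlotKindsHelper maps byte offsets to tagged-slot indices with a rounding-up division, so several small untagged fields can share one kNoPointer slot. A short worked example, assuming an 8-byte tagged size:

// Worked example of the rounding-up division above.
#include <cstddef>
#include <iostream>

int main() {
  constexpr size_t kTaggedSize = 8;
  auto used_slots = [](size_t offset, size_t field_size) {
    return (offset + field_size + kTaggedSize - 1) / kTaggedSize;
  };
  std::cout << used_slots(16, 4) << " "   // 3: a 4-byte field at offset 16 ends in slot 2
            << used_slots(20, 4) << " "   // 3: the next 4-byte field shares slot 2
            << used_slots(24, 8) << "\n"; // 4: an 8-byte tagged field occupies slot 3
}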
+
+std::vector<ObjectSlotKind> ClassType::ComputeHeaderSlotKinds() const {
+ std::vector<ObjectSlotKind> result;
+ std::vector<Field> header_fields = ComputeHeaderFields();
+ ComputeSlotKindsHelper(&result, 0, header_fields);
+ DCHECK_EQ(std::ceil(static_cast<double>(header_size()) /
+ TargetArchitecture::TaggedSize()),
+ result.size());
+ return result;
+}
+
+base::Optional<ObjectSlotKind> ClassType::ComputeArraySlotKind() const {
+ std::vector<ObjectSlotKind> kinds;
+ ComputeSlotKindsHelper(&kinds, 0, ComputeArrayFields());
+ if (kinds.empty()) return base::nullopt;
+ std::sort(kinds.begin(), kinds.end());
+ if (kinds.front() == kinds.back()) return {kinds.front()};
+ if (kinds.front() == ObjectSlotKind::kStrongPointer &&
+ kinds.back() == ObjectSlotKind::kMaybeObjectPointer) {
+ return ObjectSlotKind::kMaybeObjectPointer;
+ }
+ Error("Array fields mix types with different GC visitation requirements.")
+ .Throw();
+}
+
+bool ClassType::HasNoPointerSlots() const {
+ for (ObjectSlotKind slot : ComputeHeaderSlotKinds()) {
+ if (slot != ObjectSlotKind::kNoPointer) return false;
+ }
+ if (auto slot = ComputeArraySlotKind()) {
+ if (*slot != ObjectSlotKind::kNoPointer) return false;
+ }
+ return true;
+}
+
void ClassType::GenerateAccessors() {
// For each field, construct AST snippets that implement a CSA accessor
// function. The implementation iterator will turn the snippets into code.
@@ -620,11 +724,11 @@ void ClassType::GenerateAccessors() {
}
bool ClassType::HasStaticSize() const {
- if (IsShape()) return true;
- if (IsSubtypeOf(TypeOracle::GetJSObjectType())) return false;
- if (IsAbstract()) return false;
- if (HasIndexedField()) return false;
- return true;
+ // Abstract classes don't have instances directly, so asking this question
+ // doesn't make sense.
+ DCHECK(!IsAbstract());
+ if (IsSubtypeOf(TypeOracle::GetJSObjectType()) && !IsShape()) return false;
+ return size().SingleValue().has_value();
}
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
@@ -719,6 +823,21 @@ bool Signature::HasSameTypesAs(const Signature& other,
return true;
}
+namespace {
+bool FirstTypeIsContext(const std::vector<const Type*> parameter_types) {
+ return !parameter_types.empty() &&
+ parameter_types[0] == TypeOracle::GetContextType();
+}
+} // namespace
+
+bool Signature::HasContextParameter() const {
+ return FirstTypeIsContext(types());
+}
+
+bool BuiltinPointerType::HasContextParameter() const {
+ return FirstTypeIsContext(parameter_types());
+}
+
bool IsAssignableFrom(const Type* to, const Type* from) {
if (to == from) return true;
if (from->IsSubtypeOf(to)) return true;
@@ -814,6 +933,8 @@ size_t AbstractType::AlignmentLog2() const {
alignment = TargetArchitecture::TaggedSize();
} else if (this == TypeOracle::GetRawPtrType()) {
alignment = TargetArchitecture::RawPtrSize();
+ } else if (this == TypeOracle::GetExternalPointerType()) {
+ alignment = TargetArchitecture::ExternalPointerSize();
} else if (this == TypeOracle::GetVoidType()) {
alignment = 1;
} else if (this == TypeOracle::GetInt8Type()) {
@@ -881,6 +1002,9 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
} else if (type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
size = TargetArchitecture::RawPtrSize();
size_string = "kSystemPointerSize";
+ } else if (type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
+ size = TargetArchitecture::ExternalPointerSize();
+ size_string = "kExternalPointerSize";
} else if (type->IsSubtypeOf(TypeOracle::GetVoidType())) {
size = 0;
size_string = "0";
@@ -942,10 +1066,17 @@ bool IsAllowedAsBitField(const Type* type) {
// Any integer-ish type, including bools and enums which inherit from integer
 // types, is allowed. Note, however, that we always zero-extend during
// decoding regardless of signedness.
+ return IsPointerSizeIntegralType(type) || Is32BitIntegralType(type);
+}
+
+bool IsPointerSizeIntegralType(const Type* type) {
+ return type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
+ type->IsSubtypeOf(TypeOracle::GetIntPtrType());
+}
+
+bool Is32BitIntegralType(const Type* type) {
return type->IsSubtypeOf(TypeOracle::GetUint32Type()) ||
- type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
type->IsSubtypeOf(TypeOracle::GetInt32Type()) ||
- type->IsSubtypeOf(TypeOracle::GetIntPtrType()) ||
type->IsSubtypeOf(TypeOracle::GetBoolType());
}
@@ -960,6 +1091,13 @@ base::Optional<NameAndType> ExtractSimpleFieldArraySize(
return class_type.LookupField(identifier->name->value).name_and_type;
}
+std::string Type::GetRuntimeType() const {
+ // TODO(tebbi): Other types are currently unsupported, since for them the
+ // TNode types and the C++ runtime types disagree.
+ DCHECK(this->IsSubtypeOf(TypeOracle::GetTaggedType()));
+ return GetGeneratedTNodeTypeName();
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index b60879ce85..c01d55ccff 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -135,6 +135,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
base::Optional<const ClassType*> ClassSupertype() const;
base::Optional<const StructType*> StructSupertype() const;
virtual std::vector<RuntimeType> GetRuntimeTypes() const { return {}; }
+ virtual std::string GetRuntimeType() const;
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
size_t id() const { return id_; }
@@ -274,6 +275,7 @@ class AbstractType final : public Type {
const Type* NonConstexprVersion() const override {
if (non_constexpr_version_) return non_constexpr_version_;
if (!IsConstexpr()) return this;
+ if (parent()) return parent()->NonConstexprVersion();
return nullptr;
}
@@ -299,8 +301,13 @@ class AbstractType final : public Type {
}
std::string SimpleNameImpl() const override {
- if (IsConstexpr())
- return "constexpr_" + NonConstexprVersion()->SimpleName();
+ if (IsConstexpr()) {
+ const Type* non_constexpr_version = NonConstexprVersion();
+ if (non_constexpr_version == nullptr) {
+ ReportError("Cannot find non-constexpr type corresponding to ", *this);
+ }
+ return "constexpr_" + non_constexpr_version->SimpleName();
+ }
return name();
}
@@ -346,6 +353,8 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
return {{"Smi", ""}};
}
+ bool HasContextParameter() const;
+
private:
friend class TypeOracle;
BuiltinPointerType(const Type* parent, TypeVector parameter_types,
@@ -375,6 +384,9 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
return "TNode<" + GetGeneratedTNodeTypeName() + ">";
}
std::string GetGeneratedTNodeTypeNameImpl() const override;
+ std::string GetRuntimeType() const override {
+ return parent()->GetRuntimeType();
+ }
friend size_t hash_value(const UnionType& p) {
size_t result = 0;
@@ -420,6 +432,13 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
return false;
}
+ bool IsConstexpr() const override { return parent()->IsConstexpr(); }
+
+ const Type* NonConstexprVersion() const override {
+ if (!IsConstexpr()) return this;
+ return parent()->NonConstexprVersion();
+ }
+
void Extend(const Type* t) {
if (const UnionType* union_type = UnionType::DynamicCast(t)) {
for (const Type* member : union_type->types_) {
@@ -517,8 +536,6 @@ class AggregateType : public Type {
virtual void Finalize() const = 0;
- virtual bool HasIndexedField() const { return false; }
-
void SetFields(std::vector<Field> fields) { fields_ = std::move(fields); }
const std::vector<Field>& fields() const {
if (!is_finalized_) Finalize();
@@ -607,10 +624,25 @@ class StructType final : public AggregateType {
class TypeAlias;
+enum class ObjectSlotKind : uint8_t {
+ kNoPointer,
+ kStrongPointer,
+ kMaybeObjectPointer,
+ kCustomWeakPointer
+};
+
+inline base::Optional<ObjectSlotKind> Combine(ObjectSlotKind a,
+ ObjectSlotKind b) {
+ if (a == b) return {a};
+ if (std::min(a, b) == ObjectSlotKind::kStrongPointer &&
+ std::max(a, b) == ObjectSlotKind::kMaybeObjectPointer) {
+ return {ObjectSlotKind::kMaybeObjectPointer};
+ }
+ return base::nullopt;
+}
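
As an illustrative aside (not part of the patch), Combine() acts as a small lattice over slot kinds. The sketch below uses only the declarations directly above and assumes base::Optional supports std::optional-style comparison with a plain value:

    inline bool CombineExamples() {
      // Equal kinds combine to themselves.
      bool same = Combine(ObjectSlotKind::kStrongPointer,
                          ObjectSlotKind::kStrongPointer) ==
                  ObjectSlotKind::kStrongPointer;
      // A strong pointer and a maybe-object pointer widen to maybe-object.
      bool widened = Combine(ObjectSlotKind::kStrongPointer,
                             ObjectSlotKind::kMaybeObjectPointer) ==
                     ObjectSlotKind::kMaybeObjectPointer;
      // Any other mix has no common GC visiting strategy.
      bool invalid = !Combine(ObjectSlotKind::kNoPointer,
                              ObjectSlotKind::kStrongPointer).has_value();
      return same && widened && invalid;  // All three hold.
    }
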
+
class ClassType final : public AggregateType {
public:
- static constexpr ClassFlags kInternalFlags = ClassFlag::kHasIndexedField;
-
DECLARE_TYPE_BOILERPLATE(ClassType)
std::string ToExplicitString() const override;
std::string GetGeneratedTypeNameImpl() const override;
@@ -625,6 +657,7 @@ class ClassType final : public AggregateType {
(!HasUndefinedLayout() && !IsShape()));
}
bool ShouldGenerateBodyDescriptor() const {
+ if (IsAbstract()) return false;
return flags_ & ClassFlag::kGenerateBodyDescriptor || !IsExtern();
}
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
@@ -639,7 +672,6 @@ class ClassType final : public AggregateType {
bool ShouldExport() const { return flags_ & ClassFlag::kExport; }
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
- bool HasIndexedField() const override;
size_t header_size() const {
if (!is_finalized_) Finalize();
return header_size_;
@@ -655,14 +687,19 @@ class ClassType final : public AggregateType {
void GenerateAccessors();
bool AllowInstantiation() const;
const Field& RegisterField(Field field) override {
- if (field.index) {
- flags_ |= ClassFlag::kHasIndexedField;
- }
return AggregateType::RegisterField(field);
}
void Finalize() const override;
std::vector<Field> ComputeAllFields() const;
+ std::vector<Field> ComputeHeaderFields() const;
+ std::vector<Field> ComputeArrayFields() const;
+ // The slots of an object are the tagged pointer sized offsets in an object
+ // that may or may not require GC visiting. These helper functions determine
+ // what kind of GC visiting the individual slots require.
+ std::vector<ObjectSlotKind> ComputeHeaderSlotKinds() const;
+ base::Optional<ObjectSlotKind> ComputeArraySlotKind() const;
+ bool HasNoPointerSlots() const;
const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
return decl_->instance_type_constraints;
@@ -678,6 +715,14 @@ class ClassType final : public AggregateType {
}
SourcePosition GetPosition() const { return decl_->pos; }
+ // TODO(tebbi): We should no longer pass around types as const pointers, so
+ // that we can avoid mutable fields and const initializers for
+ // late-initialized portions of types like this one.
+ void InitializeInstanceTypes(base::Optional<int> own,
+ base::Optional<std::pair<int, int>> range) const;
+ base::Optional<int> OwnInstanceType() const;
+ base::Optional<std::pair<int, int>> InstanceTypeRange() const;
+
private:
friend class TypeOracle;
friend class TypeVisitor;
@@ -691,6 +736,8 @@ class ClassType final : public AggregateType {
const std::string generates_;
const ClassDeclaration* decl_;
const TypeAlias* alias_;
+ mutable base::Optional<int> own_instance_type_;
+ mutable base::Optional<std::pair<int, int>> instance_type_range_;
};
inline std::ostream& operator<<(std::ostream& os, const Type& t) {
@@ -804,6 +851,7 @@ struct Signature {
return TypeVector(parameter_types.types.begin() + implicit_count,
parameter_types.types.end());
}
+ bool HasContextParameter() const;
};
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names);
@@ -820,6 +868,8 @@ TypeVector LowerParameterTypes(const ParameterTypes& parameter_types,
base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type);
bool IsAnyUnsignedInteger(const Type* type);
bool IsAllowedAsBitField(const Type* type);
+bool IsPointerSizeIntegralType(const Type* type);
+bool Is32BitIntegralType(const Type* type);
base::Optional<NameAndType> ExtractSimpleFieldArraySize(
const ClassType& class_type, Expression* array_size);
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 1c5b3079f5..689d242ab0 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -340,7 +340,7 @@ void EraseIf(Container* container, F f) {
class NullStreambuf : public std::streambuf {
public:
- virtual int overflow(int c) {
+ int overflow(int c) override {
setp(buffer_, buffer_ + sizeof(buffer_));
return (c == traits_type::eof()) ? '\0' : c;
}
diff --git a/deps/v8/src/tracing/DEPS b/deps/v8/src/tracing/DEPS
new file mode 100644
index 0000000000..fd3531bfcc
--- /dev/null
+++ b/deps/v8/src/tracing/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+perfetto/tracing.h",
+ "+protos/perfetto"
+]
diff --git a/deps/v8/src/tracing/trace-categories.cc b/deps/v8/src/tracing/trace-categories.cc
new file mode 100644
index 0000000000..98c41e41e8
--- /dev/null
+++ b/deps/v8/src/tracing/trace-categories.cc
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/trace-categories.h"
+
+#if defined(V8_USE_PERFETTO)
+PERFETTO_TRACK_EVENT_STATIC_STORAGE();
+#endif
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
new file mode 100644
index 0000000000..91bf3da96e
--- /dev/null
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRACING_TRACE_CATEGORIES_H_
+#define V8_TRACING_TRACE_CATEGORIES_H_
+
+#include "src/base/macros.h"
+
+#if defined(V8_USE_PERFETTO)
+
+// Exports track events into the v8 namespace to avoid conflicts with embedders
+// like Chrome.
+#define PERFETTO_TRACK_EVENT_NAMESPACE v8
+
+// Export trace categories and the track event data source in component builds.
+#define PERFETTO_COMPONENT_EXPORT V8_EXPORT_PRIVATE
+
+// For now most of v8 uses legacy trace events.
+#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 1
+
+#include "perfetto/tracing.h"
+
+// Trace category prefixes used in tests.
+PERFETTO_DEFINE_TEST_CATEGORY_PREFIXES("v8-cat", "cat", "v8.Test2");
+
+// List of categories used by built-in V8 trace events.
+// clang-format off
+PERFETTO_DEFINE_CATEGORIES(
+ perfetto::Category("V8.HandleInterrupts"),
+ perfetto::Category("v8"),
+ perfetto::Category("v8.console"),
+ perfetto::Category("v8.execute"),
+ perfetto::Category("v8.runtime"),
+ perfetto::Category::Group("devtools.timeline,v8"),
+ perfetto::Category::Group("devtools.timeline,"
+ TRACE_DISABLED_BY_DEFAULT("v8.gc")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("devtools.timeline")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.compile")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.wasm")),
+ perfetto::Category::Group("v8,devtools.timeline"),
+ perfetto::Category::Group(TRACE_DISABLED_BY_DEFAULT("v8.turbofan") ","
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm")));
+// clang-format on
+
+#endif // defined(V8_USE_PERFETTO)
+
+#endif // V8_TRACING_TRACE_CATEGORIES_H_
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 6c631b1f3c..8e69fe5520 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -15,6 +15,7 @@ namespace v8 {
namespace internal {
namespace tracing {
+#if !defined(V8_USE_PERFETTO)
v8::TracingController* TraceEventHelper::GetTracingController() {
return v8::internal::V8::GetCurrentPlatform()->GetTracingController();
}
@@ -51,6 +52,7 @@ void CallStatsScopedTracer::Initialize(v8::internal::Isolate* isolate,
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
TRACE_EVENT_FLAG_NONE, v8::internal::tracing::kNoId);
}
+#endif // !defined(V8_USE_PERFETTO)
} // namespace tracing
} // namespace internal
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 0a650a8ebb..816e79c523 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -8,7 +8,13 @@
#include <stddef.h>
#include <memory>
+#if defined(V8_USE_PERFETTO)
+#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+#include "src/tracing/trace-categories.h"
+#else
#include "base/trace_event/common/trace_event_common.h"
+#endif // !defined(V8_USE_PERFETTO)
+
#include "include/v8-platform.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
@@ -32,6 +38,8 @@ enum CategoryGroupEnabledFlags {
kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
};
+#if !defined(V8_USE_PERFETTO)
+
// TODO(petermarshall): Remove with the old tracing implementation - Perfetto
// copies const char* arguments by default.
// By default, const char* argument values are assumed to have long-lived scope
@@ -284,8 +292,8 @@ class Isolate;
namespace tracing {
-// Specify these values when the corresponding argument of AddTraceEvent is not
-// used.
+// Specify these values when the corresponding argument of AddTraceEvent
+// is not used.
const int kZeroNumArgs = 0;
const decltype(nullptr) kGlobalScope = nullptr;
const uint64_t kNoId = 0;
@@ -605,4 +613,39 @@ class CallStatsScopedTracer {
} // namespace internal
} // namespace v8
+#else // defined(V8_USE_PERFETTO)
+
+#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name) \
+ struct PERFETTO_UID(ScopedEvent) { \
+ struct ScopedStats { \
+ ScopedStats(v8::internal::Isolate* isolate_arg, int) { \
+ TRACE_EVENT_BEGIN(category, name, [&](perfetto::EventContext) { \
+ isolate_ = isolate_arg; \
+ internal::RuntimeCallStats* table = \
+ isolate_->counters()->runtime_call_stats(); \
+ has_parent_scope_ = table->InUse(); \
+ if (!has_parent_scope_) table->Reset(); \
+ }); \
+ } \
+ ~ScopedStats() { \
+ TRACE_EVENT_END(category, [&](perfetto::EventContext ctx) { \
+ if (!has_parent_scope_ && isolate_) { \
+ /* TODO(skyostil): Write as typed event instead of JSON */ \
+ auto value = v8::tracing::TracedValue::Create(); \
+ isolate_->counters()->runtime_call_stats()->Dump(value.get()); \
+ auto annotation = ctx.event()->add_debug_annotations(); \
+ annotation->set_name("runtime-call-stats"); \
+ value->Add(annotation); \
+ } \
+ }); \
+ } \
+ v8::internal::Isolate* isolate_; \
+ bool has_parent_scope_; \
+ } stats; \
+ } PERFETTO_UID(scoped_event) { \
+ { isolate, 0 } \
+ }
+
+#endif // defined(V8_USE_PERFETTO)
+
#endif // V8_TRACING_TRACE_EVENT_H_
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index 9011b51f48..bc5398a567 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -8,6 +8,10 @@
#include "src/numbers/conversions.h"
#include "src/utils/vector.h"
+#ifdef V8_USE_PERFETTO
+#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+#endif
+
namespace v8 {
namespace tracing {
@@ -207,5 +211,16 @@ void TracedValue::AppendAsTraceFormat(std::string* out) const {
*out += '}';
}
+#ifdef V8_USE_PERFETTO
+void TracedValue::Add(
+ perfetto::protos::pbzero::DebugAnnotation* annotation) const {
+ std::string json;
+ json += "{";
+ json += data_;
+ json += "}";
+ annotation->set_legacy_json_value(json);
+}
+#endif // V8_USE_PERFETTO
+
} // namespace tracing
} // namespace v8
diff --git a/deps/v8/src/tracing/traced-value.h b/deps/v8/src/tracing/traced-value.h
index 3edfbc23b2..3b1d09059d 100644
--- a/deps/v8/src/tracing/traced-value.h
+++ b/deps/v8/src/tracing/traced-value.h
@@ -12,11 +12,17 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace tracing {
-class V8_EXPORT_PRIVATE TracedValue : public ConvertableToTraceFormat {
+class V8_EXPORT_PRIVATE TracedValue : public ConvertableToTraceFormat
+#ifdef V8_USE_PERFETTO
+ ,
+ public perfetto::DebugAnnotation
+#endif // V8_USE_PERFETTO
+{
public:
~TracedValue() override;
@@ -54,6 +60,11 @@ class V8_EXPORT_PRIVATE TracedValue : public ConvertableToTraceFormat {
// ConvertableToTraceFormat implementation.
void AppendAsTraceFormat(std::string* out) const override;
+#ifdef V8_USE_PERFETTO
+ // DebugAnnotation implementation.
+ void Add(perfetto::protos::pbzero::DebugAnnotation*) const override;
+#endif // V8_USE_PERFETTO
+
private:
TracedValue();
diff --git a/deps/v8/src/utils/ostreams.h b/deps/v8/src/utils/ostreams.h
index e87675d541..118dfc282a 100644
--- a/deps/v8/src/utils/ostreams.h
+++ b/deps/v8/src/utils/ostreams.h
@@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE OFStreamBase : public std::streambuf {
class V8_EXPORT_PRIVATE DbgStreamBuf : public std::streambuf {
public:
DbgStreamBuf();
- ~DbgStreamBuf();
+ ~DbgStreamBuf() override;
private:
int sync() override;
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE DbgStreamBuf : public std::streambuf {
class DbgStdoutStream : public std::ostream {
public:
DbgStdoutStream();
- ~DbgStdoutStream() = default;
+ ~DbgStdoutStream() override = default;
private:
DbgStreamBuf streambuf_;
diff --git a/deps/v8/src/utils/pointer-with-payload.h b/deps/v8/src/utils/pointer-with-payload.h
index 1c140ff684..3dbd6acac0 100644
--- a/deps/v8/src/utils/pointer-with-payload.h
+++ b/deps/v8/src/utils/pointer-with-payload.h
@@ -46,7 +46,7 @@ class PointerWithPayload {
static constexpr uintptr_t kPointerMask = ~kPayloadMask;
public:
- PointerWithPayload() {}
+ PointerWithPayload() = default;
explicit PointerWithPayload(PointerType* pointer)
: pointer_(reinterpret_cast<uintptr_t>(pointer)) {
diff --git a/deps/v8/src/utils/vector.h b/deps/v8/src/utils/vector.h
index 71dadc6c0e..38202d804f 100644
--- a/deps/v8/src/utils/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -21,6 +21,10 @@ namespace internal {
template <typename T>
class Vector {
public:
+ using value_type = T;
+ using iterator = T*;
+ using const_iterator = const T*;
+
constexpr Vector() : start_(nullptr), length_(0) {}
constexpr Vector(T* data, size_t length) : start_(data), length_(length) {
@@ -116,17 +120,15 @@ class Vector {
}
template <typename S>
- static constexpr Vector<T> cast(Vector<S> input) {
+ static Vector<T> cast(Vector<S> input) {
// Casting is potentially dangerous, so be really restrictive here. This
// might be lifted once we have use cases for that.
STATIC_ASSERT(std::is_pod<S>::value);
STATIC_ASSERT(std::is_pod<T>::value);
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_EQ(0, (input.length() * sizeof(S)) % sizeof(T));
+ DCHECK_EQ(0, (input.size() * sizeof(S)) % sizeof(T));
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(input.begin()) % alignof(T));
-#endif
return Vector<T>(reinterpret_cast<T*>(input.begin()),
- input.length() * sizeof(S) / sizeof(T));
+ input.size() * sizeof(S) / sizeof(T));
}
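
For illustration only (not part of the patch), a hypothetical use of Vector<T>::cast as restricted above; the byte count must divide evenly by sizeof(T) and the data must be aligned for T, which the DCHECKs enforce:

    void CastExample() {
      alignas(uint32_t) static const uint8_t bytes[8] = {1, 0, 0, 0, 2, 0, 0, 0};
      Vector<const uint8_t> byte_vec(bytes, 8);
      // Reinterpret eight bytes as two 32-bit words (1 and 2 on little-endian).
      Vector<const uint32_t> word_vec = Vector<const uint32_t>::cast(byte_vec);
      DCHECK_EQ(2u, word_vec.size());
    }
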
bool operator==(const Vector<const T> other) const {
@@ -236,37 +238,48 @@ class OwnedVector {
size_t length_ = 0;
};
+// The vectors returned by {StaticCharVector}, {CStrVector}, or {OneByteVector}
+// do not contain a null-termination byte. If you want the null byte, use
+// {ArrayVector}.
+
+// Known length, constexpr.
template <size_t N>
-constexpr Vector<const uint8_t> StaticCharVector(const char (&array)[N]) {
- return Vector<const uint8_t>::cast(Vector<const char>(array, N - 1));
+constexpr Vector<const char> StaticCharVector(const char (&array)[N]) {
+ return {array, N - 1};
}
-// The resulting vector does not contain a null-termination byte. If you want
-// the null byte, use ArrayVector("foo").
+// Unknown length, not constexpr.
inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, strlen(data));
+ return {data, strlen(data)};
}
+// OneByteVector is never constexpr because the data pointer is
+// {reinterpret_cast}ed.
inline Vector<const uint8_t> OneByteVector(const char* data, size_t length) {
- return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
+ return {reinterpret_cast<const uint8_t*>(data), length};
}
inline Vector<const uint8_t> OneByteVector(const char* data) {
return OneByteVector(data, strlen(data));
}
+template <size_t N>
+Vector<const uint8_t> StaticOneByteVector(const char (&array)[N]) {
+ return OneByteVector(array, N - 1);
+}
+
// For string literals, ArrayVector("foo") returns a vector ['f', 'o', 'o', \0]
// with length 4 and null-termination.
// If you want ['f', 'o', 'o'], use CStrVector("foo").
template <typename T, size_t N>
inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
- return Vector<T>{arr, N};
+ return {arr, N};
}
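
A small usage sketch of the helpers above (illustrative only, relying on the comments in this hunk for the stated lengths):

    void VectorLengthExamples() {
      auto chars = StaticCharVector("foo");     // length 3, no trailing '\0'
      auto cstr = CStrVector("foo");            // length 3, via strlen at runtime
      auto bytes = StaticOneByteVector("foo");  // length 3, as const uint8_t
      auto with_nul = ArrayVector("foo");       // length 4, includes the '\0'
      (void)chars; (void)cstr; (void)bytes; (void)with_nul;
    }
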
// Construct a Vector from a start pointer and a size.
template <typename T>
inline constexpr Vector<T> VectorOf(T* start, size_t size) {
- return Vector<T>(start, size);
+ return {start, size};
}
// Construct a Vector from anything providing a {data()} and {size()} accessor.
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index fe921c6406..d69797886a 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/baseline/liftoff-register.h"
namespace v8 {
namespace internal {
@@ -25,7 +26,7 @@ namespace liftoff {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 (high) | ^
@@ -42,13 +43,16 @@ constexpr int kInstanceOffset = 2 * kSystemPointerSize;
// PatchPrepareStackFrame will use in order to increase the stack appropriately.
// Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3;
+constexpr int kHalfStackSlotSize = LiftoffAssembler::kStackSlotSize >> 1;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+ return MemOperand(offset > 0 ? fp : sp, -offset);
+}
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
- return MemOperand(fp, -offset + half_offset);
+ return MemOperand(offset > 0 ? fp : sp, -offset + half_offset);
}
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -68,11 +72,18 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
inline Register CalculateActualAddress(LiftoffAssembler* assm,
UseScratchRegisterScope* temps,
Register addr_reg, Register offset_reg,
- int32_t offset_imm) {
+ int32_t offset_imm,
+ Register result_reg = no_reg) {
if (offset_reg == no_reg && offset_imm == 0) {
- return addr_reg;
+ if (result_reg == no_reg) {
+ return addr_reg;
+ } else {
+ assm->mov(result_reg, addr_reg);
+ return result_reg;
+ }
}
- Register actual_addr_reg = temps->Acquire();
+ Register actual_addr_reg =
+ result_reg != no_reg ? result_reg : temps->Acquire();
if (offset_reg == no_reg) {
assm->add(actual_addr_reg, addr_reg, Operand(offset_imm));
} else {
@@ -111,19 +122,11 @@ template <void (Assembler::*op)(Register, Register, Register, SBit, Condition),
SBit, Condition)>
inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, LiftoffRegister rhs) {
- UseScratchRegisterScope temps(assm);
- Register scratch = dst.low_gp();
- bool can_use_dst =
- dst.low_gp() != lhs.high_gp() && dst.low_gp() != rhs.high_gp();
- if (!can_use_dst) {
- scratch = temps.Acquire();
- }
- (assm->*op)(scratch, lhs.low_gp(), rhs.low_gp(), SetCC, al);
+ DCHECK_NE(dst.low_gp(), lhs.high_gp());
+ DCHECK_NE(dst.low_gp(), rhs.high_gp());
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp(), SetCC, al);
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(rhs.high_gp()),
LeaveCC, al);
- if (!can_use_dst) {
- assm->mov(dst.low_gp(), scratch);
- }
}
template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
@@ -132,20 +135,12 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
- UseScratchRegisterScope temps(assm);
- Register scratch = dst.low_gp();
- bool can_use_dst = dst.low_gp() != lhs.high_gp();
- if (!can_use_dst) {
- scratch = temps.Acquire();
- }
- (assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
+ DCHECK_NE(dst.low_gp(), lhs.high_gp());
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(sign_extend),
LeaveCC, al);
- if (!can_use_dst) {
- assm->mov(dst.low_gp(), scratch);
- }
}
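
The sign-extension rule above is ordinary two's-complement widening of the 32-bit immediate into a register pair. A minimal arithmetic sketch (not Liftoff code; the function name is illustrative):

    #include <cstdint>

    int64_t WidenImmediate(int32_t imm) {
      // High word is -1 for negative immediates and 0 otherwise, matching
      // {sign_extend} above.
      uint64_t high = imm < 0 ? 0xFFFFFFFFu : 0u;
      uint64_t bits = (high << 32) | static_cast<uint32_t>(imm);
      return static_cast<int64_t>(bits);  // WidenImmediate(-5) == -5
    }
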
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
@@ -185,6 +180,10 @@ inline Simd128Register GetSimd128Register(DoubleRegister reg) {
return QwNeonRegister::from_code(reg.code() / 2);
}
+inline Simd128Register GetSimd128Register(LiftoffRegister reg) {
+ return liftoff::GetSimd128Register(reg.low_fp());
+}
+
enum class MinOrMax : uint8_t { kMin, kMax };
template <typename RegisterType>
inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
@@ -218,6 +217,116 @@ inline Register EnsureNoAlias(Assembler* assm, Register reg,
return tmp;
}
+inline void S128NarrowOp(LiftoffAssembler* assm, NeonDataType dt,
+ NeonDataType sdt, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ if (dst == lhs) {
+ assm->vqmovn(dt, sdt, dst.low_fp(), liftoff::GetSimd128Register(lhs));
+ assm->vqmovn(dt, sdt, dst.high_fp(), liftoff::GetSimd128Register(rhs));
+ } else {
+ assm->vqmovn(dt, sdt, dst.high_fp(), liftoff::GetSimd128Register(rhs));
+ assm->vqmovn(dt, sdt, dst.low_fp(), liftoff::GetSimd128Register(lhs));
+ }
+}
+
+inline void F64x2Compare(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs,
+ Condition cond) {
+ DCHECK(cond == eq || cond == ne || cond == lt || cond == le);
+
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+ UseScratchRegisterScope temps(assm);
+ Register scratch = temps.Acquire();
+
+ assm->mov(scratch, Operand(0));
+ assm->VFPCompareAndSetFlags(left.low(), right.low());
+ assm->mov(scratch, Operand(-1), LeaveCC, cond);
+ if (cond == lt || cond == le) {
+ // Check for NaN.
+ assm->mov(scratch, Operand(0), LeaveCC, vs);
+ }
+ assm->vmov(dest.low(), scratch, scratch);
+
+ assm->mov(scratch, Operand(0));
+ assm->VFPCompareAndSetFlags(left.high(), right.high());
+ assm->mov(scratch, Operand(-1), LeaveCC, cond);
+ if (cond == lt || cond == le) {
+ // Check for NaN.
+ assm->mov(scratch, Operand(0), LeaveCC, vs);
+ }
+ assm->vmov(dest.high(), scratch, scratch);
+}
+
+inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
+ ValueType type) {
+#ifdef DEBUG
+ // The {str} instruction needs a temp register when the immediate in the
+ // provided MemOperand does not fit into 12 bits. This happens for large stack
+ // frames. This DCHECK checks that the temp register is available when needed.
+ DCHECK(UseScratchRegisterScope{assm}.CanAcquire());
+#endif
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->str(src.gp(), dst);
+ break;
+ case ValueType::kI64:
+ // Positive offsets should be lowered to kI32.
+ assm->str(src.low_gp(), MemOperand(dst.rn(), dst.offset()));
+ assm->str(
+ src.high_gp(),
+ MemOperand(dst.rn(), dst.offset() + liftoff::kHalfStackSlotSize));
+ break;
+ case ValueType::kF32:
+ assm->vstr(liftoff::GetFloatRegister(src.fp()), dst);
+ break;
+ case ValueType::kF64:
+ assm->vstr(src.fp(), dst);
+ break;
+ case ValueType::kS128: {
+ UseScratchRegisterScope temps(assm);
+ Register addr = liftoff::CalculateActualAddress(assm, &temps, dst.rn(),
+ no_reg, dst.offset());
+ assm->vst1(Neon8, NeonListOperand(src.low_fp(), 2), NeonMemOperand(addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->ldr(dst.gp(), src);
+ break;
+ case ValueType::kI64:
+ assm->ldr(dst.low_gp(), MemOperand(src.rn(), src.offset()));
+ assm->ldr(
+ dst.high_gp(),
+ MemOperand(src.rn(), src.offset() + liftoff::kHalfStackSlotSize));
+ break;
+ case ValueType::kF32:
+ assm->vldr(liftoff::GetFloatRegister(dst.fp()), src);
+ break;
+ case ValueType::kF64:
+ assm->vldr(dst.fp(), src);
+ break;
+ case ValueType::kS128: {
+ // Get memory address of slot to fill from.
+ UseScratchRegisterScope temps(assm);
+ Register addr = liftoff::CalculateActualAddress(assm, &temps, src.rn(),
+ no_reg, src.offset());
+ assm->vld1(Neon8, NeonListOperand(dst.low_fp(), 2), NeonMemOperand(addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -324,7 +433,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
case ValueType::kF64: {
Register extra_scratch = GetUnusedRegister(kGpReg).gp();
- vmov(reg.fp(), Double(value.to_f64_boxed().get_scalar()), extra_scratch);
+ vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
default:
@@ -540,92 +649,518 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
+namespace liftoff {
+#define __ lasm->
+
+inline void AtomicOp32(
+ LiftoffAssembler* lasm, Register dst_addr, Register offset_reg,
+ uint32_t offset_imm, LiftoffRegister value, LiftoffRegister result,
+ LiftoffRegList pinned,
+ void (Assembler::*load)(Register, Register, Condition),
+ void (Assembler::*store)(Register, Register, Register, Condition),
+ void (*op)(LiftoffAssembler*, Register, Register, Register)) {
+ Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to be
+ // the same register.
+ Register temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Make sure that {result} is unique.
+ Register result_reg = result.gp();
+ if (result_reg == value.gp() || result_reg == dst_addr ||
+ result_reg == offset_reg) {
+ result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, &temps, dst_addr, offset_reg, offset_imm);
+
+ __ dmb(ISH);
+ Label retry;
+ __ bind(&retry);
+ (lasm->*load)(result_reg, actual_addr, al);
+ op(lasm, temp, result_reg, value.gp());
+ (lasm->*store)(store_result, temp, actual_addr, al);
+ __ cmp(store_result, Operand(0));
+ __ b(ne, &retry);
+ __ dmb(ISH);
+ if (result_reg != result.gp()) {
+ __ mov(result.gp(), result_reg);
+ }
+}
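
The loop above is the classic load-exclusive/store-exclusive retry pattern. A high-level analogue of its semantics using std::atomic instead of ldrex/strex (a sketch, not what Liftoff actually emits):

    #include <atomic>
    #include <cstdint>

    uint32_t AtomicFetchAddSketch(std::atomic<uint32_t>* addr, uint32_t value) {
      uint32_t old = addr->load(std::memory_order_relaxed);
      uint32_t desired;
      do {
        desired = old + value;  // the "op" step between the load and the store
      } while (!addr->compare_exchange_weak(old, desired,
                                            std::memory_order_seq_cst));
      return old;  // like {result}: the memory value before the update
    }
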
+
+inline void Add(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ add(dst, lhs, rhs);
+}
+
+inline void Sub(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ sub(dst, lhs, rhs);
+}
+
+inline void And(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ and_(dst, lhs, rhs);
+}
+
+inline void Or(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ orr(dst, lhs, rhs);
+}
+
+inline void Xor(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ eor(dst, lhs, rhs);
+}
+
+inline void Exchange(LiftoffAssembler* lasm, Register dst, Register lhs,
+ Register rhs) {
+ __ mov(dst, rhs);
+}
+
+inline void AtomicBinop32(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister value, LiftoffRegister result,
+ StoreType type,
+ void (*op)(LiftoffAssembler*, Register, Register,
+ Register)) {
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ __ LoadConstant(result.high(), WasmValue(0));
+ result = result.low();
+ value = value.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ liftoff::AtomicOp32(lasm, dst_addr, offset_reg, offset_imm, value, result,
+ pinned, &Assembler::ldrexb, &Assembler::strexb, op);
+ return;
+ case StoreType::kI64Store16:
+ __ LoadConstant(result.high(), WasmValue(0));
+ result = result.low();
+ value = value.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ liftoff::AtomicOp32(lasm, dst_addr, offset_reg, offset_imm, value, result,
+ pinned, &Assembler::ldrexh, &Assembler::strexh, op);
+ return;
+ case StoreType::kI64Store32:
+ __ LoadConstant(result.high(), WasmValue(0));
+ result = result.low();
+ value = value.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ liftoff::AtomicOp32(lasm, dst_addr, offset_reg, offset_imm, value, result,
+ pinned, &Assembler::ldrex, &Assembler::strex, op);
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void AtomicOp64(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister value,
+ base::Optional<LiftoffRegister> result,
+ void (*op)(LiftoffAssembler*, LiftoffRegister,
+ LiftoffRegister, LiftoffRegister)) {
+ // strexd stores a 64 bit word from two registers. The first register needs
+ // to have an even index, e.g. r8, the second register needs to be the one
+ // with the next higher index, e.g. r9 if the first register is r8. In the
+ // following code we use the fixed register pair r8/r9 to make the code here
+ // simpler, even though other register pairs would also be possible.
+ constexpr Register dst_low = r8;
+ constexpr Register dst_high = r9;
+
+ // Make sure {dst_low} and {dst_high} are not occupied by any other value.
+ Register value_low = value.low_gp();
+ Register value_high = value.high_gp();
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(
+ dst_addr, offset_reg, value_low, value_high, dst_low, dst_high);
+ __ ClearRegister(dst_low, {&dst_addr, &offset_reg, &value_low, &value_high},
+ pinned);
+ pinned = pinned |
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value_low, value_high);
+ __ ClearRegister(dst_high, {&dst_addr, &offset_reg, &value_low, &value_high},
+ pinned);
+ pinned = pinned |
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value_low, value_high);
+
+ // Make sure that {result}, if it exists, also does not overlap with
+ // {dst_low} and {dst_high}. We don't have to transfer the value stored in
+ // {result}.
+ Register result_low = no_reg;
+ Register result_high = no_reg;
+ if (result.has_value()) {
+ result_low = result.value().low_gp();
+ if (pinned.has(result_low)) {
+ result_low = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+ pinned.set(result_low);
+
+ result_high = result.value().high_gp();
+ if (pinned.has(result_high)) {
+ result_high = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+ pinned.set(result_high);
+ }
+
+ Register store_result = __ GetUnusedRegister(kGpReg, pinned).gp();
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, &temps, dst_addr, offset_reg, offset_imm);
+
+ __ dmb(ISH);
+ Label retry;
+ __ bind(&retry);
+ // {ldrexd} is needed here so that the {strexd} instruction below can
+ // succeed. We don't need the value we are reading. We use {dst_low} and
+ // {dst_high} as the destination registers because {ldrexd} has the same
+ // restrictions on registers as {strexd}, see the comment above.
+ __ ldrexd(dst_low, dst_high, actual_addr);
+ if (result.has_value()) {
+ __ mov(result_low, dst_low);
+ __ mov(result_high, dst_high);
+ }
+ op(lasm, LiftoffRegister::ForPair(dst_low, dst_high),
+ LiftoffRegister::ForPair(dst_low, dst_high),
+ LiftoffRegister::ForPair(value_low, value_high));
+ __ strexd(store_result, dst_low, dst_high, actual_addr);
+ __ cmp(store_result, Operand(0));
+ __ b(ne, &retry);
+ __ dmb(ISH);
+
+ if (result.has_value()) {
+ if (result_low != result.value().low_gp()) {
+ __ mov(result.value().low_gp(), result_low);
+ }
+ if (result_high != result.value().high_gp()) {
+ __ mov(result.value().high_gp(), result_high);
+ }
+ }
+}
+
+inline void I64Store(LiftoffAssembler* lasm, LiftoffRegister dst,
+ LiftoffRegister, LiftoffRegister src) {
+ __ mov(dst.low_gp(), src.low_gp());
+ __ mov(dst.high_gp(), src.high_gp());
+}
+
+#undef __
+} // namespace liftoff
+
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ if (type.value() != LoadType::kI64Load) {
+ Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
+ dmb(ISH);
+ return;
+ }
+ // ldrexd loads a 64 bit word into two registers. The first register needs to
+ // have an even index, e.g. r8, the second register needs to be the one with
+ // the next higher index, e.g. r9 if the first register is r8. In the
+ // following code we use the fixed register pair r8/r9 to make the code here
+ // simpler, even though other register pairs would also be possible.
+ constexpr Register dst_low = r8;
+ constexpr Register dst_high = r9;
+ if (cache_state()->is_used(LiftoffRegister(dst_low))) {
+ SpillRegister(LiftoffRegister(dst_low));
+ }
+ if (cache_state()->is_used(LiftoffRegister(dst_high))) {
+ SpillRegister(LiftoffRegister(dst_high));
+ }
+ UseScratchRegisterScope temps(this);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ ldrexd(dst_low, dst_high, actual_addr);
+ dmb(ISH);
+
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, src, {},
+ liftoff::I64Store);
+ return;
+ }
+
+ dmb(ISH);
+ Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
+ dmb(ISH);
+ return;
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Binop<&Assembler::add, &Assembler::adc>);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::Add);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::Sub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Binop<&Assembler::and_, &Assembler::and_>);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::And);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Binop<&Assembler::orr, &Assembler::orr>);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::Or);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Binop<&Assembler::eor, &Assembler::eor>);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::Xor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicOp64(this, dst_addr, offset_reg, offset_imm, value, {result},
+ liftoff::I64Store);
+ return;
+ }
+ liftoff::AtomicBinop32(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, &liftoff::Exchange);
}
+namespace liftoff {
+#define __ lasm->
+
+inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
+ Register dst_addr_reg, Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegister expected,
+ LiftoffRegister new_value,
+ LiftoffRegister result) {
+ // To implement I64AtomicCompareExchange, we need nearly all registers, with
+ // some registers having special constraints, e.g. for {new_value} and
+ // {result} the low-word register has to have an even register code, and the
+ // high-word has to be in the next higher register. To avoid complicated
+ // register allocation code here, we just assign fixed registers to all
+ // values here, and then move all values into the correct register.
+ Register dst_addr = r0;
+ Register offset = r1;
+ Register result_low = r4;
+ Register result_high = r5;
+ Register new_value_low = r2;
+ Register new_value_high = r3;
+ Register store_result = r6;
+ Register expected_low = r8;
+ Register expected_high = r9;
+
+ // We spill all registers, so that we can re-assign them afterwards.
+ __ SpillRegisters(dst_addr, offset, result_low, result_high, new_value_low,
+ new_value_high, store_result, expected_low, expected_high);
+
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {LiftoffRegister::ForPair(new_value_low, new_value_high), new_value,
+ kWasmI64},
+ {LiftoffRegister::ForPair(expected_low, expected_high), expected,
+ kWasmI64},
+ {LiftoffRegister(dst_addr), LiftoffRegister(dst_addr_reg), kWasmI32},
+ {LiftoffRegister(offset),
+ LiftoffRegister(offset_reg != no_reg ? offset_reg : offset), kWasmI32}};
+ __ ParallelRegisterMove(ArrayVector(reg_moves));
+
+ {
+ UseScratchRegisterScope temps(lasm);
+ Register temp = liftoff::CalculateActualAddress(
+ lasm, &temps, dst_addr, offset_reg == no_reg ? no_reg : offset,
+ offset_imm, dst_addr);
+ // Make sure the actual address is stored in the right register.
+ DCHECK_EQ(dst_addr, temp);
+ USE(temp);
+ }
+
+ Label retry;
+ Label done;
+ __ dmb(ISH);
+ __ bind(&retry);
+ __ ldrexd(result_low, result_high, dst_addr);
+ __ cmp(result_low, expected_low);
+ __ b(ne, &done);
+ __ cmp(result_high, expected_high);
+ __ b(ne, &done);
+ __ strexd(store_result, new_value_low, new_value_high, dst_addr);
+ __ cmp(store_result, Operand(0));
+ __ b(ne, &retry);
+ __ dmb(ISH);
+ __ bind(&done);
+
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves_result[]{
+ {result, LiftoffRegister::ForPair(result_low, result_high), kWasmI64}};
+ __ ParallelRegisterMove(ArrayVector(reg_moves_result));
+}
+#undef __
+} // namespace liftoff
+
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
-}
-
-void LiftoffAssembler::AtomicFence() { dmb(ISH); }
+ if (type.value() == StoreType::kI64Store) {
+ liftoff::AtomicI64CompareExchange(this, dst_addr, offset_reg, offset_imm,
+ expected, new_value, result);
+ return;
+ }
-void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx,
- ValueType type) {
- int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
- MemOperand src(fp, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- ldr(dst.gp(), src);
- break;
- case ValueType::kI64:
- ldr(dst.low_gp(), src);
- ldr(dst.high_gp(), MemOperand(fp, offset + kSystemPointerSize));
- break;
- case ValueType::kF32:
- vldr(liftoff::GetFloatRegister(dst.fp()), src);
+ // The other versions of CompareExchange can share code, but need special load
+ // and store instructions.
+ void (Assembler::*load)(Register, Register, Condition) = nullptr;
+ void (Assembler::*store)(Register, Register, Register, Condition) = nullptr;
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst_addr, offset_reg);
+ // We need to remember the high word of {result}, so we can set it to zero in
+ // the end if necessary.
+ Register result_high = no_reg;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ result_high = result.high_gp();
+ result = result.low();
+ new_value = new_value.low();
+ expected = expected.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ load = &Assembler::ldrexb;
+ store = &Assembler::strexb;
+ // We have to clear the high bits of {expected}, as we can only do a
+ // 32-bit comparison. If the {expected} register is used, we spill it
+ // first.
+ if (cache_state()->is_used(expected)) {
+ SpillRegister(expected);
+ }
+ uxtb(expected.gp(), expected.gp());
break;
- case ValueType::kF64:
- vldr(dst.fp(), src);
+ case StoreType::kI64Store16:
+ result_high = result.high_gp();
+ result = result.low();
+ new_value = new_value.low();
+ expected = expected.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ load = &Assembler::ldrexh;
+ store = &Assembler::strexh;
+ // We have to clear the high bits of {expected}, as we can only do a
+ // 32-bit comparison. If the {expected} register is used, we spill it
+ // first.
+ if (cache_state()->is_used(expected)) {
+ SpillRegister(expected);
+ }
+ uxth(expected.gp(), expected.gp());
break;
- case ValueType::kS128: {
- UseScratchRegisterScope temps(this);
- Register addr = liftoff::CalculateActualAddress(this, &temps, src.rn(),
- no_reg, src.offset());
- vld1(Neon8, NeonListOperand(dst.low_fp(), 2), NeonMemOperand(addr));
+ case StoreType::kI64Store32:
+ result_high = result.high_gp();
+ result = result.low();
+ new_value = new_value.low();
+ expected = expected.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ load = &Assembler::ldrex;
+ store = &Assembler::strex;
break;
- }
default:
UNREACHABLE();
}
+ pinned.set(new_value);
+ pinned.set(expected);
+
+ Register result_reg = result.gp();
+ if (pinned.has(result)) {
+ result_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+ pinned.set(LiftoffRegister(result));
+ Register store_result = GetUnusedRegister(kGpReg, pinned).gp();
+
+ UseScratchRegisterScope temps(this);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, &temps, dst_addr, offset_reg, offset_imm);
+
+ Label retry;
+ Label done;
+ dmb(ISH);
+ bind(&retry);
+ (this->*load)(result.gp(), actual_addr, al);
+ cmp(result.gp(), expected.gp());
+ b(ne, &done);
+ (this->*store)(store_result, new_value.gp(), actual_addr, al);
+ cmp(store_result, Operand(0));
+ b(ne, &retry);
+ dmb(ISH);
+ bind(&done);
+
+ if (result.gp() != result_reg) {
+ mov(result.gp(), result_reg);
+ }
+ if (result_high != no_reg) {
+ LoadConstant(LiftoffRegister(result_high), WasmValue(0));
+ }
+}
+
+void LiftoffAssembler::AtomicFence() { dmb(ISH); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ MemOperand src(fp, (caller_slot_idx + 1) * kSystemPointerSize);
+ liftoff::Load(this, dst, src, type);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ MemOperand dst(fp, (caller_slot_idx + 1) * kSystemPointerSize);
+ liftoff::Store(this, src, dst, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
@@ -656,32 +1191,16 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+#ifdef DEBUG
+ // The {str} instruction needs a temp register when the immediate in the
+ // provided MemOperand does not fit into 12 bits. This happens for large stack
+ // frames. This DCHECK checks that the temp register is available when needed.
+ DCHECK(UseScratchRegisterScope{this}.CanAcquire());
+#endif
+ DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
- MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- str(reg.gp(), dst);
- break;
- case ValueType::kI64:
- str(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
- str(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
- break;
- case ValueType::kF32:
- vstr(liftoff::GetFloatRegister(reg.fp()), dst);
- break;
- case ValueType::kF64:
- vstr(reg.fp(), dst);
- break;
- case ValueType::kS128: {
- UseScratchRegisterScope temps(this);
- Register addr = liftoff::CalculateActualAddress(this, &temps, dst.rn(),
- no_reg, dst.offset());
- vst1(Neon8, NeonListOperand(reg.low_fp(), 2), NeonMemOperand(addr));
- break;
- }
- default:
- UNREACHABLE();
- }
+ MemOperand dst(fp, -offset);
+ liftoff::Store(this, reg, dst, type);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -717,32 +1236,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- ldr(reg.gp(), liftoff::GetStackSlot(offset));
- break;
- case ValueType::kI64:
- ldr(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
- ldr(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
- break;
- case ValueType::kF32:
- vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(offset));
- break;
- case ValueType::kF64:
- vldr(reg.fp(), liftoff::GetStackSlot(offset));
- break;
- case ValueType::kS128: {
- // Get memory address of slot to fill from.
- MemOperand slot = liftoff::GetStackSlot(offset);
- UseScratchRegisterScope temps(this);
- Register addr = liftoff::CalculateActualAddress(this, &temps, slot.rn(),
- no_reg, slot.offset());
- vld1(Neon8, NeonListOperand(reg.low_fp(), 2), NeonMemOperand(addr));
- break;
- }
- default:
- UNREACHABLE();
- }
+ liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -791,27 +1285,27 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
Register rhs) { \
instruction(dst, lhs, rhs); \
}
-#define I32_BINOP_I(name, instruction) \
- I32_BINOP(name, instruction) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- int32_t imm) { \
- instruction(dst, lhs, Operand(imm)); \
- }
-#define I32_SHIFTOP(name, instruction) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- UseScratchRegisterScope temps(this); \
- Register scratch = temps.Acquire(); \
- and_(scratch, amount, Operand(0x1f)); \
- instruction(dst, src, Operand(scratch)); \
- } \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- int32_t amount) { \
- if (V8_LIKELY((amount & 31) != 0)) { \
- instruction(dst, src, Operand(amount & 31)); \
- } else if (dst != src) { \
- mov(dst, src); \
- } \
+#define I32_BINOP_I(name, instruction) \
+ I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount) { \
+ UseScratchRegisterScope temps(this); \
+ Register scratch = temps.Acquire(); \
+ and_(scratch, amount, Operand(0x1f)); \
+ instruction(dst, src, Operand(scratch)); \
+ } \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
+ int32_t amount) { \
+ if (V8_LIKELY((amount & 31) != 0)) { \
+ instruction(dst, src, Operand(amount & 31)); \
+ } else if (dst != src) { \
+ mov(dst, src); \
+ } \
}
#define FP32_UNOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
@@ -997,8 +1491,8 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -1057,8 +1551,8 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount);
}
-void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.low_gp()} will still be needed after writing {dst.high_gp()}.
Register src_low =
@@ -1072,8 +1566,8 @@ void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount);
}
-void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.high_gp()} will still be needed after writing {dst.low_gp()}.
Register src_high =
@@ -1087,8 +1581,8 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount);
}
-void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.high_gp()} will still be needed after writing {dst.low_gp()}.
Register src_high =
@@ -1562,17 +2056,33 @@ void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.fp(), liftoff::GetSimd128Register(lhs.low_fp()),
- imm_lane_idx);
+ ExtractLane(dst.fp(), liftoff::GetSimd128Register(lhs), imm_lane_idx);
}
void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(src1.low_fp()), src2.fp(),
- imm_lane_idx);
+ ReplaceLane(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1), src2.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vabs(dst.low_fp(), src.low_fp());
+ vabs(dst.high_fp(), src.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vneg(dst.low_fp(), src.low_fp());
+ vneg(dst.high_fp(), src.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vsqrt(dst.low_fp(), src.low_fp());
+ vsqrt(dst.high_fp(), src.high_fp());
}
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1589,52 +2099,138 @@ void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "f64x2mul");
+ vmul(dst.low_fp(), lhs.low_fp(), rhs.low_fp());
+ vmul(dst.high_fp(), lhs.high_fp(), rhs.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vdiv(dst.low_fp(), lhs.low_fp(), rhs.low_fp());
+ vdiv(dst.high_fp(), lhs.high_fp(), rhs.high_fp());
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ Simd128Register left = liftoff::GetSimd128Register(lhs);
+ Simd128Register right = liftoff::GetSimd128Register(rhs);
+
+ liftoff::EmitFloatMinOrMax(this, dest.low(), left.low(), right.low(),
+ liftoff::MinOrMax::kMin);
+ liftoff::EmitFloatMinOrMax(this, dest.high(), left.high(), right.high(),
+ liftoff::MinOrMax::kMin);
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ Simd128Register left = liftoff::GetSimd128Register(lhs);
+ Simd128Register right = liftoff::GetSimd128Register(rhs);
+
+ liftoff::EmitFloatMinOrMax(this, dest.low(), left.low(), right.low(),
+ liftoff::MinOrMax::kMax);
+ liftoff::EmitFloatMinOrMax(this, dest.high(), left.high(), right.high(),
+ liftoff::MinOrMax::kMax);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- vdup(Neon32, liftoff::GetSimd128Register(dst.low_fp()), src.fp(), 0);
+ vdup(Neon32, liftoff::GetSimd128Register(dst), src.fp(), 0);
}
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
ExtractLane(liftoff::GetFloatRegister(dst.fp()),
- liftoff::GetSimd128Register(lhs.low_fp()), imm_lane_idx);
+ liftoff::GetSimd128Register(lhs), imm_lane_idx);
}
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(src1.low_fp()),
+ ReplaceLane(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1),
liftoff::GetFloatRegister(src2.fp()), imm_lane_idx);
}
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vabs(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vneg(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+  // The list of d registers available to us is from d0 to d15, each of which
+  // maps to 2 s registers.
+ LowDwVfpRegister dst_low = LowDwVfpRegister::from_code(dst.low_fp().code());
+ LowDwVfpRegister src_low = LowDwVfpRegister::from_code(src.low_fp().code());
+
+ LowDwVfpRegister dst_high = LowDwVfpRegister::from_code(dst.high_fp().code());
+ LowDwVfpRegister src_high = LowDwVfpRegister::from_code(src.high_fp().code());
+
+ vsqrt(dst_low.low(), src_low.low());
+ vsqrt(dst_low.high(), src_low.high());
+ vsqrt(dst_high.low(), src_high.low());
+ vsqrt(dst_high.high(), src_high.high());
+}
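
ARMv7 NEON has no full-precision vector square root or divide, so this lowering (and emit_f32x4_div below) visits the four f32 lanes with scalar VFP instructions; the Q register is viewed as two D registers, each split into a pair of S registers, which is why only the d0-d15 range aliased by S registers is usable here. A minimal scalar model of the four vsqrt calls above, assuming only the standard lane ordering (helper name is illustrative, not V8 code):

#include <cmath>

// Hedged per-lane model of emit_f32x4_sqrt: dst[i] = sqrt(src[i]) for i = 0..3.
void F32x4SqrtScalar(float dst[4], const float src[4]) {
  for (int i = 0; i < 4; ++i) dst[i] = std::sqrt(src[i]);
}
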
+
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vadd(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vadd(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vsub(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vsub(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "f32x4mul");
+ vmul(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+  // The list of d registers available to us is from d0 to d15, each of which
+  // maps to 2 s registers.
+ LowDwVfpRegister dst_low = LowDwVfpRegister::from_code(dst.low_fp().code());
+ LowDwVfpRegister lhs_low = LowDwVfpRegister::from_code(lhs.low_fp().code());
+ LowDwVfpRegister rhs_low = LowDwVfpRegister::from_code(rhs.low_fp().code());
+
+ LowDwVfpRegister dst_high = LowDwVfpRegister::from_code(dst.high_fp().code());
+ LowDwVfpRegister lhs_high = LowDwVfpRegister::from_code(lhs.high_fp().code());
+ LowDwVfpRegister rhs_high = LowDwVfpRegister::from_code(rhs.high_fp().code());
+
+ vdiv(dst_low.low(), lhs_low.low(), rhs_low.low());
+ vdiv(dst_low.high(), lhs_low.high(), rhs_low.high());
+ vdiv(dst_high.low(), lhs_high.low(), rhs_high.low());
+ vdiv(dst_high.high(), lhs_high.high(), rhs_high.high());
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Simd128Register dst_simd = liftoff::GetSimd128Register(dst.low_fp());
+ Simd128Register dst_simd = liftoff::GetSimd128Register(dst);
vdup(Neon32, dst_simd, src.low_gp());
ReplaceLane(dst_simd, dst_simd, src.high_gp(), NeonS32, 1);
ReplaceLane(dst_simd, dst_simd, src.high_gp(), NeonS32, 3);
@@ -1643,9 +2239,9 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.low_gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ ExtractLane(dst.low_gp(), liftoff::GetSimd128Register(lhs), NeonS32,
imm_lane_idx * 2);
- ExtractLane(dst.high_gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ ExtractLane(dst.high_gp(), liftoff::GetSimd128Register(lhs), NeonS32,
imm_lane_idx * 2 + 1);
}
@@ -1653,41 +2249,97 @@ void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- Simd128Register dst_simd = liftoff::GetSimd128Register(dst.low_fp());
- Simd128Register src1_simd = liftoff::GetSimd128Register(src1.low_fp());
+ Simd128Register dst_simd = liftoff::GetSimd128Register(dst);
+ Simd128Register src1_simd = liftoff::GetSimd128Register(src1);
ReplaceLane(dst_simd, src1_simd, src2.low_gp(), NeonS32, imm_lane_idx * 2);
ReplaceLane(dst_simd, dst_simd, src2.high_gp(), NeonS32,
imm_lane_idx * 2 + 1);
}
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ QwNeonRegister zero =
+ dst == src ? temps.AcquireQ() : liftoff::GetSimd128Register(dst);
+ vmov(zero, uint64_t{0});
+ vqsub(NeonS64, liftoff::GetSimd128Register(dst), zero,
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i64x2_shli");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vadd(Neon64, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vadd(Neon64, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vsub(Neon64, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vsub(Neon64, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2mul");
+ UseScratchRegisterScope temps(this);
+
+ QwNeonRegister dst_neon = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+
+ // These temporary registers will be modified. We can directly modify lhs and
+  // rhs if they are not used, saving on temporaries.
+ QwNeonRegister tmp1 = left;
+ QwNeonRegister tmp2 = right;
+
+ if (cache_state()->is_used(lhs) && cache_state()->is_used(rhs)) {
+ tmp1 = temps.AcquireQ();
+ // We only have 1 scratch Q register, so acquire another ourselves.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ tmp2 = liftoff::GetSimd128Register(unused_pair);
+ } else if (cache_state()->is_used(lhs)) {
+ tmp1 = temps.AcquireQ();
+ } else if (cache_state()->is_used(rhs)) {
+ tmp2 = temps.AcquireQ();
+ }
+
+ // Algorithm from code-generator-arm.cc, refer to comments there for details.
+ if (tmp1 != left) {
+ vmov(tmp1, left);
+ }
+ if (tmp2 != right) {
+ vmov(tmp2, right);
+ }
+
+ vtrn(Neon32, tmp1.low(), tmp1.high());
+ vtrn(Neon32, tmp2.low(), tmp2.high());
+
+ vmull(NeonU32, dst_neon, tmp1.low(), tmp2.high());
+ vmlal(NeonU32, dst_neon, tmp1.high(), tmp2.low());
+ vshl(NeonU64, dst_neon, dst_neon, 32);
+
+ vmlal(NeonU32, dst_neon, tmp1.low(), tmp2.low());
}
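
The vtrn/vmull/vmlal sequence above is the usual 64 x 64 -> 64 multiply built from 32-bit halves, per the code-generator-arm.cc comments it references. A hedged scalar sketch of what each 64-bit lane ends up holding (illustrative helper, not V8 code):

#include <cstdint>

// Per-lane model of emit_i64x2_mul: (a * b) mod 2^64 from 32-bit halves.
uint64_t I64LaneMul(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  // vmull/vmlal form the cross products, vshl moves their sum into the high
  // 32 bits, and the final vmlal adds the low product.
  return ((a_lo * b_hi + a_hi * b_lo) << 32) + a_lo * b_lo;
}
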
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- vdup(Neon32, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+ vdup(Neon32, liftoff::GetSimd128Register(dst), src.gp());
}
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS32,
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs), NeonS32,
imm_lane_idx);
}
@@ -1695,65 +2347,179 @@ void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS32,
+ ReplaceLane(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1), src2.gp(), NeonS32,
imm_lane_idx);
}
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vneg(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i32x4_shli");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vadd(Neon32, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vadd(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vsub(Neon32, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vsub(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4mul");
+ vmul(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonU32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonU32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- vdup(Neon16, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+ vdup(Neon16, liftoff::GetSimd128Register(dst), src.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vneg(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i16x8_shli");
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vadd(Neon16, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vadd(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqadd(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vsub(Neon16, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vsub(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqsub(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqsub(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8mul");
+ vmul(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqadd(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonU16,
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs), NeonU16,
imm_lane_idx);
}
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS16,
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs), NeonS16,
imm_lane_idx);
}
@@ -1761,56 +2527,417 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS16,
+ ReplaceLane(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1), src2.gp(), NeonS16,
imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- vdup(Neon8, liftoff::GetSimd128Register(dst.low_fp()), src.gp());
+ vdup(Neon8, liftoff::GetSimd128Register(dst), src.gp());
}
void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonU8,
- imm_lane_idx);
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs), NeonU8, imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs.low_fp()), NeonS8,
- imm_lane_idx);
+ ExtractLane(dst.gp(), liftoff::GetSimd128Register(lhs), NeonS8, imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- ReplaceLane(liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(src1.low_fp()), src2.gp(), NeonS8,
+ ReplaceLane(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1), src2.gp(), NeonS8,
imm_lane_idx);
}
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vneg(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i8x16_shli");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vadd(Neon8, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vadd(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqadd(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- vsub(Neon8, liftoff::GetSimd128Register(dst.low_fp()),
- liftoff::GetSimd128Register(lhs.low_fp()),
- liftoff::GetSimd128Register(rhs.low_fp()));
+ vsub(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqsub(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqsub(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16mul");
+ vmul(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vqadd(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmin(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vmax(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+ vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(dst));
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+ vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(dst));
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+ vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(dst));
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(NeonU32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(NeonU32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vceq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+ vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(dst));
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcgt(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(rhs),
+ liftoff::GetSimd128Register(lhs));
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vcge(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(rhs),
+ liftoff::GetSimd128Register(lhs));
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::F64x2Compare(this, dst, lhs, rhs, eq);
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::F64x2Compare(this, dst, lhs, rhs, ne);
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::F64x2Compare(this, dst, lhs, rhs, lt);
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::F64x2Compare(this, dst, lhs, rhs, le);
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ vmvn(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vand(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vorr(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ veor(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ if (dst != mask) {
+ vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(mask));
+ }
+ vbsl(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src1),
+ liftoff::GetSimd128Register(src2));
+}
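
vbsl selects bits from its two source operands using the bits already in the destination, which is why the mask is copied into dst first when they differ; the result matches wasm's v128.bitselect, and the arm64 Bsl lowering later in this patch follows the same copy-mask-first pattern. A hedged sketch of the bit math, shown per 32-bit chunk with an illustrative helper name:

#include <cstdint>

// Bitwise select: take bits of src1 where mask is 1, bits of src2 where it is 0.
uint32_t BitSelect(uint32_t src1, uint32_t src2, uint32_t mask) {
  return (mask & src1) | (~mask & src2);  // what vbsl computes once dst holds mask
}
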
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::S128NarrowOp(this, NeonS8, NeonS8, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::S128NarrowOp(this, NeonU8, NeonS8, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::S128NarrowOp(this, NeonS16, NeonS16, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::S128NarrowOp(this, NeonU16, NeonS16, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS8, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS8, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU8, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU8, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS16, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS16, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU16, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU16, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vbic(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vrhadd(NeonU8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ vrhadd(NeonU16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vabs(Neon8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vabs(Neon16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vabs(Neon32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -2038,7 +3165,7 @@ void LiftoffStackSlots::Construct() {
asm_->vpush(src.reg().fp());
break;
case ValueType::kS128:
- asm_->vpush(liftoff::GetSimd128Register(src.reg().low_fp()));
+ asm_->vpush(liftoff::GetSimd128Register(src.reg()));
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index bcf78184b1..9c142e4ad0 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -27,7 +27,7 @@ namespace liftoff {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 | ^
@@ -41,7 +41,9 @@ namespace liftoff {
constexpr int kInstanceOffset = 2 * kSystemPointerSize;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+ return MemOperand(offset > 0 ? fp : sp, -offset);
+}
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -354,37 +356,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -404,6 +407,13 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
+ Str(liftoff::GetRegFromType(src, type), MemOperand(fp, offset));
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
UseScratchRegisterScope temps(this);
@@ -478,13 +488,21 @@ void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
}
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+  // Zero 'size' bytes *below* 'start'; the byte at offset 'start' is untouched.
+ DCHECK_LE(0, start);
DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size);
int max_stp_offset = -start - size;
+ // We check IsImmLSUnscaled(-start-12) because str only allows for unscaled
+ // 9-bit immediate offset [-256,256]. If start is large enough, which can
+ // happen when a function has many params (>=32 i64), str cannot be encoded
+  // properly. We can use Str, which will generate more instructions, so we
+  // fall back to the general case below.
if (size <= 12 * kStackSlotSize &&
- IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
+ IsImmLSPair(max_stp_offset, kXRegSizeLog2) &&
+ IsImmLSUnscaled(-start - 12)) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per two slots (<= 7 instructions total).
STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
@@ -532,22 +550,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
Register rhs) { \
instruction(dst.W(), lhs.W(), rhs.W()); \
}
-#define I32_BINOP_I(name, instruction) \
- I32_BINOP(name, instruction) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- int32_t imm) { \
- instruction(dst.W(), lhs.W(), Immediate(imm)); \
+#define I32_BINOP_I(name, instruction) \
+ I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst.W(), lhs.W(), Immediate(imm)); \
}
#define I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
instruction(dst.gp().X(), lhs.gp().X(), rhs.gp().X()); \
}
-#define I64_BINOP_I(name, instruction) \
- I64_BINOP(name, instruction) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- int32_t imm) { \
- instruction(dst.gp().X(), lhs.gp().X(), imm); \
+#define I64_BINOP_I(name, instruction) \
+ I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp().X(), lhs.gp().X(), imm); \
}
#define FP32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
@@ -577,22 +595,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
instruction(dst.D(), src.D()); \
return true; \
}
-#define I32_SHIFTOP(name, instruction) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- instruction(dst.W(), src.W(), amount.W()); \
- } \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- int32_t amount) { \
- instruction(dst.W(), src.W(), amount & 31); \
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst.W(), src.W(), amount.W()); \
+ } \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
+ int32_t amount) { \
+ instruction(dst.W(), src.W(), amount & 31); \
}
#define I64_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount) { \
instruction(dst.gp().X(), src.gp().X(), amount.X()); \
} \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- int32_t amount) { \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int32_t amount) { \
instruction(dst.gp().X(), src.gp().X(), amount & 63); \
}
@@ -1105,6 +1123,21 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V2D(), imm_lane_idx, src2.fp().V2D(), 0);
}
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fabs(dst.fp().V2D(), src.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fneg(dst.fp().V2D(), src.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fsqrt(dst.fp().V2D(), src.fp().V2D());
+}
+
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Fadd(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
@@ -1117,7 +1150,22 @@ void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "f64x2mul");
+ Fmul(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fdiv(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fmin(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fmax(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -1141,6 +1189,21 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V4S(), imm_lane_idx, src2.fp().V4S(), 0);
}
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fabs(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fneg(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fsqrt(dst.fp().V4S(), src.fp().V4S());
+}
+
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Fadd(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
@@ -1153,7 +1216,22 @@ void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "f32x4mul");
+ Fmul(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fdiv(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fmin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fmax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
@@ -1177,6 +1255,21 @@ void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V2D(), imm_lane_idx, src2.gp().X());
}
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Neg(dst.fp().V2D(), src.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i64x2_shli");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Add(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
@@ -1189,7 +1282,23 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2mul");
+ UseScratchRegisterScope temps(this);
+ VRegister tmp1 = temps.AcquireV(kFormat2D);
+ VRegister tmp2 = temps.AcquireV(kFormat2D);
+
+ // Algorithm copied from code-generator-arm64.cc with minor modifications:
+ // - 2 (max number of scratch registers in Liftoff) temporaries instead of 3
+ // - 1 more Umull instruction to calculate | cg | ae |,
+ // - so, we can no longer use Umlal in the last step, and use Add instead.
+ // Refer to comments there for details.
+ Xtn(tmp1.V2S(), lhs.fp().V2D());
+ Xtn(tmp2.V2S(), rhs.fp().V2D());
+ Umull(tmp1.V2D(), tmp1.V2S(), tmp2.V2S());
+ Rev64(tmp2.V4S(), rhs.fp().V4S());
+ Mul(tmp2.V4S(), tmp2.V4S(), lhs.fp().V4S());
+ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S());
+ Shll(dst.fp().V2D(), tmp2.V2S(), 32);
+ Add(dst.fp().V2D(), dst.fp().V2D(), tmp1.V2D());
}
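
For readers without code-generator-arm64.cc open, a hedged per-lane reading of the sequence above, writing each lhs lane as a_hi:a_lo and each rhs lane as b_hi:b_lo (the same 32-bit-halves decomposition sketched for the arm lowering earlier in this patch):

// Xtn    tmp1 <- a_lo                        (low halves of the lhs lanes)
// Xtn    tmp2 <- b_lo                        (low halves of the rhs lanes)
// Umull  tmp1 <- a_lo * b_lo                 (full 64-bit low products)
// Rev64  tmp2 <- rhs with 32-bit halves swapped within each lane
// Mul    tmp2 <- {a_lo * b_hi, a_hi * b_lo}  (truncated 32-bit cross products)
// Addp   tmp2 <- a_lo * b_hi + a_hi * b_lo   (per-lane cross-term sum)
// Shll   dst  <- (cross-term sum) << 32
// Add    dst  <- dst + a_lo * b_lo           == (lhs * rhs) mod 2^64 per lane
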
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -1213,6 +1322,21 @@ void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V4S(), imm_lane_idx, src2.gp().W());
}
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Neg(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i32x4_shli");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Add(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
@@ -1225,7 +1349,31 @@ void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4mul");
+ Mul(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umin(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
@@ -1255,19 +1403,82 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V8H(), imm_lane_idx, src2.gp().W());
}
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Neg(dst.fp().V8H(), src.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i16x8_shli");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Uqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8mul");
+ Mul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Uqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smin(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umin(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -1297,19 +1508,375 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
Mov(dst.fp().V16B(), imm_lane_idx, src2.gp().W());
}
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Neg(dst.fp().V16B(), src.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i8x16_shli");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Sqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16mul");
+ Mul(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Uqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smin(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umin(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Smax(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Umax(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+ Mvn(dst.fp().V16B(), dst.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmgt(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhi(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmge(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhs(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+ Mvn(dst.fp().V8H(), dst.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmgt(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhi(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmge(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhs(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+ Mvn(dst.fp().V4S(), dst.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmgt(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhi(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmge(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmhs(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
+ Mvn(dst.fp().V4S(), dst.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmgt(dst.fp().V4S(), rhs.fp().V4S(), lhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmge(dst.fp().V4S(), rhs.fp().V4S(), lhs.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+ Mvn(dst.fp().V2D(), dst.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmgt(dst.fp().V2D(), rhs.fp().V2D(), lhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Fcmge(dst.fp().V2D(), rhs.fp().V2D(), lhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ Mvn(dst.fp().V16B(), src.fp().V16B());
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ And(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Orr(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Eor(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ if (dst != mask) {
+ Mov(dst.fp().V16B(), mask.fp().V16B());
+ }
+ Bsl(dst.fp().V16B(), src1.fp().V16B(), src2.fp().V16B());
+}
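
Bsl keeps bits of its first source where the destination already holds a 1 and bits of its second source elsewhere, which is why the mask is copied into dst first when they differ. A minimal sketch of the bit-select semantics on a single 64-bit chunk (the helper name is illustrative, not from the patch):

#include <cstdint>

// Bitwise select: take src1 where the mask bit is 1, src2 where it is 0.
uint64_t BitSelect(uint64_t mask, uint64_t src1, uint64_t src2) {
  return (mask & src1) | (~mask & src2);
}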
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireV(kFormat8H);
+ VRegister right = rhs.fp().V8H();
+ if (dst == rhs) {
+ Mov(tmp, right);
+ right = tmp;
+ }
+ Sqxtn(dst.fp().V8B(), lhs.fp().V8H());
+ Sqxtn2(dst.fp().V16B(), right);
+}
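
Sqxtn overwrites the low half of dst before Sqxtn2 consumes the second operand, so when dst aliases rhs the right-hand input is parked in a scratch register first. The per-lane operation itself is a signed saturating 16-to-8-bit narrow; a scalar sketch (illustrative helper, not V8 code):

#include <algorithm>
#include <cstdint>

// Signed saturating narrow: values outside [-128, 127] clamp to the bound.
int8_t SaturateS16ToS8(int16_t v) {
  return static_cast<int8_t>(
      std::min<int16_t>(127, std::max<int16_t>(-128, v)));
}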
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireV(kFormat8H);
+ VRegister right = rhs.fp().V8H();
+ if (dst == rhs) {
+ Mov(tmp, right);
+ right = tmp;
+ }
+ Sqxtun(dst.fp().V8B(), lhs.fp().V8H());
+ Sqxtun2(dst.fp().V16B(), right);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireV(kFormat4S);
+ VRegister right = rhs.fp().V4S();
+ if (dst == rhs) {
+ Mov(tmp, right);
+ right = tmp;
+ }
+ Sqxtn(dst.fp().V4H(), lhs.fp().V4S());
+ Sqxtn2(dst.fp().V8H(), right);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireV(kFormat4S);
+ VRegister right = rhs.fp().V4S();
+ if (dst == rhs) {
+ Mov(tmp, right);
+ right = tmp;
+ }
+ Sqxtun(dst.fp().V4H(), lhs.fp().V4S());
+ Sqxtun2(dst.fp().V8H(), right);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp().V8H(), src.fp().V8B());
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl2(dst.fp().V8H(), src.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp().V8H(), src.fp().V8B());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl2(dst.fp().V8H(), src.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp().V4S(), src.fp().V4H());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl2(dst.fp().V4S(), src.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp().V4S(), src.fp().V4H());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl2(dst.fp().V4S(), src.fp().V8H());
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Bic(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Urhadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Urhadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Abs(dst.fp().V16B(), src.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Abs(dst.fp().V8H(), src.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Abs(dst.fp().V4S(), src.fp().V4S());
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 0172b282dc..7a1d629bf2 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -23,12 +23,14 @@ namespace liftoff {
// ebp-4 holds the stack marker, ebp-8 is the instance parameter.
constexpr int kInstanceOffset = 8;
-inline Operand GetStackSlot(int offset) { return Operand(ebp, -offset); }
+inline Operand GetStackSlot(int offset) {
+ return Operand(offset > 0 ? ebp : esp, -offset);
+}
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
- return Operand(ebp, -offset + half_offset);
+ return Operand(offset > 0 ? ebp : esp, -offset + half_offset);
}
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
@@ -491,37 +493,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -541,6 +544,13 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
type);
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ liftoff::Store(this, ebp, kSystemPointerSize * (caller_slot_idx + 1), src,
+ type);
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
if (needs_gp_reg_pair(type)) {
@@ -686,7 +696,7 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_addi(Register dst, Register lhs, int32_t imm) {
if (lhs != dst) {
lea(dst, Operand(lhs, imm));
} else {
@@ -829,7 +839,7 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::and_>(this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_andi(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::and_>(this, dst, lhs, imm);
}
@@ -837,7 +847,7 @@ void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::or_>(this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_ori(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::or_>(this, dst, lhs, imm);
}
@@ -845,7 +855,7 @@ void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xor_>(this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xor_>(this, dst, lhs, imm);
}
@@ -891,8 +901,8 @@ void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shl_cl);
}
-void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
+ int32_t amount) {
if (dst != src) mov(dst, src);
shl(dst, amount & 31);
}
@@ -902,8 +912,8 @@ void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::sar_cl);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
+ int32_t amount) {
if (dst != src) mov(dst, src);
sar(dst, amount & 31);
}
@@ -913,8 +923,8 @@ void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shr_cl);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_shri(Register dst, Register src,
+ int32_t amount) {
if (dst != src) mov(dst, src);
shr(dst, amount & 31);
}
@@ -1001,8 +1011,8 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -1137,8 +1147,8 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShlPair_cl);
}
-void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
amount &= 63;
if (amount >= 32) {
if (dst.high_gp() != src.low_gp()) mov(dst.high_gp(), src.low_gp());
@@ -1156,8 +1166,8 @@ void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::SarPair_cl);
}
-void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
amount &= 63;
if (amount >= 32) {
if (dst.low_gp() != src.high_gp()) mov(dst.low_gp(), src.high_gp());
@@ -1175,8 +1185,8 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair_cl);
}
-void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
amount &= 63;
if (amount >= 32) {
if (dst.low_gp() != src.high_gp()) mov(dst.low_gp(), src.high_gp());
@@ -1950,14 +1960,19 @@ void EmitSimdCommutativeBinOp(
template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
-void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
+void EmitSimdNonCommutativeBinOp(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs, base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
(assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
- } else if (lhs.fp() == rhs.fp()) {
- assm->pxor(dst.fp(), dst.fp());
- } else if (dst.fp() == rhs.fp()) {
+ return;
+ }
+
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
+ if (dst.fp() == rhs.fp()) {
assm->movaps(kScratchDoubleReg, rhs.fp());
assm->movaps(dst.fp(), lhs.fp());
(assm->*sse_op)(dst.fp(), kScratchDoubleReg);
@@ -1966,68 +1981,69 @@ void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), rhs.fp());
}
}
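
On the SSE fallback the two-operand form is destructive (dst = op(dst, src)), so a non-commutative operation must not clobber rhs when dst aliases it: rhs is saved to the scratch register before lhs is moved into dst. A rough value-level model of that register shuffle, with illustrative names that are not part of the patch:

// Model of the SSE path of EmitSimdNonCommutativeBinOp, using plain values
// instead of XMM registers.
template <typename T, typename Op>
void NonCommutativeBinOp(T* dst, const T* lhs, const T* rhs, Op op) {
  T scratch{};
  const T* right = rhs;
  if (dst == rhs) {   // rhs would be clobbered by the move below
    scratch = *rhs;
    right = &scratch;
  }
  if (dst != lhs) *dst = *lhs;
  *dst = op(*dst, *right);
}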
-} // namespace liftoff
-void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movddup(dst.fp(), src.fp());
-}
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister), uint8_t width>
+void EmitSimdShiftOp(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister operand, LiftoffRegister count) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ LiftoffRegister tmp =
+ assm->GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(count));
+ constexpr int mask = (1 << width) - 1;
-void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
+ assm->mov(tmp.gp(), count.gp());
+ assm->and_(tmp.gp(), Immediate(mask));
+ assm->Movd(kScratchDoubleReg, tmp.gp());
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufpd(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), operand.fp(), kScratchDoubleReg);
} else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufpd(dst.fp(), dst.fp(), imm_lane_idx);
+ if (dst.fp() != operand.fp()) assm->movaps(dst.fp(), operand.fp());
+ (assm->*sse_op)(dst.fp(), kScratchDoubleReg);
}
}
-void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(fanchenk): Use movlhps and blendpd
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, byte),
+ void (Assembler::*sse_op)(XMMRegister, byte), uint8_t width>
+void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister operand, int32_t count) {
+ constexpr int mask = (1 << width) - 1;
+ byte shift = static_cast<byte>(count & mask);
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00000000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01010000);
- } else {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00100000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01110000);
- }
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), operand.fp(), shift);
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- insertps(dst.fp(), src2.fp(), 0b00000000);
- insertps(dst.fp(), src2.fp(), 0b01010000);
- } else {
- insertps(dst.fp(), src2.fp(), 0b00100000);
- insertps(dst.fp(), src2.fp(), 0b01110000);
- }
+ if (dst.fp() != operand.fp()) assm->movaps(dst.fp(), operand.fp());
+ (assm->*sse_op)(dst.fp(), shift);
}
}
+} // namespace liftoff
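
Both shift helpers reduce the shift count modulo the lane width, as the wasm spec requires: the width template parameter is log2 of the lane size, so the mask is (1 << width) - 1. A one-line sketch of that arithmetic (the function name is illustrative, not from the patch):

#include <cstdint>

// Wasm SIMD shift counts wrap at the lane width, e.g. 35 -> 3 for i16x8.
uint32_t WasmSimdShiftCount(uint32_t count, uint32_t lane_width_log2) {
  return count & ((1u << lane_width_log2) - 1);
}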
-void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
- this, dst, lhs, rhs);
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
}
-void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vsubpd, &Assembler::subpd>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshuflw(dst.fp(), dst.fp(), 0);
+ Pshufd(dst.fp(), dst.fp(), 0);
}
-void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
- this, dst, lhs, rhs);
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshufd(dst.fp(), dst.fp(), 0);
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pinsrd(dst.fp(), src.low_gp(), 0);
+ Pinsrd(dst.fp(), src.high_gp(), 1);
+ Pshufd(dst.fp(), dst.fp(), 0x44);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -2043,80 +2059,670 @@ void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movddup(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqb, &Assembler::pcmpeqb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqb, &Assembler::pcmpeqb>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtb,
+ &Assembler::pcmpgtb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxub, &Assembler::pmaxub>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(dst.fp(), ref);
+ Pcmpeqb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsb, &Assembler::pminsb>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqb(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminub, &Assembler::pminub>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqw, &Assembler::pcmpeqw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqw, &Assembler::pcmpeqw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtw,
+ &Assembler::pcmpgtw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxuw, &Assembler::pmaxuw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(dst.fp(), ref);
+ Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsw, &Assembler::pminsw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminuw, &Assembler::pminuw>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqw(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqd, &Assembler::pcmpeqd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqd, &Assembler::pcmpeqd>(
+ this, dst, lhs, rhs);
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtd,
+ &Assembler::pcmpgtd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxud, &Assembler::pmaxud>(
+ this, dst, lhs, rhs);
+ Pcmpeqd(dst.fp(), ref);
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsd, &Assembler::pminsd>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqd(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ ref = liftoff::kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminud, &Assembler::pminud>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqd(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpneqps,
+ &Assembler::cmpneqps>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpltps,
+ &Assembler::cmpltps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpleps,
+ &Assembler::cmpleps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqpd, &Assembler::cmpeqpd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpneqpd,
+ &Assembler::cmpneqpd>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpltpd,
+ &Assembler::cmpltpd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmplepd,
+ &Assembler::cmplepd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ if (dst.fp() != src.fp()) {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Pxor(dst.fp(), src.fp());
+ } else {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpand, &Assembler::pand>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpor, &Assembler::por>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpxor, &Assembler::pxor>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ vxorps(liftoff::kScratchDoubleReg, src1.fp(), src2.fp());
+ vandps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, mask.fp());
+ vxorps(dst.fp(), liftoff::kScratchDoubleReg, src2.fp());
+ } else {
+ movaps(liftoff::kScratchDoubleReg, src1.fp());
+ xorps(liftoff::kScratchDoubleReg, src2.fp());
+ andps(liftoff::kScratchDoubleReg, mask.fp());
+ if (dst.fp() != src2.fp()) movaps(dst.fp(), src2.fp());
+ xorps(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+}
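
The non-AVX branch uses the xor/and/xor formulation so the mask never has to be moved into the destination register. The identity it relies on, as a scalar sketch (helper name is illustrative, not V8 code):

#include <cstdint>

// src2 ^ ((src1 ^ src2) & mask) == (mask & src1) | (~mask & src2)
uint32_t SelectXorAndXor(uint32_t mask, uint32_t src1, uint32_t src2) {
  return src2 ^ ((src1 ^ src2) & mask);
}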
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psignb(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pxor(dst.fp(), dst.fp());
+ Psubb(dst.fp(), src.fp());
+ }
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp_simd =
+ GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
+ // Mask off the unwanted bits before word-shifting.
+ Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ mov(tmp.gp(), rhs.gp());
+ and_(tmp.gp(), Immediate(7));
+ add(tmp.gp(), Immediate(8));
+ Movd(tmp_simd.fp(), tmp.gp());
+ Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
+ Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
} else {
if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+ pand(dst.fp(), liftoff::kScratchDoubleReg);
}
+ sub(tmp.gp(), Immediate(8));
+ Movd(tmp_simd.fp(), tmp.gp());
+ Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
}
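
SSE has no per-byte shift, so the emitter above shifts 16-bit lanes instead and first clears the high bits of every byte that would otherwise leak into its neighbour; the mask built via Psrlw/Packuswb keeps the low (8 - shift) bits of each byte. A scalar model of one byte pair, assuming wasm's shift-count-mod-8 semantics (helper name is illustrative):

#include <cstdint>

// Per-byte left shift of the two bytes packed in one 16-bit word.
uint16_t ShlBytePairViaWord(uint16_t word, uint32_t shift) {
  shift &= 7;
  uint16_t keep = static_cast<uint16_t>(0xFFu >> shift);      // low 8-shift bits
  uint16_t mask = static_cast<uint16_t>(keep | (keep << 8));  // per byte
  return static_cast<uint16_t>((word & mask) << shift);
}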
-void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ vpsllw(dst.fp(), lhs.fp(), shift);
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ psllw(dst.fp(), shift);
}
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ mov(tmp.gp(), mask);
+ Movd(liftoff::kScratchDoubleReg, tmp.gp());
+ Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
+ Pand(dst.fp(), liftoff::kScratchDoubleReg);
}
-void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vsubps, &Assembler::subps>(this, dst, lhs,
- rhs);
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubb, &Assembler::psubb>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
+ &Assembler::psubusb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ vpsrlw(tmp.fp(), lhs.fp(), 8);
+ vpsrlw(liftoff::kScratchDoubleReg, rhs.fp(), 8);
+ // t = I16x8Mul(t0, t1)
+ //    => __PP __PP ... __PP __PP
+ vpmullw(tmp.fp(), tmp.fp(), liftoff::kScratchDoubleReg);
+ // s = left * 256
+ vpsllw(liftoff::kScratchDoubleReg, lhs.fp(), 8);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ... pp__ pp__
+ vpmullw(dst.fp(), liftoff::kScratchDoubleReg, rhs.fp());
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ... 00pp 00pp
+ vpsrlw(dst.fp(), dst.fp(), 8);
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ... PP00 PP00
+ vpsllw(tmp.fp(), tmp.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ... PPpp PPpp
+ vpor(dst.fp(), dst.fp(), tmp.fp());
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+ // t = 00AA 00AA ... 00AA 00AA
+ // s = 00BB 00BB ... 00BB 00BB
+ movaps(tmp.fp(), dst.fp());
+ movaps(liftoff::kScratchDoubleReg, rhs.fp());
+ psrlw(tmp.fp(), 8);
+ psrlw(liftoff::kScratchDoubleReg, 8);
+ // dst = left * 256
+ psllw(dst.fp(), 8);
+ // t = I16x8Mul(t, s)
+ //    => __PP __PP ... __PP __PP
+ pmullw(tmp.fp(), liftoff::kScratchDoubleReg);
+ // dst = I16x8Mul(left * 256, right)
+ //    => pp__ pp__ ... pp__ pp__
+ pmullw(dst.fp(), rhs.fp());
+ // t = I16x8Shl(t, 8)
+ //    => PP00 PP00 ... PP00 PP00
+ psllw(tmp.fp(), 8);
+ // dst = I16x8Shr(dst, 8)
+ //    => 00pp 00pp ... 00pp 00pp
+ psrlw(dst.fp(), 8);
+ // dst = I16x8Or(dst, t)
+ //    => PPpp PPpp ... PPpp PPpp
+ por(dst.fp(), tmp.fp());
+ }
+}
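
The commented sequence above implements i8x16.mul with two 16-bit multiplies: the high bytes are multiplied directly, and the low bytes are multiplied after shifting the left operand up by 8 so the product's low byte lands in the upper half of the lane. A scalar model for one 16-bit lane holding the byte pair AAaa (illustrative, not V8 code):

#include <cstdint>

// High byte of the result = low 8 bits of A*B, low byte = low 8 bits of a*b.
uint16_t MulBytePairViaWords(uint16_t left, uint16_t right) {
  uint16_t t = static_cast<uint16_t>((left >> 8) * (right >> 8));  // __PP
  uint16_t d = static_cast<uint16_t>(
      static_cast<uint16_t>(left << 8) * right);                   // pp__
  d = static_cast<uint16_t>(d >> 8);                               // 00pp
  t = static_cast<uint16_t>(t << 8);                               // PP00
  return static_cast<uint16_t>(d | t);                             // PPpp
}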
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsb, &Assembler::pminsb>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminub, &Assembler::pminub>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Pinsrd(dst.fp(), src.low_gp(), 0);
- Pinsrd(dst.fp(), src.high_gp(), 1);
- Pshufd(dst.fp(), dst.fp(), 0x44);
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsb, &Assembler::pmaxsb>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
-void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrd(dst.low_gp(), lhs.fp(), imm_lane_idx * 2);
- Pextrd(dst.high_gp(), lhs.fp(), imm_lane_idx * 2 + 1);
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxub, &Assembler::pmaxub>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psignw(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pxor(dst.fp(), dst.fp());
+ Psubw(dst.fp(), src.fp());
+ }
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsllw, &Assembler::psllw, 4>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubw, &Assembler::psubw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
+ &Assembler::psubusw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsw, &Assembler::pminsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminuw, &Assembler::pminuw>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsw, &Assembler::pmaxsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxuw, &Assembler::pmaxuw>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psignd(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pxor(dst.fp(), dst.fp());
+ Psubd(dst.fp(), src.fp());
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpslld, &Assembler::pslld, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubd, &Assembler::psubd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsd, &Assembler::pminsd>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminud, &Assembler::pminud>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsd, &Assembler::pmaxsd>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxud, &Assembler::pmaxud>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ DoubleRegister reg =
+ dst.fp() == src.fp() ? liftoff::kScratchDoubleReg : dst.fp();
+ Pxor(reg, reg);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrd(dst.fp(), src1.fp(), src2.low_gp(), imm_lane_idx * 2);
- vpinsrd(dst.fp(), dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
+ vpsubq(dst.fp(), reg, src.fp());
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrd(dst.fp(), src2.low_gp(), imm_lane_idx * 2);
- pinsrd(dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
+ psubq(reg, src.fp());
+ if (dst.fp() != reg) movapd(dst.fp(), reg);
}
}
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsllq, &Assembler::psllq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2125,8 +2731,8 @@ void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubq, &Assembler::psubq>(this, dst, lhs,
- rhs);
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubq, &Assembler::psubq>(
+ this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2156,106 +2762,352 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Paddq(dst.fp(), dst.fp(), tmp2.fp());
}
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshufd(dst.fp(), dst.fp(), 0);
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Andps(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psrld(dst.fp(), dst.fp(), 1);
+ Andps(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 31);
+ Xorps(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Pslld(dst.fp(), dst.fp(), 31);
+ Xorps(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sqrtps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vsubps, &Assembler::subps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vdivps, &Assembler::divps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // The minps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minps in both orders, merge the results, and adjust.
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ vminps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vminps(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movaps(liftoff::kScratchDoubleReg, src);
+ minps(liftoff::kScratchDoubleReg, dst.fp());
+ minps(dst.fp(), src);
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ movaps(liftoff::kScratchDoubleReg, lhs.fp());
+ minps(liftoff::kScratchDoubleReg, rhs.fp());
+ movaps(dst.fp(), rhs.fp());
+ minps(dst.fp(), lhs.fp());
}
+ // propagate -0's and NaNs, which may be non-canonical.
+ Orps(liftoff::kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ Orps(liftoff::kScratchDoubleReg, dst.fp());
+ Psrld(dst.fp(), dst.fp(), byte{10});
+ Andnps(dst.fp(), liftoff::kScratchDoubleReg);
}
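
The fix-up tail above (Orps/Cmpunordps/Psrld/Andnps) is needed because a single minps neither reliably propagates NaNs nor orders -0 below +0. Per lane, the result being produced matches the following scalar model of the wasm f32x4.min semantics (a sketch of the spec behaviour, not V8 code):

#include <cmath>
#include <limits>

// wasm f32x4.min per lane: NaN if either input is NaN; -0 beats +0.
float WasmF32Min(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();  // canonical quiet NaN
  }
  if (a == 0.0f && b == 0.0f) {
    return std::signbit(a) ? a : b;  // prefer -0 over +0
  }
  return a < b ? a : b;
}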
-void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
- this, dst, lhs, rhs);
+ // The maxps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vmaxps(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movaps(liftoff::kScratchDoubleReg, src);
+ maxps(liftoff::kScratchDoubleReg, dst.fp());
+ maxps(dst.fp(), src);
+ } else {
+ movaps(liftoff::kScratchDoubleReg, lhs.fp());
+ maxps(liftoff::kScratchDoubleReg, rhs.fp());
+ movaps(dst.fp(), rhs.fp());
+ maxps(dst.fp(), lhs.fp());
+ }
+ // Find discrepancies.
+ Xorps(dst.fp(), liftoff::kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ Orps(liftoff::kScratchDoubleReg, dst.fp());
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ Psrld(dst.fp(), dst.fp(), byte{10});
+ Andnps(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Andpd(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psrlq(dst.fp(), dst.fp(), 1);
+ Andpd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubd, &Assembler::psubd>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 63);
+ Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psllq(dst.fp(), dst.fp(), 63);
+ Xorpd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sqrtpd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
- this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), 0);
- Pshufd(dst.fp(), dst.fp(), 0);
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vsubpd, &Assembler::subpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
- movsx_w(dst.gp(), dst.gp());
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vdivpd, &Assembler::divpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minpd in both orders, merge the results, and adjust.
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ vminpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vminpd(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movapd(liftoff::kScratchDoubleReg, src);
+ minpd(liftoff::kScratchDoubleReg, dst.fp());
+ minpd(dst.fp(), src);
} else {
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ movapd(liftoff::kScratchDoubleReg, lhs.fp());
+ minpd(liftoff::kScratchDoubleReg, rhs.fp());
+ movapd(dst.fp(), rhs.fp());
+ minpd(dst.fp(), lhs.fp());
}
+ // propagate -0's and NaNs, which may be non-canonical.
+ Orpd(liftoff::kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ Orpd(liftoff::kScratchDoubleReg, dst.fp());
+ Psrlq(dst.fp(), 13);
+ Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
}
-void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
- this, dst, lhs, rhs);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxpd in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movapd(liftoff::kScratchDoubleReg, src);
+ maxpd(liftoff::kScratchDoubleReg, dst.fp());
+ maxpd(dst.fp(), src);
+ } else {
+ movapd(liftoff::kScratchDoubleReg, lhs.fp());
+ maxpd(liftoff::kScratchDoubleReg, rhs.fp());
+ movapd(dst.fp(), rhs.fp());
+ maxpd(dst.fp(), lhs.fp());
+ }
+ // Find discrepancies.
+ Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ Orpd(liftoff::kScratchDoubleReg, dst.fp());
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subpd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ Psrlq(dst.fp(), 13);
+ Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpacksswb,
+ &Assembler::packsswb>(this, dst, lhs,
+ rhs);
}
-void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubw, &Assembler::psubw>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackuswb,
+ &Assembler::packuswb>(this, dst, lhs,
+ rhs);
}
-void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackssdw,
+ &Assembler::packssdw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackusdw,
+ &Assembler::packusdw>(this, dst, lhs,
+ rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxbw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovsxbw(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxbw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovzxbw(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxwd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovsxwd(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxwd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovzxwd(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vandnps, &Assembler::andnps>(
+ this, dst, rhs, lhs);
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpavgb, &Assembler::pavgb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpavgw, &Assembler::pavgw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsb(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Register byte_reg = liftoff::GetTmpByteRegister(this, dst.gp());
+ Pextrb(byte_reg, lhs.fp(), imm_lane_idx);
+ movsx_b(dst.gp(), byte_reg);
}
void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
@@ -2264,11 +3116,54 @@ void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
}
-void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
- movsx_b(dst.gp(), dst.gp());
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsx_w(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.low_gp(), lhs.fp(), imm_lane_idx * 2);
+ Pextrd(dst.high_gp(), lhs.fp(), imm_lane_idx * 2 + 1);
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufpd(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufpd(dst.fp(), dst.fp(), imm_lane_idx);
+ }
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -2285,77 +3180,87 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
- this, dst, lhs, rhs);
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ }
}
-void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubb, &Assembler::psubb>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ }
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
- LiftoffRegister tmp =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- vpsrlw(tmp.fp(), lhs.fp(), 8);
- vpsrlw(liftoff::kScratchDoubleReg, rhs.fp(), 8);
- // t = I16x8Mul(t0, t1)
- //    => __PP __PP ...  __PP  __PP
- vpmullw(tmp.fp(), tmp.fp(), liftoff::kScratchDoubleReg);
- // s = left * 256
- vpsllw(liftoff::kScratchDoubleReg, lhs.fp(), 8);
- // dst = I16x8Mul(left * 256, right)
- //    => pp__ pp__ ...  pp__  pp__
- vpmullw(dst.fp(), liftoff::kScratchDoubleReg, rhs.fp());
- // dst = I16x8Shr(dst, 8)
- //    => 00pp 00pp ...  00pp  00pp
- vpsrlw(dst.fp(), dst.fp(), 8);
- // t = I16x8Shl(t, 8)
- //    => PP00 PP00 ...  PP00  PP00
- vpsllw(tmp.fp(), tmp.fp(), 8);
- // dst = I16x8Or(dst, t)
- //    => PPpp PPpp ...  PPpp  PPpp
- vpor(dst.fp(), dst.fp(), tmp.fp());
+ vpinsrd(dst.fp(), src1.fp(), src2.low_gp(), imm_lane_idx * 2);
+ vpinsrd(dst.fp(), dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
} else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- // I16x8 view of I8x16
- // left = AAaa AAaa ... AAaa AAaa
- // right= BBbb BBbb ... BBbb BBbb
- // t = 00AA 00AA ... 00AA 00AA
- // s = 00BB 00BB ... 00BB 00BB
- movaps(tmp.fp(), dst.fp());
- movaps(liftoff::kScratchDoubleReg, rhs.fp());
- psrlw(tmp.fp(), 8);
- psrlw(liftoff::kScratchDoubleReg, 8);
- //Ā dstĀ =Ā leftĀ *Ā 256
- psllw(dst.fp(), 8);
- //Ā tĀ =Ā I16x8Mul(t,Ā s)
- //Ā Ā Ā Ā =>Ā __PPĀ __PPĀ ...Ā Ā __PPĀ Ā __PP
- pmullw(tmp.fp(), liftoff::kScratchDoubleReg);
- // dst = I16x8Mul(left * 256, right)
- //    => pp__ pp__ ...  pp__  pp__
- pmullw(dst.fp(), rhs.fp());
- // t = I16x8Shl(t, 8)
- //    => PP00 PP00 ...  PP00  PP00
- psllw(tmp.fp(), 8);
- // dst = I16x8Shr(dst, 8)
- //    => 00pp 00pp ...  00pp  00pp
- psrlw(dst.fp(), 8);
- // dst = I16x8Or(dst, t)
- //    => PPpp PPpp ...  PPpp  PPpp
- por(dst.fp(), tmp.fp());
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.low_gp(), imm_lane_idx * 2);
+ pinsrd(dst.fp(), src2.high_gp(), imm_lane_idx * 2 + 1);
+ }
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ // TODO(fanchenk): Use movlhps and blendpd
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (imm_lane_idx == 0) {
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00000000);
+ vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01010000);
+ } else {
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00100000);
+ vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01110000);
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ if (imm_lane_idx == 0) {
+ insertps(dst.fp(), src2.fp(), 0b00000000);
+ insertps(dst.fp(), src2.fp(), 0b01010000);
+ } else {
+ insertps(dst.fp(), src2.fp(), 0b00100000);
+ insertps(dst.fp(), src2.fp(), 0b01110000);
+ }
}
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 74df00590f..923d375064 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -12,6 +12,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"
@@ -517,7 +518,6 @@ LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
return reg;
}
case VarState::kRegister:
- cache_state_.dec_used(slot.reg());
return slot.reg();
case VarState::kIntConst: {
RegClass rc =
@@ -530,9 +530,28 @@ LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
UNREACHABLE();
}
+LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
+ RegPairHalf half) {
+ if (slot.is_reg()) {
+ return half == kLowWord ? slot.reg().low() : slot.reg().high();
+ }
+ LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ if (slot.is_stack()) {
+ FillI64Half(dst.gp(), slot.offset(), half);
+ return dst;
+ }
+ DCHECK(slot.is_const());
+ int32_t half_word =
+ static_cast<int32_t>(half == kLowWord ? slot.constant().to_i64()
+ : slot.constant().to_i64() >> 32);
+ LoadConstant(dst, WasmValue(half_word));
+ return dst;
+}
+
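// Standalone illustration (assumed names, not part of the assembler) of how
// LoadI64HalfIntoRegister above splits a 64-bit constant: the low word is the
// truncated value, the high word is the value arithmetically shifted by 32.
#include <cstdint>
#include <cstdio>

enum RegPairHalf { kLowWord, kHighWord };

int32_t HalfOf(int64_t value, RegPairHalf half) {
  return static_cast<int32_t>(half == kLowWord ? value : value >> 32);
}

int main() {
  int64_t v = INT64_C(0x1122334455667788);
  std::printf("low  = %08x\n", (unsigned)HalfOf(v, kLowWord));   // 55667788
  std::printf("high = %08x\n", (unsigned)HalfOf(v, kHighWord));  // 11223344
  return 0;
}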
LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
+ if (slot.is_reg()) cache_state_.dec_used(slot.reg());
cache_state_.stack_state.pop_back();
return LoadToRegister(slot, pinned);
}
@@ -541,6 +560,7 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
LiftoffRegList pinned) {
DCHECK_LT(index, cache_state_.stack_state.size());
VarState& slot = cache_state_.stack_state.end()[-1 - index];
+ if (slot.is_reg()) cache_state_.dec_used(slot.reg());
LiftoffRegister reg = LoadToRegister(slot, pinned);
if (!slot.is_reg()) {
slot.MakeRegister(reg);
@@ -548,6 +568,19 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
return reg;
}
+void LiftoffAssembler::PrepareLoopArgs(int num) {
+ for (int i = 0; i < num; ++i) {
+ VarState& slot = cache_state_.stack_state.end()[-1 - i];
+ if (!slot.is_const()) continue;
+ RegClass rc =
+ kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
+ LiftoffRegister reg = GetUnusedRegister(rc);
+ LoadConstant(reg, slot.constant());
+ slot.MakeRegister(reg);
+ cache_state_.inc_used(reg);
+ }
+}
+
void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
@@ -614,6 +647,24 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
+void LiftoffAssembler::ClearRegister(
+ Register reg, std::initializer_list<Register*> possible_uses,
+ LiftoffRegList pinned) {
+ if (cache_state()->is_used(LiftoffRegister(reg))) {
+ SpillRegister(LiftoffRegister(reg));
+ }
+ Register replacement = no_reg;
+ for (Register* use : possible_uses) {
+ if (reg != *use) continue;
+ if (replacement == no_reg) {
+ replacement = GetUnusedRegister(kGpReg, pinned).gp();
+ Move(replacement, reg, LiftoffAssembler::kWasmIntPtr);
+ }
+ // We cannot leave this loop early. There may be multiple uses of {reg}.
+ *use = replacement;
+ }
+}
+
namespace {
void PrepareStackTransfers(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
@@ -645,25 +696,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
DCHECK(!loc.IsAnyRegister());
RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
int reg_code = loc.AsRegister();
-
- // Initialize to anything, will be set in all branches below.
- LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
- if (!kSimpleFPAliasing && type == kWasmF32) {
- // Liftoff assumes a one-to-one mapping between float registers and
- // double registers, and so does not distinguish between f32 and f64
- // registers. The f32 register code must therefore be halved in order
- // to pass the f64 code to Liftoff.
- DCHECK_EQ(0, reg_code % 2);
- reg = LiftoffRegister::from_code(rc, (reg_code / 2));
- } else if (kNeedS128RegPair && type == kWasmS128) {
- // Similarly for double registers and SIMD registers, the SIMD code
- // needs to be doubled to pass the f64 code to Liftoff.
- reg = LiftoffRegister::ForFpPair(
- DoubleRegister::from_code(reg_code * 2));
- } else {
- reg = LiftoffRegister::from_code(rc, reg_code);
- }
-
+ LiftoffRegister reg =
+ LiftoffRegister::from_external_code(rc, type, reg_code);
param_regs->set(reg);
if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
@@ -761,7 +795,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
stack_slots.Construct();
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
-
// Pop parameters from the value stack.
cache_state_.stack_state.pop_back(num_params);
@@ -776,36 +809,46 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
void LiftoffAssembler::FinishCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
- const size_t return_count = sig->return_count();
- if (return_count != 0) {
- DCHECK_EQ(1, return_count);
- ValueType return_type = sig->GetReturn(0);
+ // Offset of the current return value relative to the stack pointer.
+ int return_offset = 0;
+ int call_desc_return_idx = 0;
+ for (ValueType return_type : sig->returns()) {
+ DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
const bool needs_gp_pair = needs_gp_reg_pair(return_type);
- const bool needs_fp_pair = needs_fp_reg_pair(return_type);
- DCHECK_EQ(needs_gp_pair ? 2 : 1, call_descriptor->ReturnCount());
- RegClass rc = needs_gp_pair
- ? kGpReg
- : needs_fp_pair ? kFpReg : reg_class_for(return_type);
-#if V8_TARGET_ARCH_ARM
- // If the return register was not d0 for f32, the code value would have to
- // be halved as is done for the parameter registers.
- DCHECK_EQ(call_descriptor->GetReturnLocation(0).AsRegister(), 0);
-#endif
- LiftoffRegister return_reg = LiftoffRegister::from_code(
- rc, call_descriptor->GetReturnLocation(0).AsRegister());
- DCHECK(GetCacheRegList(rc).has(return_reg));
- if (needs_gp_pair) {
- LiftoffRegister high_reg = LiftoffRegister::from_code(
- rc, call_descriptor->GetReturnLocation(1).AsRegister());
- DCHECK(GetCacheRegList(rc).has(high_reg));
- return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp());
- } else if (needs_fp_pair) {
- DCHECK_EQ(0, return_reg.fp().code() % 2);
- return_reg = LiftoffRegister::ForFpPair(return_reg.fp());
- }
- DCHECK(!cache_state_.is_used(return_reg));
- PushRegister(return_type, return_reg);
+ const int num_lowered_params = 1 + needs_gp_pair;
+ const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
+ const RegClass rc = reg_class_for(lowered_type);
+ // Initialize to anything, will be set in the loop and used afterwards.
+ LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
+ kGpCacheRegList.GetFirstRegSet()};
+ LiftoffRegList pinned;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ compiler::LinkageLocation loc =
+ call_descriptor->GetReturnLocation(call_desc_return_idx++);
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ reg_pair[pair_idx] = LiftoffRegister::from_external_code(
+ rc, lowered_type, loc.AsRegister());
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
+ Fill(reg_pair[pair_idx], -return_offset, lowered_type);
+ const int type_size = lowered_type.element_size_bytes();
+ const int slot_size = RoundUp<kSystemPointerSize>(type_size);
+ return_offset += slot_size;
+ }
+ if (pair_idx == 0) {
+ pinned.set(reg_pair[0]);
+ }
+ }
+ if (num_lowered_params == 1) {
+ PushRegister(return_type, reg_pair[0]);
+ } else {
+ PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
+ reg_pair[1].gp()));
+ }
}
+ RecordUsedSpillOffset(TopSpillOffset() + return_offset);
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
@@ -832,26 +875,59 @@ void LiftoffAssembler::ParallelRegisterMove(
}
}
-void LiftoffAssembler::MoveToReturnRegisters(const FunctionSig* sig) {
- // We do not support multi-value yet.
- DCHECK_EQ(1, sig->return_count());
- ValueType return_type = sig->GetReturn(0);
+void LiftoffAssembler::MoveToReturnLocations(
+ const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
+ int call_desc_return_idx = 0;
+ DCHECK_LE(sig->return_count(), cache_state_.stack_height());
+ VarState* slots = cache_state_.stack_state.end() - sig->return_count();
+ // Fill return frame slots first to ensure that all potential spills happen
+ // before we prepare the stack transfers.
+ for (size_t i = 0; i < sig->return_count(); ++i) {
+ ValueType return_type = sig->GetReturn(i);
+ bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ int num_lowered_params = 1 + needs_gp_pair;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ compiler::LinkageLocation loc =
+ descriptor->GetReturnLocation(call_desc_return_idx++);
+ if (loc.IsCallerFrameSlot()) {
+ RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
+ VarState& slot = slots[i];
+ LiftoffRegister reg = needs_gp_pair
+ ? LoadI64HalfIntoRegister(slot, half)
+ : LoadToRegister(slot, {});
+ ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
+ StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_type);
+ }
+ }
+ }
+ // Prepare and execute stack transfers.
+ call_desc_return_idx = 0;
StackTransferRecipe stack_transfers(this);
- // Defaults to a gp reg, will be set below if return type is not gp.
- LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
-
- if (needs_gp_reg_pair(return_type)) {
- return_reg =
- LiftoffRegister::ForPair(kGpReturnRegisters[0], kGpReturnRegisters[1]);
- } else if (needs_fp_reg_pair(return_type)) {
- return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
- } else if (reg_class_for(return_type) == kFpReg) {
- return_reg = LiftoffRegister(kFpReturnRegisters[0]);
- } else {
- DCHECK_EQ(kGpReg, reg_class_for(return_type));
+ for (size_t i = 0; i < sig->return_count(); ++i) {
+ ValueType return_type = sig->GetReturn(i);
+ bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ int num_lowered_params = 1 + needs_gp_pair;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
+ compiler::LinkageLocation loc =
+ descriptor->GetReturnLocation(call_desc_return_idx++);
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ int reg_code = loc.AsRegister();
+ ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
+ RegClass rc = reg_class_for(lowered_type);
+ LiftoffRegister reg =
+ LiftoffRegister::from_external_code(rc, return_type, reg_code);
+ VarState& slot = slots[i];
+ if (needs_gp_pair) {
+ stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
+ half);
+ } else {
+ stack_transfers.LoadIntoRegister(reg, slot, slot.offset());
+ }
+ }
+ }
}
- stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
- cache_state_.stack_state.back().offset());
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -950,12 +1026,15 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
// {clear_used} call below only clears one of them.
cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high());
+ cache_state_.last_spilled_regs.set(slot->reg().low());
+ cache_state_.last_spilled_regs.set(slot->reg().high());
}
Spill(slot->offset(), slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
cache_state_.clear_used(reg);
+ cache_state_.last_spilled_regs.set(reg);
}
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 6573ff4aa4..f5190e3678 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -259,7 +259,6 @@ class LiftoffAssembler : public TurboAssembler {
last_spilled_regs = {};
}
LiftoffRegister reg = unspilled.GetFirstRegSet();
- last_spilled_regs.set(reg);
return reg;
}
@@ -297,6 +296,10 @@ class LiftoffAssembler : public TurboAssembler {
// but discarded with {stack_state.pop_back(count)}.
LiftoffRegister PeekToRegister(int index, LiftoffRegList pinned);
+ // Ensure that the loop inputs are either in a register or spilled to the
+ // stack, so that we can merge different values on the back-edge.
+ void PrepareLoopArgs(int num);
+
int NextSpillOffset(ValueType type) {
int offset = TopSpillOffset() + SlotSizeForType(type);
if (NeedsAlignment(type)) {
@@ -351,6 +354,15 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
Register high = GetUnusedRegister(candidates, pinned).gp();
+ if (low.code() > high.code()) {
+ // Establish the invariant that the register of the low word always has
+ // a lower code than the register of the high word. This guarantees that
+ // if a register pair of an input is reused for the result, the low
+ // word and high word registers are not swapped, i.e. the low word
+ // register of the result is not the high word register of the input,
+ // and vice versa.
+ std::swap(low, high);
+ }
return LiftoffRegister::ForPair(low, high);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
// kFpRegPair specific logic here because we need adjacent registers, not
@@ -382,6 +394,22 @@ class LiftoffAssembler : public TurboAssembler {
void SpillLocals();
void SpillAllRegisters();
+ // Clear any uses of {reg} in both the cache and in {possible_uses}.
+ // Any use in the stack is spilled. If any register in {possible_uses} matches
+ // {reg}, then the content of {reg} is moved to a new temporary register, and
+ // all matches in {possible_uses} are rewritten to that temporary register.
+ void ClearRegister(Register reg,
+ std::initializer_list<Register*> possible_uses,
+ LiftoffRegList pinned);
+
+ // Spills all passed registers.
+ template <typename... Regs>
+ void SpillRegisters(Regs... regs) {
+ for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
+ if (cache_state()->is_used(r)) SpillRegister(r);
+ }
+ }
+
// Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillOffset(int offset) {
@@ -418,7 +446,8 @@ class LiftoffAssembler : public TurboAssembler {
};
void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
- void MoveToReturnRegisters(const FunctionSig*);
+ void MoveToReturnLocations(const FunctionSig*,
+ compiler::CallDescriptor* descriptor);
#ifdef ENABLE_SLOW_DCHECKS
// Validate that the register use counts reflect the state of the cache.
@@ -466,28 +495,28 @@ class LiftoffAssembler : public TurboAssembler {
StoreType type, LiftoffRegList pinned);
inline void AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister result,
- StoreType type);
+ uint32_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type);
inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
@@ -499,6 +528,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
+ inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
+ ValueType);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType);
@@ -515,7 +546,7 @@ class LiftoffAssembler : public TurboAssembler {
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
- inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
+ inline void emit_i32_addi(Register dst, Register lhs, int32_t imm);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
@@ -528,17 +559,17 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_rem_by_zero);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
- inline void emit_i32_and(Register dst, Register lhs, int32_t imm);
+ inline void emit_i32_andi(Register dst, Register lhs, int32_t imm);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
- inline void emit_i32_or(Register dst, Register lhs, int32_t imm);
+ inline void emit_i32_ori(Register dst, Register lhs, int32_t imm);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
- inline void emit_i32_xor(Register dst, Register lhs, int32_t imm);
+ inline void emit_i32_xori(Register dst, Register lhs, int32_t imm);
inline void emit_i32_shl(Register dst, Register src, Register amount);
- inline void emit_i32_shl(Register dst, Register src, int32_t amount);
+ inline void emit_i32_shli(Register dst, Register src, int32_t amount);
inline void emit_i32_sar(Register dst, Register src, Register amount);
- inline void emit_i32_sar(Register dst, Register src, int32_t amount);
+ inline void emit_i32_sari(Register dst, Register src, int32_t amount);
inline void emit_i32_shr(Register dst, Register src, Register amount);
- inline void emit_i32_shr(Register dst, Register src, int32_t amount);
+ inline void emit_i32_shri(Register dst, Register src, int32_t amount);
// i32 unops.
inline void emit_i32_clz(Register dst, Register src);
@@ -548,8 +579,8 @@ class LiftoffAssembler : public TurboAssembler {
// i64 binops.
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm);
+ inline void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -565,28 +596,28 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs, Label* trap_rem_by_zero);
inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm);
+ inline void emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm);
+ inline void emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm);
+ inline void emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount);
- inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount);
+ inline void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount);
inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount);
- inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount);
+ inline void emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount);
inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount);
- inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount);
+ inline void emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount);
// i64 unops.
inline void emit_i64_clz(LiftoffRegister dst, LiftoffRegister src);
@@ -619,19 +650,19 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_and(dst, lhs, rhs);
}
}
- inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
+ inline void emit_ptrsize_shri(Register dst, Register src, int amount) {
if (kSystemPointerSize == 8) {
- emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
+ emit_i64_shri(LiftoffRegister(dst), LiftoffRegister(src), amount);
} else {
- emit_i32_shr(dst, src, amount);
+ emit_i32_shri(dst, src, amount);
}
}
- inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
+ inline void emit_ptrsize_addi(Register dst, Register lhs, int32_t imm) {
if (kSystemPointerSize == 8) {
- emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
+ emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
} else {
- emit_i32_add(dst, lhs, imm);
+ emit_i32_addi(dst, lhs, imm);
}
}
@@ -711,86 +742,272 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
+ inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx);
- inline void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_s128_not(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2, LiftoffRegister mask);
+ inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx);
- inline void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_min_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_max_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx);
- inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_min_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_max_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx);
+ inline void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx);
- inline void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx);
- inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i32x4_min_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_min_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_max_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f64x2_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f64x2_sqrt(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_s128_and_not(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
inline void emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx);
- inline void emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ inline void emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx);
+ inline void emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
+ inline void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs,
+ uint8_t imm_lane_idx);
inline void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx);
- inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
+ inline void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx);
inline void StackCheck(Label* ool_code, Register limit_address);
@@ -867,6 +1084,7 @@ class LiftoffAssembler : public TurboAssembler {
private:
LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
+ LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
@@ -874,7 +1092,7 @@ class LiftoffAssembler : public TurboAssembler {
ValueType local_types_[kInlineLocalTypes];
ValueType* more_local_types_;
};
- static_assert(sizeof(ValueType) == 1,
+ static_assert(sizeof(ValueType) == 4,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
int max_used_spill_offset_ = StaticStackFrameSize();
@@ -957,9 +1175,9 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_and>(
+void LiftoffAssembler::emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_andi>(
this, dst, lhs, imm);
}
@@ -969,9 +1187,9 @@ void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_or>(
+void LiftoffAssembler::emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_ori>(
this, dst, lhs, imm);
}
@@ -981,9 +1199,9 @@ void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xor>(
+void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xori>(
this, dst, lhs, imm);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 377cd1b5a8..4d0d9dbcec 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -253,6 +253,12 @@ class LiftoffCompiler {
using Value = ValueBase;
+ static constexpr auto kI32 = ValueType::kI32;
+ static constexpr auto kI64 = ValueType::kI64;
+ static constexpr auto kF32 = ValueType::kF32;
+ static constexpr auto kF64 = ValueType::kF64;
+ static constexpr auto kS128 = ValueType::kS128;
+
struct ElseState {
MovableLabel label;
LiftoffAssembler::CacheState state;
@@ -272,6 +278,17 @@ class LiftoffCompiler {
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ // For debugging, we need to spill registers before a trap, to be able to
+ // inspect them.
+ struct SpilledRegistersBeforeTrap {
+ struct Entry {
+ int offset;
+ LiftoffRegister reg;
+ ValueType type;
+ };
+ std::vector<Entry> entries;
+ };
+
struct OutOfLineCode {
MovableLabel label;
MovableLabel continuation;
@@ -279,20 +296,30 @@ class LiftoffCompiler {
WasmCodePosition position;
LiftoffRegList regs_to_save;
uint32_t pc; // for trap handler.
+ // These two pointers will only be used for debug code:
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
+ std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers;
// Named constructors:
static OutOfLineCode Trap(
WasmCode::RuntimeStubId s, WasmCodePosition pos, uint32_t pc,
- DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
+ DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder,
+ std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers) {
DCHECK_LT(0, pos);
- return {{}, {}, s, pos, {}, pc, debug_sidetable_entry_builder};
+ return {{},
+ {},
+ s,
+ pos,
+ {},
+ pc,
+ debug_sidetable_entry_builder,
+ std::move(spilled_registers)};
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
- return {{}, {}, WasmCode::kWasmStackGuard, pos,
- regs, 0, debug_sidetable_entry_builder};
+ return {{}, {}, WasmCode::kWasmStackGuard, pos,
+ regs, 0, debug_sidetable_entry_builder, {}};
}
};
@@ -300,13 +327,14 @@ class LiftoffCompiler {
CompilationEnv* env, Zone* compilation_zone,
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
- Vector<int> breakpoints = {},
+ ForDebugging for_debugging, Vector<int> breakpoints = {},
Vector<int> extra_source_pos = {})
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
debug_sidetable_builder_(debug_sidetable_builder),
+ for_debugging_(for_debugging),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
@@ -445,45 +473,8 @@ class LiftoffCompiler {
LiftoffRegister in_reg = kGpCacheRegList.GetFirstRegSet();
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
- int reg_code = param_loc.AsRegister();
- if (!kSimpleFPAliasing && type == kWasmF32) {
- // Liftoff assumes a one-to-one mapping between float registers and
- // double registers, and so does not distinguish between f32 and f64
- // registers. The f32 register code must therefore be halved in order
- // to pass the f64 code to Liftoff.
- DCHECK_EQ(0, reg_code % 2);
- reg_code /= 2;
- } else if (kNeedS128RegPair && type == kWasmS128) {
- // Similarly for double registers and SIMD registers, the SIMD code
- // needs to be doubled to pass the f64 code to Liftoff.
- reg_code *= 2;
- }
- RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
- : kLiftoffAssemblerFpCacheRegs;
- if (cache_regs & (1ULL << reg_code)) {
- // This is a cache register, just use it.
- if (kNeedS128RegPair && rc == kFpRegPair) {
- in_reg =
- LiftoffRegister::ForFpPair(DoubleRegister::from_code(reg_code));
- } else {
- in_reg = LiftoffRegister::from_code(rc, reg_code);
- }
- } else {
- // Move to a cache register (spill one if necessary).
- // Note that we cannot create a {LiftoffRegister} for reg_code, since
- // {LiftoffRegister} can only store cache regs.
- in_reg = __ GetUnusedRegister(rc, pinned);
- if (rc == kGpReg) {
- __ Move(in_reg.gp(), Register::from_code(reg_code), lowered_type);
- } else if (kNeedS128RegPair && rc == kFpRegPair) {
- __ Move(in_reg.low_fp(), DoubleRegister::from_code(reg_code),
- lowered_type);
- } else {
- DCHECK_EQ(kFpReg, rc);
- __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code),
- lowered_type);
- }
- }
+ in_reg = LiftoffRegister::from_external_code(rc, type,
+ param_loc.AsRegister());
} else if (param_loc.IsCallerFrameSlot()) {
in_reg = __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
@@ -552,7 +543,7 @@ class LiftoffCompiler {
__ CodeEntry();
DEBUG_CODE_COMMENT("enter frame");
- __ EnterFrame(StackFrame::WASM_COMPILED);
+ __ EnterFrame(StackFrame::WASM);
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
// {PrepareStackFrame} is the first platform-specific assembler method.
@@ -604,7 +595,7 @@ class LiftoffCompiler {
// If we are generating debug code, do check the "hook on function call"
// flag. If set, trigger a break.
- if (V8_UNLIKELY(env_->debug)) {
+ if (V8_UNLIKELY(for_debugging_)) {
// If there is a breakpoint set on the first instruction (== start of the
// function), then skip the check for "hook on function call", since we
// will unconditionally break there anyway.
@@ -628,6 +619,8 @@ class LiftoffCompiler {
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
+ DEBUG_CODE_COMMENT(
+ (std::string("Out of line: ") + GetRuntimeStubName(ool->stub)).c_str());
__ bind(ool->label.get());
const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
@@ -647,13 +640,21 @@ class LiftoffCompiler {
DCHECK(!is_stack_check);
__ CallTrapCallbackForTesting();
DEBUG_CODE_COMMENT("leave frame");
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
return;
}
- if (!ool->regs_to_save.is_empty()) __ PushRegisters(ool->regs_to_save);
+ // We cannot both push and spill registers.
+ DCHECK(ool->regs_to_save.is_empty() || ool->spilled_registers == nullptr);
+ if (!ool->regs_to_save.is_empty()) {
+ __ PushRegisters(ool->regs_to_save);
+ } else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
+ for (auto& entry : ool->spilled_registers->entries) {
+ __ Spill(entry.offset, entry.reg, entry.type);
+ }
+ }
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
@@ -722,10 +723,8 @@ class LiftoffCompiler {
TraceCacheState(decoder);
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
- if (WasmOpcodes::IsPrefixOpcode(opcode) &&
- decoder->pc() + 1 < decoder->end()) {
- byte op_index = *(decoder->pc() + 1);
- opcode = static_cast<WasmOpcode>(opcode << 8 | op_index);
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = decoder->read_prefixed_opcode<Decoder::kValidate>(decoder->pc());
}
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
#endif
@@ -733,7 +732,7 @@ class LiftoffCompiler {
void EmitBreakpoint(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("breakpoint");
- DCHECK(env_->debug);
+ DCHECK(for_debugging_);
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
__ CallRuntimeStub(WasmCode::kWasmDebugBreak);
@@ -744,10 +743,6 @@ class LiftoffCompiler {
void Block(FullDecoder* decoder, Control* block) {}
void Loop(FullDecoder* decoder, Control* loop) {
- if (loop->start_merge.arity > 0 || loop->end_merge.arity > 1) {
- return unsupported(decoder, kMultiValue, "multi-value loop");
- }
-
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
@@ -755,6 +750,8 @@ class LiftoffCompiler {
// pre-analysis of the function.
__ SpillLocals();
+ __ PrepareLoopArgs(loop->start_merge.arity);
+
// Loop labels bind at the beginning of the block.
__ bind(loop->label.get());
@@ -777,10 +774,6 @@ class LiftoffCompiler {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
- if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1) {
- return unsupported(decoder, kMultiValue, "multi-value if");
- }
-
// Allocate the else state.
if_block->else_state = std::make_unique<ElseState>();
@@ -819,9 +812,9 @@ class LiftoffCompiler {
// No merge yet at the end of the if, but we need to create a merge for
      // both arms of this if. Thus init the merge point from the else
// state, then merge the if state into that.
- DCHECK_EQ(0, c->end_merge.arity);
- c->label_state.InitMerge(c->else_state->state, __ num_locals(), 0,
- c->stack_depth);
+ DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
+ c->label_state.InitMerge(c->else_state->state, __ num_locals(),
+ c->start_merge.arity, c->stack_depth);
__ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
// Merge the else state into the end state.
@@ -878,6 +871,52 @@ class LiftoffCompiler {
ext_ref);
}
+ template <typename EmitFn, typename... Args>
+ typename std::enable_if<!std::is_member_function_pointer<EmitFn>::value>::type
+ CallEmitFn(EmitFn fn, Args... args) {
+ fn(args...);
+ }
+
+ template <typename EmitFn, typename... Args>
+ typename std::enable_if<std::is_member_function_pointer<EmitFn>::value>::type
+ CallEmitFn(EmitFn fn, Args... args) {
+ (asm_.*fn)(ConvertAssemblerArg(args)...);
+ }
+
+ // Wrap a {LiftoffRegister} with implicit conversions to {Register} and
+ // {DoubleRegister}.
+ struct AssemblerRegisterConverter {
+ LiftoffRegister reg;
+ operator LiftoffRegister() { return reg; }
+ operator Register() { return reg.gp(); }
+ operator DoubleRegister() { return reg.fp(); }
+ };
+
+ // Convert {LiftoffRegister} to {AssemblerRegisterConverter}, other types stay
+ // unchanged.
+ template <typename T>
+ typename std::conditional<std::is_same<LiftoffRegister, T>::value,
+ AssemblerRegisterConverter, T>::type
+ ConvertAssemblerArg(T t) {
+ return {t};
+ }
+
+ template <typename EmitFn, typename ArgType>
+ struct EmitFnWithFirstArg {
+ EmitFn fn;
+ ArgType first_arg;
+ };
+
+ template <typename EmitFn, typename ArgType>
+ EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) {
+ return {fn, arg};
+ }
+
+ template <typename EmitFn, typename T, typename... Args>
+ void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) {
+ CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
+ }
+
template <ValueType::Kind src_type, ValueType::Kind result_type, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_type);
@@ -886,7 +925,7 @@ class LiftoffCompiler {
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src})
: __ GetUnusedRegister(result_rc);
- fn(dst, src);
+ CallEmitFn(fn, dst, src);
__ PushRegister(ValueType(result_type), dst);
}
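
The helpers added in this hunk let the opcode handlers pass either a lambda or a {LiftoffAssembler} member function pointer as the emit callback: {CallEmitFn} dispatches on std::is_member_function_pointer, {AssemblerRegisterConverter} implicitly unwraps a {LiftoffRegister} into whichever concrete register type the chosen assembler method expects, and {BindFirst} simply pairs a member function pointer with a pre-bound first argument (such as a comparison condition). Below is a minimal standalone sketch of that SFINAE-plus-implicit-conversion pattern; all names (AnyReg, Asm, RegConverter) are hypothetical, not V8 code.

#include <iostream>
#include <type_traits>

struct GpReg { int code; };
struct FpReg { int code; };

// Stand-in for LiftoffRegister: may refer to either kind of register.
struct AnyReg {
  int code;
  GpReg gp() const { return GpReg{code}; }
  FpReg fp() const { return FpReg{code}; }
};

// Stand-in for the assembler whose member functions we want to call.
struct Asm {
  void emit_add(GpReg dst, GpReg src) {
    std::cout << "add gp" << dst.code << ", gp" << src.code << "\n";
  }
  void emit_fadd(FpReg dst, FpReg src) {
    std::cout << "fadd fp" << dst.code << ", fp" << src.code << "\n";
  }
};

// Wrapper with implicit conversions, mirroring AssemblerRegisterConverter.
struct RegConverter {
  AnyReg reg;
  operator GpReg() { return reg.gp(); }
  operator FpReg() { return reg.fp(); }
};

// Wrap AnyReg arguments in the converter; leave other argument types alone.
template <typename T>
typename std::conditional<std::is_same<AnyReg, T>::value, RegConverter, T>::type
ConvertArg(T t) {
  return {t};
}

struct Compiler {
  Asm asm_;

  // Overload for plain callables (e.g. lambdas): just invoke them.
  template <typename Fn, typename... Args>
  typename std::enable_if<!std::is_member_function_pointer<Fn>::value>::type
  CallEmitFn(Fn fn, Args... args) {
    fn(args...);
  }

  // Overload for member function pointers: call them on the assembler,
  // converting AnyReg arguments so the right register type is picked.
  template <typename Fn, typename... Args>
  typename std::enable_if<std::is_member_function_pointer<Fn>::value>::type
  CallEmitFn(Fn fn, Args... args) {
    (asm_.*fn)(ConvertArg(args)...);
  }
};

int main() {
  Compiler c;
  AnyReg dst{0}, src{1};
  c.CallEmitFn(&Asm::emit_add, dst, src);   // AnyReg converts to GpReg
  c.CallEmitFn(&Asm::emit_fadd, dst, src);  // AnyReg converts to FpReg
  c.CallEmitFn([](AnyReg d, AnyReg s) {     // lambdas still work unchanged
    std::cout << "lambda on r" << d.code << " and r" << s.code << "\n";
  }, dst, src);
}
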
@@ -942,38 +981,23 @@ class LiftoffCompiler {
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
-#define CASE_I32_UNOP(opcode, fn) \
- case kExpr##opcode: \
- EmitUnOp<ValueType::kI32, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister src) { \
- __ emit_##fn(dst.gp(), src.gp()); \
- }); \
- break;
-#define CASE_I64_UNOP(opcode, fn) \
- case kExpr##opcode: \
- EmitUnOp<ValueType::kI64, ValueType::kI64>( \
- [=](LiftoffRegister dst, LiftoffRegister src) { \
- __ emit_##fn(dst, src); \
- }); \
- break;
-#define CASE_FLOAT_UNOP(opcode, type, fn) \
- case kExpr##opcode: \
- EmitUnOp<ValueType::k##type, ValueType::k##type>( \
- [=](LiftoffRegister dst, LiftoffRegister src) { \
- __ emit_##fn(dst.fp(), src.fp()); \
- }); \
- break;
-#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
- case kExpr##opcode: \
- EmitFloatUnOpWithCFallback<ValueType::k##type>( \
- &LiftoffAssembler::emit_##fn, &ExternalReference::wasm_##fn); \
- break;
+#define CASE_I32_UNOP(opcode, fn) \
+ case kExpr##opcode: \
+ return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn);
+#define CASE_I64_UNOP(opcode, fn) \
+ case kExpr##opcode: \
+ return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
+#define CASE_FLOAT_UNOP(opcode, type, fn) \
+ case kExpr##opcode: \
+ return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn);
+#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
+ case kExpr##opcode: \
+ return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \
+ &ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
case kExpr##opcode: \
- EmitTypeConversion<ValueType::k##dst_type, ValueType::k##src_type, \
- can_trap>(kExpr##opcode, ext_ref, \
- can_trap ? decoder->position() : 0); \
- break;
+ return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \
+ kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
@@ -1038,19 +1062,11 @@ class LiftoffCompiler {
outstanding_op_ = kExprI32Eqz;
break;
}
- EmitUnOp<ValueType::kI32, ValueType::kI32>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i32_eqz(dst.gp(), src.gp());
- });
- break;
+ return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz);
case kExprI64Eqz:
- EmitUnOp<ValueType::kI64, ValueType::kI32>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i64_eqz(dst.gp(), src);
- });
- break;
+ return EmitUnOp<kI64, kI32>(&LiftoffAssembler::emit_i64_eqz);
case kExprI32Popcnt:
- EmitUnOp<ValueType::kI32, ValueType::kI32>(
+ return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
@@ -1058,9 +1074,8 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src,
ExternalReference::wasm_word32_popcnt());
});
- break;
case kExprI64Popcnt:
- EmitUnOp<ValueType::kI64, ValueType::kI64>(
+ return EmitUnOp<kI64, kI64>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later.
@@ -1073,7 +1088,6 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
- break;
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
case kExprI32SConvertSatF64:
@@ -1111,22 +1125,16 @@ class LiftoffCompiler {
? __ GetUnusedRegister(result_rc, {lhs})
: __ GetUnusedRegister(result_rc);
- fnImm(dst, lhs, imm);
+ CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType(result_type), dst);
} else {
// The RHS was not an immediate.
- LiftoffRegister rhs = __ PopToRegister();
- LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
- LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
- fn(dst, lhs, rhs);
- __ PushRegister(ValueType(result_type), dst);
+ EmitBinOp<src_type, result_type>(fn);
}
}
template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn>
+ bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
@@ -1135,7 +1143,10 @@ class LiftoffCompiler {
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs, rhs})
: __ GetUnusedRegister(result_rc);
- fn(dst, lhs, rhs);
+
+ if (swap_lhs_rhs) std::swap(lhs, rhs);
+
+ CallEmitFn(fn, dst, lhs, rhs);
__ PushRegister(ValueType(result_type), dst);
}
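
The new {swap_lhs_rhs} template parameter of {EmitBinOp} lets later compare cases reuse a single "greater than" emitter for the "less than" opcodes by swapping the operands before emission. A tiny sketch of the idea, with hypothetical names:

#include <cstdio>
#include <utility>

// Pretend this is the only compare emitter the backend provides.
void EmitGreaterThan(int lhs, int rhs) {
  std::printf("gt %d, %d -> %d\n", lhs, rhs, lhs > rhs ? 1 : 0);
}

// swap_lhs_rhs == true turns the same emitter into a "less than".
template <bool swap_lhs_rhs>
void EmitCompare(int lhs, int rhs) {
  if (swap_lhs_rhs) std::swap(lhs, rhs);
  EmitGreaterThan(lhs, rhs);
}

int main() {
  EmitCompare<false>(1, 2);  // gt 1, 2 -> 0
  EmitCompare<true>(1, 2);   // "lt" via swapped gt: gt 2, 1 -> 1
}
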
@@ -1163,89 +1174,32 @@ class LiftoffCompiler {
}
}
+ template <WasmOpcode opcode>
+ void EmitI32CmpOp(FullDecoder* decoder) {
+ DCHECK(decoder->lookahead(0, opcode));
+ if (decoder->lookahead(1, kExprBrIf)) {
+ DCHECK(!has_outstanding_op());
+ outstanding_op_ = opcode;
+ return;
+ }
+ return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond,
+ GetCompareCondition(opcode)));
+ }
+
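
{EmitI32CmpOp} replaces the old CASE_I32_CMPOP macro: when the decoder sees that the next opcode is br_if, the comparison result is not materialized into a 0/1 register value; it is recorded as an outstanding op to be handled when the following conditional branch is emitted. A standalone sketch of that kind of peephole, with hypothetical opcodes and printed pseudo-assembly (not V8 code):

#include <cstdio>
#include <vector>

enum Op { kI32LtS, kBrIf, kI32Add };

struct Emitter {
  bool outstanding_cmp = false;  // a compare waiting to be fused

  void Compile(const std::vector<Op>& ops) {
    for (size_t i = 0; i < ops.size(); ++i) {
      switch (ops[i]) {
        case kI32LtS:
          // Peek at the next opcode: if it is br_if, defer the compare.
          if (i + 1 < ops.size() && ops[i + 1] == kBrIf) {
            outstanding_cmp = true;
            break;
          }
          std::puts("set_cond lt dst, lhs, rhs      ; materialize 0 or 1");
          break;
        case kBrIf:
          if (outstanding_cmp) {
            outstanding_cmp = false;
            std::puts("branch_if_lt lhs, rhs, target  ; fused compare+branch");
          } else {
            std::puts("branch_if_nonzero cond, target");
          }
          break;
        case kI32Add:
          std::puts("add dst, lhs, rhs");
          break;
      }
    }
  }
};

int main() {
  Emitter e;
  e.Compile({kI32LtS, kBrIf});           // compare fused into the branch
  e.Compile({kI32LtS, kI32Add, kBrIf});  // compare materialized, branch on it
}
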
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
-#define CASE_I32_BINOP(opcode, fn) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::kI32, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
- });
-#define CASE_I32_BINOPI(opcode, fn) \
- case kExpr##opcode: \
- return EmitBinOpImm<ValueType::kI32, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
- }, \
- [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
- __ emit_##fn(dst.gp(), lhs.gp(), imm); \
- });
-#define CASE_I64_BINOP(opcode, fn) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::kI64, ValueType::kI64>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_##fn(dst, lhs, rhs); \
- });
-#define CASE_I64_BINOPI(opcode, fn) \
- case kExpr##opcode: \
- return EmitBinOpImm<ValueType::kI64, ValueType::kI64>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_##fn(dst, lhs, rhs); \
- }, \
- [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
- __ emit_##fn(dst, lhs, imm); \
- });
-#define CASE_FLOAT_BINOP(opcode, type, fn) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::k##type, ValueType::k##type>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
- });
-#define CASE_I32_CMPOP(opcode) \
- case kExpr##opcode: \
- DCHECK(decoder->lookahead(0, kExpr##opcode)); \
- if (decoder->lookahead(1, kExprBrIf)) { \
- DCHECK(!has_outstanding_op()); \
- outstanding_op_ = kExpr##opcode; \
- break; \
- } \
- return EmitBinOp<ValueType::kI32, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- constexpr Condition cond = GetCompareCondition(kExpr##opcode); \
- __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
- });
-#define CASE_I64_CMPOP(opcode, cond) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::kI64, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_i64_set_cond(cond, dst.gp(), lhs, rhs); \
- });
-#define CASE_F32_CMPOP(opcode, cond) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::kF32, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
- });
-#define CASE_F64_CMPOP(opcode, cond) \
- case kExpr##opcode: \
- return EmitBinOp<ValueType::kF64, ValueType::kI32>( \
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- __ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
- });
#define CASE_I64_SHIFTOP(opcode, fn) \
case kExpr##opcode: \
- return EmitBinOpImm<ValueType::kI64, ValueType::kI64>( \
+ return EmitBinOpImm<kI64, kI64>( \
[=](LiftoffRegister dst, LiftoffRegister src, \
LiftoffRegister amount) { \
__ emit_##fn(dst, src, \
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \
- [=](LiftoffRegister dst, LiftoffRegister src, int32_t amount) { \
- __ emit_##fn(dst, src, amount); \
- });
+ &LiftoffAssembler::emit_##fn##i);
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
case kExpr##opcode: \
- return EmitBinOp<ValueType::k##type, ValueType::k##type>( \
+ return EmitBinOp<k##type, k##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
@@ -1256,79 +1210,172 @@ class LiftoffCompiler {
GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \
});
switch (opcode) {
- CASE_I32_BINOPI(I32Add, i32_add)
- CASE_I32_BINOP(I32Sub, i32_sub)
- CASE_I32_BINOP(I32Mul, i32_mul)
- CASE_I32_BINOPI(I32And, i32_and)
- CASE_I32_BINOPI(I32Ior, i32_or)
- CASE_I32_BINOPI(I32Xor, i32_xor)
- CASE_I64_BINOPI(I64And, i64_and)
- CASE_I64_BINOPI(I64Ior, i64_or)
- CASE_I64_BINOPI(I64Xor, i64_xor)
- CASE_I32_CMPOP(I32Eq)
- CASE_I32_CMPOP(I32Ne)
- CASE_I32_CMPOP(I32LtS)
- CASE_I32_CMPOP(I32LtU)
- CASE_I32_CMPOP(I32GtS)
- CASE_I32_CMPOP(I32GtU)
- CASE_I32_CMPOP(I32LeS)
- CASE_I32_CMPOP(I32LeU)
- CASE_I32_CMPOP(I32GeS)
- CASE_I32_CMPOP(I32GeU)
- CASE_I64_BINOPI(I64Add, i64_add)
- CASE_I64_BINOP(I64Sub, i64_sub)
- CASE_I64_BINOP(I64Mul, i64_mul)
- CASE_I64_CMPOP(I64Eq, kEqual)
- CASE_I64_CMPOP(I64Ne, kUnequal)
- CASE_I64_CMPOP(I64LtS, kSignedLessThan)
- CASE_I64_CMPOP(I64LtU, kUnsignedLessThan)
- CASE_I64_CMPOP(I64GtS, kSignedGreaterThan)
- CASE_I64_CMPOP(I64GtU, kUnsignedGreaterThan)
- CASE_I64_CMPOP(I64LeS, kSignedLessEqual)
- CASE_I64_CMPOP(I64LeU, kUnsignedLessEqual)
- CASE_I64_CMPOP(I64GeS, kSignedGreaterEqual)
- CASE_I64_CMPOP(I64GeU, kUnsignedGreaterEqual)
- CASE_F32_CMPOP(F32Eq, kEqual)
- CASE_F32_CMPOP(F32Ne, kUnequal)
- CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
- CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
- CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
- CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
- CASE_F64_CMPOP(F64Eq, kEqual)
- CASE_F64_CMPOP(F64Ne, kUnequal)
- CASE_F64_CMPOP(F64Lt, kUnsignedLessThan)
- CASE_F64_CMPOP(F64Gt, kUnsignedGreaterThan)
- CASE_F64_CMPOP(F64Le, kUnsignedLessEqual)
- CASE_F64_CMPOP(F64Ge, kUnsignedGreaterEqual)
- CASE_I32_BINOPI(I32Shl, i32_shl)
- CASE_I32_BINOPI(I32ShrS, i32_sar)
- CASE_I32_BINOPI(I32ShrU, i32_shr)
- CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
- CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
- CASE_I64_SHIFTOP(I64Shl, i64_shl)
- CASE_I64_SHIFTOP(I64ShrS, i64_sar)
- CASE_I64_SHIFTOP(I64ShrU, i64_shr)
- CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol)
- CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror)
- CASE_FLOAT_BINOP(F32Add, F32, f32_add)
- CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
- CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
- CASE_FLOAT_BINOP(F32Div, F32, f32_div)
- CASE_FLOAT_BINOP(F32Min, F32, f32_min)
- CASE_FLOAT_BINOP(F32Max, F32, f32_max)
- CASE_FLOAT_BINOP(F32CopySign, F32, f32_copysign)
- CASE_FLOAT_BINOP(F64Add, F64, f64_add)
- CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
- CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
- CASE_FLOAT_BINOP(F64Div, F64, f64_div)
- CASE_FLOAT_BINOP(F64Min, F64, f64_min)
- CASE_FLOAT_BINOP(F64Max, F64, f64_max)
- CASE_FLOAT_BINOP(F64CopySign, F64, f64_copysign)
+ case kExprI32Add:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add,
+ &LiftoffAssembler::emit_i32_addi);
+ case kExprI32Sub:
+ return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_sub);
+ case kExprI32Mul:
+ return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_mul);
+ case kExprI32And:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_and,
+ &LiftoffAssembler::emit_i32_andi);
+ case kExprI32Ior:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_or,
+ &LiftoffAssembler::emit_i32_ori);
+ case kExprI32Xor:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_xor,
+ &LiftoffAssembler::emit_i32_xori);
+ case kExprI32Eq:
+ return EmitI32CmpOp<kExprI32Eq>(decoder);
+ case kExprI32Ne:
+ return EmitI32CmpOp<kExprI32Ne>(decoder);
+ case kExprI32LtS:
+ return EmitI32CmpOp<kExprI32LtS>(decoder);
+ case kExprI32LtU:
+ return EmitI32CmpOp<kExprI32LtU>(decoder);
+ case kExprI32GtS:
+ return EmitI32CmpOp<kExprI32GtS>(decoder);
+ case kExprI32GtU:
+ return EmitI32CmpOp<kExprI32GtU>(decoder);
+ case kExprI32LeS:
+ return EmitI32CmpOp<kExprI32LeS>(decoder);
+ case kExprI32LeU:
+ return EmitI32CmpOp<kExprI32LeU>(decoder);
+ case kExprI32GeS:
+ return EmitI32CmpOp<kExprI32GeS>(decoder);
+ case kExprI32GeU:
+ return EmitI32CmpOp<kExprI32GeU>(decoder);
+ case kExprI64Add:
+ return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_add,
+ &LiftoffAssembler::emit_i64_addi);
+ case kExprI64Sub:
+ return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_sub);
+ case kExprI64Mul:
+ return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_mul);
+ case kExprI64And:
+ return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_and,
+ &LiftoffAssembler::emit_i64_andi);
+ case kExprI64Ior:
+ return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_or,
+ &LiftoffAssembler::emit_i64_ori);
+ case kExprI64Xor:
+ return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_xor,
+ &LiftoffAssembler::emit_i64_xori);
+ case kExprI64Eq:
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual));
+ case kExprI64Ne:
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal));
+ case kExprI64LtS:
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan));
+ case kExprI64LtU:
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan));
+ case kExprI64GtS:
+ return EmitBinOp<kI64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan));
+ case kExprI64GtU:
+ return EmitBinOp<kI64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan));
+ case kExprI64LeS:
+ return EmitBinOp<kI64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual));
+ case kExprI64LeU:
+ return EmitBinOp<kI64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual));
+ case kExprI64GeS:
+ return EmitBinOp<kI64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual));
+ case kExprI64GeU:
+ return EmitBinOp<kI64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual));
+ case kExprF32Eq:
+ return EmitBinOp<kF32, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual));
+ case kExprF32Ne:
+ return EmitBinOp<kF32, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal));
+ case kExprF32Lt:
+ return EmitBinOp<kF32, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan));
+ case kExprF32Gt:
+ return EmitBinOp<kF32, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan));
+ case kExprF32Le:
+ return EmitBinOp<kF32, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual));
+ case kExprF32Ge:
+ return EmitBinOp<kF32, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual));
+ case kExprF64Eq:
+ return EmitBinOp<kF64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual));
+ case kExprF64Ne:
+ return EmitBinOp<kF64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal));
+ case kExprF64Lt:
+ return EmitBinOp<kF64, kI32>(
+ BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan));
+ case kExprF64Gt:
+ return EmitBinOp<kF64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan));
+ case kExprF64Le:
+ return EmitBinOp<kF64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual));
+ case kExprF64Ge:
+ return EmitBinOp<kF64, kI32>(BindFirst(
+ &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual));
+ case kExprI32Shl:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl,
+ &LiftoffAssembler::emit_i32_shli);
+ case kExprI32ShrS:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_sar,
+ &LiftoffAssembler::emit_i32_sari);
+ case kExprI32ShrU:
+ return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shr,
+ &LiftoffAssembler::emit_i32_shri);
+ CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
+ CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
+ CASE_I64_SHIFTOP(I64Shl, i64_shl)
+ CASE_I64_SHIFTOP(I64ShrS, i64_sar)
+ CASE_I64_SHIFTOP(I64ShrU, i64_shr)
+ CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol)
+ CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror)
+ case kExprF32Add:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_add);
+ case kExprF32Sub:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_sub);
+ case kExprF32Mul:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_mul);
+ case kExprF32Div:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_div);
+ case kExprF32Min:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_min);
+ case kExprF32Max:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_max);
+ case kExprF32CopySign:
+ return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign);
+ case kExprF64Add:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_add);
+ case kExprF64Sub:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_sub);
+ case kExprF64Mul:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_mul);
+ case kExprF64Div:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_div);
+ case kExprF64Min:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_min);
+ case kExprF64Max:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_max);
+ case kExprF64CopySign:
+ return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign);
case kExprI32DivS:
- EmitBinOp<ValueType::kI32, ValueType::kI32>([this, decoder](
- LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
@@ -1340,39 +1387,34 @@ class LiftoffCompiler {
__ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
div_unrepresentable);
});
- break;
case kExprI32DivU:
- EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ return EmitBinOp<kI32, kI32>(
[this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
__ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
});
- break;
case kExprI32RemS:
- EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ return EmitBinOp<kI32, kI32>(
[this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
- break;
case kExprI32RemU:
- EmitBinOp<ValueType::kI32, ValueType::kI32>(
+ return EmitBinOp<kI32, kI32>(
[this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
- break;
case kExprI64DivS:
- EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
- LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
WasmCodePosition position = decoder->position();
AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
// Adding the second trap might invalidate the pointer returned for
@@ -1388,12 +1430,10 @@ class LiftoffCompiler {
div_unrepresentable);
}
});
- break;
case kExprI64DivU:
- EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
- LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Label* div_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
@@ -1401,9 +1441,8 @@ class LiftoffCompiler {
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
}
});
- break;
case kExprI64RemS:
- EmitBinOp<ValueType::kI64, ValueType::kI64>(
+ return EmitBinOp<kI64, kI64>(
[this, decoder](LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
@@ -1413,12 +1452,10 @@ class LiftoffCompiler {
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
- break;
case kExprI64RemU:
- EmitBinOp<ValueType::kI64, ValueType::kI64>([this, decoder](
- LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+ return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Label* rem_by_zero = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
@@ -1426,19 +1463,9 @@ class LiftoffCompiler {
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
- break;
default:
UNREACHABLE();
}
-#undef CASE_I32_BINOP
-#undef CASE_I32_BINOPI
-#undef CASE_I64_BINOP
-#undef CASE_I64_BINOPI
-#undef CASE_FLOAT_BINOP
-#undef CASE_I32_CMPOP
-#undef CASE_I64_CMPOP
-#undef CASE_F32_CMPOP
-#undef CASE_F64_CMPOP
#undef CASE_I64_SHIFTOP
#undef CASE_CCALL_BINOP
}
@@ -1482,6 +1509,10 @@ class LiftoffCompiler {
unsupported(decoder, kAnyRef, "func");
}
+ void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ unsupported(decoder, kAnyRef, "ref.as_non_null");
+ }
+
void Drop(FullDecoder* decoder, const Value& value) {
auto& slot = __ cache_state()->stack_state.back();
    // If the dropped slot contains a register, decrement its use count.
@@ -1491,12 +1522,9 @@ class LiftoffCompiler {
void ReturnImpl(FullDecoder* decoder) {
size_t num_returns = decoder->sig_->return_count();
- if (num_returns > 1) {
- return unsupported(decoder, kMultiValue, "multi-return");
- }
- if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
+ if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
DEBUG_CODE_COMMENT("leave frame");
- __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
@@ -1791,13 +1819,28 @@ class LiftoffCompiler {
__ cache_state()->Steal(c->else_state->state);
}
+ std::unique_ptr<SpilledRegistersBeforeTrap> GetSpilledRegistersBeforeTrap() {
+ if (V8_LIKELY(!for_debugging_)) return nullptr;
+ // If we are generating debugging code, we really need to spill all
+ // registers to make them inspectable when stopping at the trap.
+ auto spilled = std::make_unique<SpilledRegistersBeforeTrap>();
+ for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
+ auto& slot = __ cache_state()->stack_state[i];
+ if (!slot.is_reg()) continue;
+ spilled->entries.push_back(SpilledRegistersBeforeTrap::Entry{
+ slot.offset(), slot.reg(), slot.type()});
+ }
+ return spilled;
+ }
+
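
{GetSpilledRegistersBeforeTrap} only produces data in debug compilations: it records every value-stack slot that currently lives in a register, and the new spill loop in {GenerateOutOfLineCode} writes those registers back to their frame slots before the trap, so the debugger can inspect all values at the trap site. A rough sketch of collecting such entries, using hypothetical slot and entry types:

#include <cstdio>
#include <memory>
#include <vector>

enum class Kind { kStack, kRegister, kConstant };

struct Slot {
  Kind kind;
  int offset;  // frame offset of the slot
  int reg;     // register code, only valid if kind == kRegister
};

struct SpillEntry { int offset; int reg; };

std::unique_ptr<std::vector<SpillEntry>> CollectSpills(
    const std::vector<Slot>& stack, bool for_debugging) {
  if (!for_debugging) return nullptr;  // regular code: nothing to record
  auto spills = std::make_unique<std::vector<SpillEntry>>();
  for (const Slot& slot : stack) {
    if (slot.kind != Kind::kRegister) continue;
    spills->push_back({slot.offset, slot.reg});
  }
  return spills;
}

int main() {
  std::vector<Slot> stack = {{Kind::kStack, 0, -1},
                             {Kind::kRegister, 8, 3},
                             {Kind::kConstant, 16, -1}};
  auto spills = CollectSpills(stack, /*for_debugging=*/true);
  for (const SpillEntry& e : *spills) {
    std::printf("spill r%d to frame offset %d\n", e.reg, e.offset);
  }
}
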
Label* AddOutOfLineTrap(WasmCodePosition position,
WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
DCHECK(FLAG_wasm_bounds_checks);
out_of_line_code_.push_back(OutOfLineCode::Trap(
stub, position, pc,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling),
+ GetSpilledRegistersBeforeTrap()));
return out_of_line_code_.back().label.get();
}
@@ -1882,12 +1925,12 @@ class LiftoffCompiler {
// AND of two operands. We could introduce a new variant of
// {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here.
- __ emit_i32_and(address, index, align_mask);
+ __ emit_i32_andi(address, index, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
return;
}
- __ emit_i32_add(address, index, offset);
- __ emit_i32_and(address, address, align_mask);
+ __ emit_i32_addi(address, index, offset);
+ __ emit_i32_andi(address, address, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
}
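
This hunk switches the atomic alignment check to the immediate variants ({emit_i32_addi}, {emit_i32_andi}) because the offset and the alignment mask are known constants here. The underlying test is the usual power-of-two alignment check, sketched below with hypothetical helper names:

#include <cstdint>
#include <cstdio>

// An address is aligned for an access of size 2^k exactly when its low k
// bits are zero, so (index + offset) & (access_size - 1) must be 0.
bool IsAligned(uint32_t index, uint32_t offset, uint32_t access_size) {
  uint32_t align_mask = access_size - 1;  // access_size is a power of two
  return ((index + offset) & align_mask) == 0;
}

int main() {
  std::printf("%d\n", IsAligned(16, 4, 4));  // 1: address 20 is 4-byte aligned
  std::printf("%d\n", IsAligned(16, 2, 4));  // 0: address 18 is not
}
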
@@ -1950,7 +1993,7 @@ class LiftoffCompiler {
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- __ emit_ptrsize_add(index, index, *offset);
+ __ emit_ptrsize_addi(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
@@ -2034,7 +2077,7 @@ class LiftoffCompiler {
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
- __ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
+ __ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
}
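
memory.size reports the memory size in 64 KiB wasm pages, so the byte size loaded from the instance is converted with a logical right shift by {kWasmPageSizeLog2} (16), now using the immediate shift variant. A tiny sketch of the computation, assuming the standard 64 KiB page size:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kWasmPageSizeLog2 = 16;  // 64 KiB pages

int main() {
  uint64_t mem_size_bytes = 5u * (1u << kWasmPageSizeLog2);  // 5 pages
  uint64_t pages = mem_size_bytes >> kWasmPageSizeLog2;      // shift, not divide
  std::printf("%llu pages\n", static_cast<unsigned long long>(pages));
}
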
@@ -2081,13 +2124,10 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- if (imm.sig->return_count() > 1) {
- return unsupported(decoder, kMultiValue, "multi-return");
- }
- if (imm.sig->return_count() == 1 &&
- !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
- "return")) {
- return;
+ for (ValueType ret : imm.sig->returns()) {
+ if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
+ return;
+ }
}
auto call_descriptor =
@@ -2143,16 +2183,13 @@ class LiftoffCompiler {
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- if (imm.sig->return_count() > 1) {
- return unsupported(decoder, kMultiValue, "multi-return");
- }
if (imm.table_index != 0) {
return unsupported(decoder, kAnyRef, "table index != 0");
}
- if (imm.sig->return_count() == 1 &&
- !CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
- "return")) {
- return;
+ for (ValueType ret : imm.sig->returns()) {
+ if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
+ return;
+ }
}
// Place the source position before any stack manipulation, since this will
@@ -2207,7 +2244,7 @@ class LiftoffCompiler {
// 3) mask = diff & neg_index
__ emit_i32_and(mask, diff, neg_index);
// 4) mask = mask >> 31
- __ emit_i32_sar(mask, mask, 31);
+ __ emit_i32_sari(mask, mask, 31);
// Apply mask.
__ emit_i32_and(index, index, mask);
@@ -2218,7 +2255,7 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
// Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
STATIC_ASSERT((1 << 2) == kInt32Size);
- __ emit_i32_shl(index, index, 2);
+ __ emit_i32_shli(index, index, 2);
__ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
pinned);
@@ -2288,156 +2325,357 @@ class LiftoffCompiler {
unsupported(decoder, kTailCall, "return_call_indirect");
}
+ void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
+ unsupported(decoder, kAnyRef, "br_on_null");
+ }
+
+ template <ValueType::Kind src_type, ValueType::Kind result_type,
+ typename EmitFn>
+ void EmitTerOp(EmitFn fn) {
+ static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass result_rc = reg_class_for(result_type);
+ LiftoffRegister src3 = __ PopToRegister();
+ LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
+ LiftoffRegister src1 =
+ __ PopToRegister(LiftoffRegList::ForRegs(src3, src2));
+ // Reusing src1 and src2 will complicate codegen for select for some
  // backends, so we allow only reusing src3 (the mask), and pin src1 and src2.
+ LiftoffRegister dst =
+ src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {src3},
+ LiftoffRegList::ForRegs(src1, src2))
+ : __ GetUnusedRegister(result_rc);
+ CallEmitFn(fn, dst, src1, src2, src3);
+ __ PushRegister(ValueType(result_type), dst);
+ }
+
+ template <typename EmitFn, typename EmitFnImm>
+ void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) {
+ static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ // Check if the RHS is an immediate.
+ if (rhs_slot.is_const()) {
+ __ cache_state()->stack_state.pop_back();
+ int32_t imm = rhs_slot.i32_const();
+
+ LiftoffRegister operand = __ PopToRegister();
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+
+ CallEmitFn(fnImm, dst, operand, imm);
+ __ PushRegister(kWasmS128, dst);
+ } else {
+ LiftoffRegister count = __ PopToRegister();
+ LiftoffRegister operand = __ PopToRegister();
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+
+ CallEmitFn(fn, dst, operand, count);
+ __ PushRegister(kWasmS128, dst);
+ }
+ }
+
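
{EmitSimdShiftOp} mirrors {EmitBinOpImm}: if the shift amount on top of the value stack is a compile-time constant, it is popped without allocating a register and the immediate form of the shift is emitted; otherwise the amount is popped into a register and the register form is used. A small sketch of that dispatch with hypothetical slot types:

#include <cstdio>
#include <vector>

struct Slot {
  bool is_const;
  int value;  // only meaningful when is_const
};

void EmitShift(std::vector<Slot>* stack) {
  Slot rhs = stack->back();
  stack->pop_back();
  if (rhs.is_const) {
    // Immediate form: the shift amount is encoded into the instruction.
    std::printf("shl.imm dst, src, #%d\n", rhs.value);
  } else {
    // Register form: the shift amount comes from a register.
    std::printf("shl.reg dst, src, count_reg\n");
  }
}

int main() {
  std::vector<Slot> stack = {{false, 0}, {true, 3}};
  EmitShift(&stack);  // prints the immediate form (amount 3)
  EmitShift(&stack);  // prints the register form
}
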
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
- case wasm::kExprF64x2Splat:
- EmitUnOp<ValueType::kF64, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_f64x2_splat(dst, src);
- });
- break;
- case wasm::kExprF64x2Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f64x2_add(dst, lhs, rhs);
- });
- break;
- case wasm::kExprF64x2Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f64x2_sub(dst, lhs, rhs);
- });
- break;
- case wasm::kExprF64x2Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f64x2_mul(dst, lhs, rhs);
- });
- break;
- case wasm::kExprF32x4Splat:
- EmitUnOp<ValueType::kF32, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_f32x4_splat(dst, src);
- });
- break;
- case wasm::kExprF32x4Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f32x4_add(dst, lhs, rhs);
- });
- break;
- case wasm::kExprF32x4Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f32x4_sub(dst, lhs, rhs);
- });
- break;
- case wasm::kExprF32x4Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_f32x4_mul(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI64x2Splat:
- EmitUnOp<ValueType::kI64, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i64x2_splat(dst, src);
- });
- break;
- case wasm::kExprI64x2Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i64x2_add(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI64x2Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i64x2_sub(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI64x2Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i64x2_mul(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI32x4Splat:
- EmitUnOp<ValueType::kI32, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i32x4_splat(dst, src);
- });
- break;
- case wasm::kExprI32x4Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i32x4_add(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI32x4Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i32x4_sub(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI32x4Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i32x4_mul(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI16x8Splat:
- EmitUnOp<ValueType::kI32, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i16x8_splat(dst, src);
- });
- break;
- case wasm::kExprI16x8Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i16x8_add(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI16x8Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i16x8_sub(dst, lhs, rhs);
- });
- break;
- case wasm::kExprI16x8Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i16x8_mul(dst, lhs, rhs);
- });
- break;
case wasm::kExprI8x16Splat:
- EmitUnOp<ValueType::kI32, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i8x16_splat(dst, src);
- });
- break;
+ return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
+ case wasm::kExprI16x8Splat:
+ return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i16x8_splat);
+ case wasm::kExprI32x4Splat:
+ return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i32x4_splat);
+ case wasm::kExprI64x2Splat:
+ return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
+ case wasm::kExprF32x4Splat:
+ return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat);
+ case wasm::kExprF64x2Splat:
+ return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat);
+ case wasm::kExprI8x16Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
+ case wasm::kExprI8x16Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ne);
+ case wasm::kExprI8x16LtS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i8x16_gt_s);
+ case wasm::kExprI8x16LtU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i8x16_gt_u);
+ case wasm::kExprI8x16GtS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_s);
+ case wasm::kExprI8x16GtU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_u);
+ case wasm::kExprI8x16LeS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i8x16_ge_s);
+ case wasm::kExprI8x16LeU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i8x16_ge_u);
+ case wasm::kExprI8x16GeS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_s);
+ case wasm::kExprI8x16GeU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_u);
+ case wasm::kExprI16x8Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_eq);
+ case wasm::kExprI16x8Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ne);
+ case wasm::kExprI16x8LtS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i16x8_gt_s);
+ case wasm::kExprI16x8LtU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i16x8_gt_u);
+ case wasm::kExprI16x8GtS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_s);
+ case wasm::kExprI16x8GtU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_u);
+ case wasm::kExprI16x8LeS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i16x8_ge_s);
+ case wasm::kExprI16x8LeU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i16x8_ge_u);
+ case wasm::kExprI16x8GeS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_s);
+ case wasm::kExprI16x8GeU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_u);
+ case wasm::kExprI32x4Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_eq);
+ case wasm::kExprI32x4Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ne);
+ case wasm::kExprI32x4LtS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i32x4_gt_s);
+ case wasm::kExprI32x4LtU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i32x4_gt_u);
+ case wasm::kExprI32x4GtS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_s);
+ case wasm::kExprI32x4GtU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_u);
+ case wasm::kExprI32x4LeS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i32x4_ge_s);
+ case wasm::kExprI32x4LeU:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i32x4_ge_u);
+ case wasm::kExprI32x4GeS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s);
+ case wasm::kExprI32x4GeU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
+ case wasm::kExprF32x4Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
+ case wasm::kExprF32x4Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_ne);
+ case wasm::kExprF32x4Lt:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_lt);
+ case wasm::kExprF32x4Gt:
+ return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_lt);
+ case wasm::kExprF32x4Le:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_le);
+ case wasm::kExprF32x4Ge:
+ return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_le);
+ case wasm::kExprF64x2Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_eq);
+ case wasm::kExprF64x2Ne:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_ne);
+ case wasm::kExprF64x2Lt:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_lt);
+ case wasm::kExprF64x2Gt:
+ return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_lt);
+ case wasm::kExprF64x2Le:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_le);
+ case wasm::kExprF64x2Ge:
+ return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_le);
+ case wasm::kExprS128Not:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_s128_not);
+ case wasm::kExprS128And:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and);
+ case wasm::kExprS128Or:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_or);
+ case wasm::kExprS128Xor:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_xor);
+ case wasm::kExprS128Select:
+ return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
+ case wasm::kExprI8x16Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
+ case wasm::kExprI8x16Shl:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl,
+ &LiftoffAssembler::emit_i8x16_shli);
case wasm::kExprI8x16Add:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i8x16_add(dst, lhs, rhs);
- });
- break;
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
+ case wasm::kExprI8x16AddSaturateS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_add_saturate_s);
+ case wasm::kExprI8x16AddSaturateU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_add_saturate_u);
case wasm::kExprI8x16Sub:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i8x16_sub(dst, lhs, rhs);
- });
- break;
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
+ case wasm::kExprI8x16SubSaturateS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_sub_saturate_s);
+ case wasm::kExprI8x16SubSaturateU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_sub_saturate_u);
case wasm::kExprI8x16Mul:
- EmitBinOp<ValueType::kS128, ValueType::kS128>(
- [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) {
- __ emit_i8x16_mul(dst, lhs, rhs);
- });
- break;
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_mul);
+ case wasm::kExprI8x16MinS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_s);
+ case wasm::kExprI8x16MinU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_u);
+ case wasm::kExprI8x16MaxS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_s);
+ case wasm::kExprI8x16MaxU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
+ case wasm::kExprI16x8Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
+ case wasm::kExprI16x8Shl:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl,
+ &LiftoffAssembler::emit_i16x8_shli);
+ case wasm::kExprI16x8Add:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
+ case wasm::kExprI16x8AddSaturateS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_add_saturate_s);
+ case wasm::kExprI16x8AddSaturateU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_add_saturate_u);
+ case wasm::kExprI16x8Sub:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
+ case wasm::kExprI16x8SubSaturateS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_sub_saturate_s);
+ case wasm::kExprI16x8SubSaturateU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_sub_saturate_u);
+ case wasm::kExprI16x8Mul:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
+ case wasm::kExprI16x8MinS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_s);
+ case wasm::kExprI16x8MinU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_u);
+ case wasm::kExprI16x8MaxS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
+ case wasm::kExprI16x8MaxU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
+ case wasm::kExprI32x4Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
+ case wasm::kExprI32x4Shl:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl,
+ &LiftoffAssembler::emit_i32x4_shli);
+ case wasm::kExprI32x4Add:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add);
+ case wasm::kExprI32x4Sub:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_sub);
+ case wasm::kExprI32x4Mul:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_mul);
+ case wasm::kExprI32x4MinS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_s);
+ case wasm::kExprI32x4MinU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_u);
+ case wasm::kExprI32x4MaxS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
+ case wasm::kExprI32x4MaxU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
+ case wasm::kExprI64x2Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
+ case wasm::kExprI64x2Shl:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
+ &LiftoffAssembler::emit_i64x2_shli);
+ case wasm::kExprI64x2Add:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add);
+ case wasm::kExprI64x2Sub:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_sub);
+ case wasm::kExprI64x2Mul:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_mul);
+ case wasm::kExprF32x4Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
+ case wasm::kExprF32x4Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_neg);
+ case wasm::kExprF32x4Sqrt:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sqrt);
+ case wasm::kExprF32x4Add:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_add);
+ case wasm::kExprF32x4Sub:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sub);
+ case wasm::kExprF32x4Mul:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_mul);
+ case wasm::kExprF32x4Div:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_div);
+ case wasm::kExprF32x4Min:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_min);
+ case wasm::kExprF32x4Max:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_max);
+ case wasm::kExprF64x2Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_abs);
+ case wasm::kExprF64x2Neg:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_neg);
+ case wasm::kExprF64x2Sqrt:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sqrt);
+ case wasm::kExprF64x2Add:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_add);
+ case wasm::kExprF64x2Sub:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sub);
+ case wasm::kExprF64x2Mul:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_mul);
+ case wasm::kExprF64x2Div:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_div);
+ case wasm::kExprF64x2Min:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
+ case wasm::kExprF64x2Max:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
+ case wasm::kExprI8x16SConvertI16x8:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_sconvert_i16x8);
+ case wasm::kExprI8x16UConvertI16x8:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_uconvert_i16x8);
+ case wasm::kExprI16x8SConvertI32x4:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_sconvert_i32x4);
+ case wasm::kExprI16x8UConvertI32x4:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_uconvert_i32x4);
+ case wasm::kExprI16x8SConvertI8x16Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_sconvert_i8x16_low);
+ case wasm::kExprI16x8SConvertI8x16High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_sconvert_i8x16_high);
+ case wasm::kExprI16x8UConvertI8x16Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_uconvert_i8x16_low);
+ case wasm::kExprI16x8UConvertI8x16High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_uconvert_i8x16_high);
+ case wasm::kExprI32x4SConvertI16x8Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_sconvert_i16x8_low);
+ case wasm::kExprI32x4SConvertI16x8High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_sconvert_i16x8_high);
+ case wasm::kExprI32x4UConvertI16x8Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_uconvert_i16x8_low);
+ case wasm::kExprI32x4UConvertI16x8High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_uconvert_i16x8_high);
+ case wasm::kExprS128AndNot:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and_not);
+ case wasm::kExprI8x16RoundingAverageU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i8x16_rounding_average_u);
+ case wasm::kExprI16x8RoundingAverageU:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_rounding_average_u);
+ case wasm::kExprI8x16Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_abs);
+ case wasm::kExprI16x8Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_abs);
+ case wasm::kExprI32x4Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs);
default:
unsupported(decoder, kSimd, "simd");
}
@@ -2460,9 +2698,9 @@ class LiftoffCompiler {
template <ValueType::Kind src2_type, typename EmitFn>
void EmitSimdReplaceLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
- static constexpr RegClass src1_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass src1_rc = reg_class_for(kS128);
static constexpr RegClass src2_rc = reg_class_for(src2_type);
- static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass result_rc = reg_class_for(kS128);
// On backends which need fp pair, src1_rc and result_rc end up being
// kFpRegPair, which is != kFpReg, but we still want to pin src2 when it is
// kFpReg, since it can overlap with those pairs.
@@ -2492,36 +2730,36 @@ class LiftoffCompiler {
switch (opcode) {
#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \
case wasm::kExpr##opcode: \
- EmitSimdExtractLaneOp<ValueType::kS128, ValueType::k##type>( \
+ EmitSimdExtractLaneOp<kS128, k##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
__ emit_##fn(dst, lhs, imm_lane_idx); \
}, \
imm); \
break;
- CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
- CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
- CASE_SIMD_EXTRACT_LANE_OP(I64x2ExtractLane, I64, i64x2_extract_lane)
- CASE_SIMD_EXTRACT_LANE_OP(I32x4ExtractLane, I32, i32x4_extract_lane)
- CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneU, I32, i16x8_extract_lane_u)
- CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneS, I32, i16x8_extract_lane_s)
- CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneU, I32, i8x16_extract_lane_u)
CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneS, I32, i8x16_extract_lane_s)
+ CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneU, I32, i8x16_extract_lane_u)
+ CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneS, I32, i16x8_extract_lane_s)
+ CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneU, I32, i16x8_extract_lane_u)
+ CASE_SIMD_EXTRACT_LANE_OP(I32x4ExtractLane, I32, i32x4_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(I64x2ExtractLane, I64, i64x2_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
+ CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
#undef CASE_SIMD_EXTRACT_LANE_OP
#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \
case wasm::kExpr##opcode: \
- EmitSimdReplaceLaneOp<ValueType::k##type>( \
+ EmitSimdReplaceLaneOp<k##type>( \
[=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \
__ emit_##fn(dst, src1, src2, imm_lane_idx); \
}, \
imm); \
break;
- CASE_SIMD_REPLACE_LANE_OP(F64x2ReplaceLane, F64, f64x2_replace_lane)
- CASE_SIMD_REPLACE_LANE_OP(F32x4ReplaceLane, F32, f32x4_replace_lane)
- CASE_SIMD_REPLACE_LANE_OP(I64x2ReplaceLane, I64, i64x2_replace_lane)
- CASE_SIMD_REPLACE_LANE_OP(I32x4ReplaceLane, I32, i32x4_replace_lane)
- CASE_SIMD_REPLACE_LANE_OP(I16x8ReplaceLane, I32, i16x8_replace_lane)
CASE_SIMD_REPLACE_LANE_OP(I8x16ReplaceLane, I32, i8x16_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I16x8ReplaceLane, I32, i16x8_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I32x4ReplaceLane, I32, i32x4_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(I64x2ReplaceLane, I64, i64x2_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(F32x4ReplaceLane, F32, f32x4_replace_lane)
+ CASE_SIMD_REPLACE_LANE_OP(F64x2ReplaceLane, F64, f64x2_replace_lane)
#undef CASE_SIMD_REPLACE_LANE_OP
default:
unsupported(decoder, kSimd, "simd");
@@ -2601,10 +2839,12 @@ class LiftoffCompiler {
const MemoryAccessImmediate<validate>& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
uint32_t, LiftoffRegister,
+ LiftoffRegister,
StoreType)) {
ValueType result_type = type.value_type();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
+#ifdef V8_TARGET_ARCH_IA32
// We have to reuse the value register as the result register so that we
// don't run out of registers on ia32. For this we use the value register
// as the result register if it has no other uses. Otherwise we allocate
@@ -2614,7 +2854,12 @@ class LiftoffCompiler {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
__ Move(result, value, result_type);
pinned.clear(value);
+ value = result;
}
+#else
+ LiftoffRegister result =
+ pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
+#endif
Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
kDoForceCheck)) {
@@ -2627,7 +2872,7 @@ class LiftoffCompiler {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- (asm_.*emit_fn)(addr, index, offset, result, type);
+ (asm_.*emit_fn)(addr, index, offset, value, result, type);
__ PushRegister(result_type, result);
}
@@ -2688,7 +2933,7 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- if (offset != 0) __ emit_i32_add(index_reg, index_reg, offset);
+ if (offset != 0) __ emit_i32_addi(index_reg, index_reg, offset);
LiftoffAssembler::VarState timeout =
__ cache_state()->stack_state.end()[-1];
@@ -2758,7 +3003,7 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
- if (offset) __ emit_i32_add(index, index, offset);
+ if (offset) __ emit_i32_addi(index, index, offset);
// TODO(ahaas): Use PrepareCall to prepare parameters.
__ SpillAllRegisters();
@@ -3137,6 +3382,52 @@ class LiftoffCompiler {
unsupported(decoder, kAnyRef, "table.fill");
}
+ void StructNew(FullDecoder* decoder,
+ const StructIndexImmediate<validate>& imm, const Value args[],
+ Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "struct.new");
+ }
+ void StructGet(FullDecoder* decoder, const Value& struct_obj,
+ const FieldIndexImmediate<validate>& field, Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "struct.get");
+ }
+ void StructSet(FullDecoder* decoder, const Value& struct_obj,
+ const FieldIndexImmediate<validate>& field,
+ const Value& field_value) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "struct.set");
+ }
+
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ const Value& length, const Value& initial_value,
+ Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "array.new");
+ }
+ void ArrayGet(FullDecoder* decoder, const Value& array_obj,
+ const ArrayIndexImmediate<validate>& imm, const Value& index,
+ Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "array.get");
+ }
+ void ArraySet(FullDecoder* decoder, const Value& array_obj,
+ const ArrayIndexImmediate<validate>& imm, const Value& index,
+ const Value& value) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "array.set");
+ }
+ void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "array.len");
+ }
+
+ void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "");
+ }
+
private:
// Emit additional source positions for return addresses. Used by debugging to
// OSR frames with different sets of breakpoints.
@@ -3164,7 +3455,7 @@ class LiftoffCompiler {
// 0 foo
// 1 nop // top frame return address
// bar
- // {WasmCompiledFrame::position} would then return "0" as the source
+ // {WasmFrame::position} would then return "0" as the source
// position of the top frame instead of "1". This is fixed by explicitly
// emitting the missing position before the return address, with a nop so
// that code offsets do not collide.
@@ -3191,6 +3482,7 @@ class LiftoffCompiler {
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
DebugSideTableBuilder* const debug_sidetable_builder_;
+ const ForDebugging for_debugging_;
LiftoffBailoutReason bailout_reason_ = kSuccess;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
@@ -3239,8 +3531,8 @@ class LiftoffCompiler {
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, int func_index, Counters* counters,
- WasmFeatures* detected, Vector<int> breakpoints,
+ const FunctionBody& func_body, int func_index, ForDebugging for_debugging,
+ Counters* counters, WasmFeatures* detected, Vector<int> breakpoints,
std::unique_ptr<DebugSideTable>* debug_sidetable,
Vector<int> extra_source_pos) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
@@ -3269,7 +3561,8 @@ WasmCompilationResult ExecuteLiftoffCompilation(
WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
call_descriptor, env, &zone, instruction_buffer->CreateView(),
- debug_sidetable_builder.get(), breakpoints, extra_source_pos);
+ debug_sidetable_builder.get(), for_debugging, breakpoints,
+ extra_source_pos);
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
@@ -3303,6 +3596,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result.func_index = func_index;
result.result_tier = ExecutionTier::kLiftoff;
+ result.for_debugging = for_debugging;
if (debug_sidetable) {
*debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
}
@@ -3322,7 +3616,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
&zone, env->module, env->enabled_features, &detected, func_body,
call_descriptor, env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
- &debug_sidetable_builder);
+ &debug_sidetable_builder, kForDebugging);
decoder.Decode();
DCHECK(decoder.ok());
DCHECK(!decoder.interface().did_bailout());
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 863fa7ee07..434172c4cf 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -45,6 +45,7 @@ enum LiftoffBailoutReason : int8_t {
kAtomics = 10,
kBulkMemory = 11,
kNonTrappingFloatToInt = 12,
+ kGC = 13,
// A little gap, for forward compatibility.
// Any other reason (use rarely; introduce new reasons if this spikes).
kOtherReason = 20,
@@ -54,8 +55,8 @@ enum LiftoffBailoutReason : int8_t {
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
- Counters*, WasmFeatures* detected_features, Vector<int> breakpoints = {},
- std::unique_ptr<DebugSideTable>* = nullptr,
+ ForDebugging, Counters*, WasmFeatures* detected_features,
+ Vector<int> breakpoints = {}, std::unique_ptr<DebugSideTable>* = nullptr,
Vector<int> extra_source_pos = {});
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 16ad652ade..92fecda7fa 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -183,6 +183,26 @@ class LiftoffRegister {
}
}
+ // Shifts the register code depending on the type before converting to a
+ // LiftoffRegister.
+ static LiftoffRegister from_external_code(RegClass rc, ValueType type,
+ int code) {
+ if (!kSimpleFPAliasing && type == kWasmF32) {
+ // Liftoff assumes a one-to-one mapping between float registers and
+ // double registers, and so does not distinguish between f32 and f64
+ // registers. The f32 register code must therefore be halved in order
+ // to pass the f64 code to Liftoff.
+ DCHECK_EQ(0, code % 2);
+ return LiftoffRegister::from_code(rc, code >> 1);
+ }
+ if (kNeedS128RegPair && type == kWasmS128) {
+ // Similarly for double registers and SIMD registers, the SIMD code
+ // needs to be doubled to pass the f64 code to Liftoff.
+ return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
+ }
+ return LiftoffRegister::from_code(rc, code);
+ }
+
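[Editor's note] The comment in the new from_external_code helper above describes a pure code mapping. A small illustrative sketch of that mapping, with the configuration flags stubbed out as assumed constants (not the real build flags):

    #include <cassert>

    constexpr bool kSimpleFPAliasingStub = false;  // assumed: ARM-style aliasing
    constexpr bool kNeedS128RegPairStub = true;    // assumed: S128 uses FP pairs

    enum class LaneKind { kF32, kF64, kS128 };

    // Maps an externally assigned FP register code to the double-register code
    // Liftoff works with: f32 codes are halved (f2 -> d1), S128 codes are
    // doubled to name the low double register of the pair (q3 -> d6).
    int ToLiftoffFpCode(LaneKind kind, int code) {
      if (!kSimpleFPAliasingStub && kind == LaneKind::kF32) {
        assert(code % 2 == 0);
        return code >> 1;
      }
      if (kNeedS128RegPairStub && kind == LaneKind::kS128) {
        return code << 1;
      }
      return code;  // f64 codes pass through unchanged
    }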
static LiftoffRegister ForPair(Register low, Register high) {
DCHECK(kNeedI64RegPair);
DCHECK_NE(low, high);
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 8461e0435f..f24c95008c 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -25,7 +25,7 @@ namespace liftoff {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 (high) | ^
@@ -46,12 +46,14 @@ constexpr int32_t kHighWordOffset = 4;
// fp-4 holds the stack marker, fp-8 is the instance parameter.
constexpr int kInstanceOffset = 8;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+ return MemOperand(offset > 0 ? fp : sp, -offset);
+}
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
- return MemOperand(fp, -offset + half_offset);
+ return MemOperand(offset > 0 ? fp : sp, -offset + half_offset);
}
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
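[Editor's note] The GetStackSlot change above makes the base register depend on the sign of the offset. A scalar illustration, assuming positive offsets are fp-relative frame slots and zero or negative offsets are addressed off sp (hypothetical types, not V8's MemOperand):

    #include <string>

    struct MemRef {
      std::string base;
      int displacement;
    };

    // offset > 0  -> fp - offset (a slot inside the current frame)
    // offset <= 0 -> sp - offset, i.e. sp + |offset|
    MemRef StackSlot(int offset) {
      return offset > 0 ? MemRef{"fp", -offset} : MemRef{"sp", -offset};
    }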
@@ -540,37 +542,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -590,6 +593,13 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, fp, offset, type);
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, type);
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
@@ -768,10 +778,10 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
-#define I32_BINOP_I(name, instruction) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- int32_t imm) { \
- instruction(dst, lhs, Operand(imm)); \
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
}
// clang-format off
@@ -801,12 +811,12 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
Register amount) { \
instruction(dst, src, amount); \
}
-#define I32_SHIFTOP_I(name, instruction) \
- I32_SHIFTOP(name, instruction##v) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
- int amount) { \
- DCHECK(is_uint5(amount)); \
- instruction(dst, src, amount); \
+#define I32_SHIFTOP_I(name, instruction) \
+ I32_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ DCHECK(is_uint5(amount)); \
+ instruction(dst, src, amount); \
}
I32_SHIFTOP_I(shl, sll)
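[Editor's note] The macro rewrites above (and the emit_i32_addi call sites earlier in this patch) give the immediate-operand emitters a trailing "i" instead of overloading the register form. A minimal sketch of the resulting pairing, with placeholder types rather than the real assembler interface:

    #include <cstdint>

    struct FakeAsm {
      // register + register operand
      void emit_i32_add(int dst, int lhs, int rhs) { /* add dst, lhs, rhs */ }
      // register + immediate operand, distinguished by the "i" suffix
      void emit_i32_addi(int dst, int lhs, int32_t imm) { /* addiu dst, lhs, imm */ }
    };

    // Mirrors the rewritten call sites that fold a static memory offset into
    // the index register.
    void EmitIndexPlusOffset(FakeAsm& a, int index_reg, uint32_t offset) {
      if (offset != 0) {
        a.emit_i32_addi(index_reg, index_reg, static_cast<int32_t>(offset));
      }
    }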
@@ -816,8 +826,8 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
lhs.high_gp(), imm,
kScratchReg, kScratchReg2);
@@ -922,8 +932,8 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShlPair);
}
-void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.low_gp()} will still be needed after writing {dst.high_gp()} and
// {dst.low_gp()}.
@@ -946,8 +956,8 @@ void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::SarPair);
}
-void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.high_gp()} will still be needed after writing {dst.high_gp()} and
// {dst.low_gp()}.
@@ -965,8 +975,8 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair);
}
-void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
UseScratchRegisterScope temps(this);
// {src.high_gp()} will still be needed after writing {dst.high_gp()} and
// {dst.low_gp()}.
@@ -1532,328 +1542,707 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_f64x2_splat");
}
-void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_extract_lane");
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
}
-void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_replace_lane");
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
}
-void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_add");
+ bailout(kSimd, "emit_i8x16_shl");
}
-void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_sub");
+ bailout(kSimd, "emit_i8x16_add");
}
-void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_saturate_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_saturate_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_mul");
+ bailout(kSimd, "emit_i8x16_sub");
}
-void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_splat");
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_saturate_s");
}
-void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_extract_lane");
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_saturate_u");
}
-void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_replace_lane");
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_mul");
}
-void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_add");
+ bailout(kSimd, "emit_i16x8_shl");
}
-void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_sub");
+ bailout(kSimd, "emit_i16x8_add");
}
-void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_saturate_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_saturate_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_mul");
+ bailout(kSimd, "emit_i16x8_sub");
}
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_splat");
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_saturate_s");
}
-void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_extract_lane");
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_saturate_u");
}
-void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_replace_lane");
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_i64x2_add");
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_i64x2_sub");
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_i64x2_mul");
}
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_splat");
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
}
-void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_extract_lane");
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
}
-void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_replace_lane");
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
}
-void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_add");
+ bailout(kSimd, "emit_f32x4_add");
}
-void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_sub");
+ bailout(kSimd, "emit_f32x4_sub");
}
-void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_mul");
+ bailout(kSimd, "emit_f32x4_mul");
}
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_splat");
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
}
-void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_extract_lane_u");
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
}
-void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_extract_lane_s");
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
}
-void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_replace_lane");
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
}
-void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_add");
+ bailout(kSimd, "emit_f64x2_add");
}
-void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_sub");
+ bailout(kSimd, "emit_f64x2_sub");
}
-void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_mul");
+ bailout(kSimd, "emit_f64x2_mul");
}
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_splat");
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
}
void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_i8x16_extract_lane_u");
}
-void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_extract_lane_s");
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
bailout(kSimd, "emit_i8x16_replace_lane");
}
-void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_add");
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
}
-void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_sub");
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_mul");
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index a3f8d60680..292f8032b8 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -27,7 +27,7 @@ namespace liftoff {
// 1 | return addr (ra) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 | ^
@@ -42,7 +42,9 @@ namespace liftoff {
// fp-8 holds the stack marker, fp-16 is the instance parameter.
constexpr int kInstanceOffset = 16;
-inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+inline MemOperand GetStackSlot(int offset) {
+ return MemOperand(offset > 0 ? fp : sp, -offset);
+}
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -61,6 +63,9 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case ValueType::kF64:
assm->Ldc1(dst.fp(), src);
break;
+ case ValueType::kS128:
+ assm->ld_b(dst.fp().toW(), src);
+ break;
default:
UNREACHABLE();
}
@@ -104,6 +109,10 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
+ case ValueType::kS128:
+ assm->daddiu(sp, sp, -kSystemPointerSize * 2);
+ assm->st_b(reg.fp().toW(), MemOperand(sp, 0));
+ break;
default:
UNREACHABLE();
}
@@ -374,6 +383,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kF64Load:
TurboAssembler::Uldc1(dst.fp(), src_op, t8);
break;
+ case LoadType::kS128Load:
+ TurboAssembler::ld_b(dst.fp().toW(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -437,6 +449,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kF64Store:
TurboAssembler::Usdc1(src.fp(), dst_op, t8);
break;
+ case StoreType::kS128Store:
+ TurboAssembler::st_b(src.fp().toW(), dst_op);
+ break;
default:
UNREACHABLE();
}
@@ -456,37 +471,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -506,6 +522,13 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, type);
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
@@ -523,7 +546,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (type != kWasmS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ TurboAssembler::move_v(dst.toW(), src.toW());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
@@ -542,6 +569,9 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
+ case ValueType::kS128:
+ TurboAssembler::st_b(reg.fp().toW(), dst);
+ break;
default:
UNREACHABLE();
}
@@ -585,6 +615,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
case ValueType::kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
+ case ValueType::kS128:
+ TurboAssembler::ld_b(reg.fp().toW(), src);
+ break;
default:
UNREACHABLE();
}
@@ -695,10 +728,10 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
-#define I32_BINOP_I(name, instruction) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- int32_t imm) { \
- instruction(dst, lhs, Operand(imm)); \
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
}
// clang-format off
@@ -728,12 +761,12 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
Register amount) { \
instruction(dst, src, amount); \
}
-#define I32_SHIFTOP_I(name, instruction) \
- I32_SHIFTOP(name, instruction##v) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
- int amount) { \
- DCHECK(is_uint5(amount)); \
- instruction(dst, src, amount); \
+#define I32_SHIFTOP_I(name, instruction) \
+ I32_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ DCHECK(is_uint5(amount)); \
+ instruction(dst, src, amount); \
}
I32_SHIFTOP_I(shl, sll)
@@ -808,10 +841,10 @@ I64_BINOP(xor, xor_)
#undef I64_BINOP
-#define I64_BINOP_I(name, instruction) \
- void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
- LiftoffRegister lhs, int32_t imm) { \
- instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
}
// clang-format off
@@ -828,15 +861,15 @@ I64_BINOP_I(xor, Xor)
LiftoffRegister dst, LiftoffRegister src, Register amount) { \
instruction(dst.gp(), src.gp(), amount); \
}
-#define I64_SHIFTOP_I(name, instruction) \
- I64_SHIFTOP(name, instruction##v) \
- void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
- LiftoffRegister src, int amount) { \
- DCHECK(is_uint6(amount)); \
- if (amount < 32) \
- instruction(dst.gp(), src.gp(), amount); \
- else \
- instruction##32(dst.gp(), src.gp(), amount - 32); \
+#define I64_SHIFTOP_I(name, instruction) \
+ I64_SHIFTOP(name, instruction##v) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ DCHECK(is_uint6(amount)); \
+ if (amount < 32) \
+ instruction(dst.gp(), src.gp(), amount); \
+ else \
+ instruction##32(dst.gp(), src.gp(), amount - 32); \
}
I64_SHIFTOP_I(shl, dsll)
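[Editor's note] The rewritten I64_SHIFTOP_I macro above splits constant 64-bit shifts at 32 because the MIPS shift opcodes encode only a 5-bit amount; amounts of 32 or more use the "32"-suffixed instruction with amount - 32. A scalar model checking that the split is equivalent to a plain 64-bit shift:

    #include <cassert>
    #include <cstdint>

    // Models dsll (amount < 32) versus dsll32 (shift by amount - 32 plus the
    // implicit extra 32) from the macro above.
    uint64_t Shl64ViaMipsPair(uint64_t src, int amount) {
      return amount < 32 ? src << amount : (src << 32) << (amount - 32);
    }

    int main() {
      const uint64_t x = 0x0123456789ABCDEFull;
      for (int a = 0; a < 64; ++a) {
        assert(Shl64ViaMipsPair(x, a) == x << a);
      }
      return 0;
    }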
@@ -1324,328 +1357,838 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fill_b(dst.fp().toW(), src.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fill_h(dst.fp().toW(), src.gp());
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fill_w(dst.fp().toW(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fill_d(dst.fp().toW(), src.gp());
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::FmoveLow(kScratchReg, src.fp());
+ fill_w(dst.fp().toW(), kScratchReg);
+}
+
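[Editor's note] In emit_f32x4_splat above, the float is first moved bit-for-bit into a GP register and then broadcast into every 32-bit lane with fill_w. A scalar model of that lowering:

    #include <array>
    #include <cstdint>
    #include <cstring>

    std::array<uint32_t, 4> F32x4Splat(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);  // FmoveLow: raw bit pattern, no conversion
      return {bits, bits, bits, bits};      // fill_w: replicate into all four lanes
    }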
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_splat");
+ TurboAssembler::Move(kScratchReg, src.fp());
+ fill_d(dst.fp().toW(), kScratchReg);
}
-void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_exract_lane");
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_replace_lane");
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+ nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
}
-void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_s_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_u_b(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+ nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_s_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_u_h(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+ nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_s_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_u_w(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fceq_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fcune_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fclt_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fcle_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fcune_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fclt_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fcle_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ nor_v(dst.fp().toW(), src.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ and_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ or_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ xor_v(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ nor_v(kSimd128ScratchReg, rhs.fp().toW(), rhs.fp().toW());
+ and_v(dst.fp().toW(), kSimd128ScratchReg, lhs.fp().toW());
+}
+
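+// s128.select: when dst aliases mask, bsel_v selects directly; otherwise the
+// result is computed as ((src1 ^ src2) & mask) ^ src2, which is equivalent to
+// (src1 & mask) | (src2 & ~mask) and leaves the mask register untouched.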
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ if (dst == mask) {
+ bsel_v(dst.fp().toW(), src2.fp().toW(), src1.fp().toW());
+ } else {
+ xor_v(kSimd128ScratchReg, src1.fp().toW(), src2.fp().toW());
+ and_v(kSimd128ScratchReg, kSimd128ScratchReg, mask.fp().toW());
+ xor_v(dst.fp().toW(), kSimd128ScratchReg, src2.fp().toW());
+ }
+}
+
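+// The integer negations below clear kSimd128RegZero with xor_v and then
+// compute dst = 0 - src with subv_*.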
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
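+// For the shift ops, a variable shift count is first splat across the vector
+// with fill_*, while the *_shli immediate forms mask the count to the lane
+// width (rhs & 7 for i8x16, & 15 for i16x8, & 31 for i32x4, & 63 for i64x2).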
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_add");
+ fill_b(kSimd128ScratchReg, rhs.gp());
+ sll_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
}
-void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_sub");
+ addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f64x2_mul");
+ subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_splat");
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_exract_lane");
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_replace_lane");
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ mulv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_add");
+ fill_h(kSimd128ScratchReg, rhs.gp());
+ sll_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
}
-void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_sub");
+ addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_f32x4_mul");
+ subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_splat");
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_exract_lane");
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_replace_lane");
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ mulv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_w(kSimd128ScratchReg, rhs.gp());
+ sll_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ subv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ mulv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ min_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_d(kSimd128ScratchReg, rhs.gp());
+ sll_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_add");
+ addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_sub");
+ subv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i64x2_mul");
+ mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_splat");
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bclri_w(dst.fp().toW(), src.fp().toW(), 31);
}
-void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_exract_lane");
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bnegi_w(dst.fp().toW(), src.fp().toW(), 31);
}
-void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_replace_lane");
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fsqrt_w(dst.fp().toW(), src.fp().toW());
}
-void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_add");
+ fadd_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_sub");
+ fsub_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i32x4_mul");
+ fmul_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_splat");
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fdiv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_exract_lane_u");
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
+ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs).
+ fseq_w(scratch0, lhs_msa, rhs_msa);
+ bsel_v(scratch0, rhs_msa, lhs_msa);
+ or_v(scratch1, scratch0, rhs_msa);
+ // scratch0 = isNaN(scratch1) ? scratch1: lhs.
+ fseq_w(scratch0, scratch1, scratch1);
+ bsel_v(scratch0, scratch1, lhs_msa);
+ // dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
+ fsle_w(dst_msa, scratch1, scratch0);
+ bsel_v(dst_msa, scratch0, scratch1);
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
+ // scratch1 = (lhs == rhs) ? (lhs & rhs) : (rhs & rhs).
+ fseq_w(scratch0, lhs_msa, rhs_msa);
+ bsel_v(scratch0, rhs_msa, lhs_msa);
+ and_v(scratch1, scratch0, rhs_msa);
+ // scratch0 = isNaN(scratch1) ? scratch1: lhs.
+ fseq_w(scratch0, scratch1, scratch1);
+ bsel_v(scratch0, scratch1, lhs_msa);
+ // dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
+ fsle_w(dst_msa, scratch0, scratch1);
+ bsel_v(dst_msa, scratch0, scratch1);
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bclri_d(dst.fp().toW(), src.fp().toW(), 63);
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bnegi_d(dst.fp().toW(), src.fp().toW(), 63);
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fsqrt_d(dst.fp().toW(), src.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_exract_lane_s");
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fadd_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_replace_lane");
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fsub_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_add");
+ fmul_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_sub");
+ fdiv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
+ // scratch1 = (lhs == rhs) ? (lhs | rhs) : (rhs | rhs).
+ fseq_d(scratch0, lhs_msa, rhs_msa);
+ bsel_v(scratch0, rhs_msa, lhs_msa);
+ or_v(scratch1, scratch0, rhs_msa);
+ // scratch0 = isNaN(scratch1) ? scratch1: lhs.
+ fseq_d(scratch0, scratch1, scratch1);
+ bsel_v(scratch0, scratch1, lhs_msa);
+ // dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
+ fsle_d(dst_msa, scratch1, scratch0);
+ bsel_v(dst_msa, scratch0, scratch1);
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i16x8_mul");
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
+ // scratch1 = (lhs == rhs) ? (lhs & rhs) : (rhs & rhs).
+ fseq_d(scratch0, lhs_msa, rhs_msa);
+ bsel_v(scratch0, rhs_msa, lhs_msa);
+ and_v(scratch1, scratch0, rhs_msa);
+ // scratch0 = isNaN(scratch1) ? scratch1: lhs.
+ fseq_d(scratch0, scratch1, scratch1);
+ bsel_v(scratch0, scratch1, lhs_msa);
+ // dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
+ fsle_d(dst_msa, scratch0, scratch1);
+ bsel_v(dst_msa, scratch0, scratch1);
+}
+
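+// The narrowing conversions saturate both inputs to the destination lane
+// range (sat_s_* for the signed forms, max_s_* against zero plus sat_u_* for
+// the unsigned forms) and then pack the even elements of the two saturated
+// vectors with pckev_*.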
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ sat_s_h(kSimd128ScratchReg, lhs.fp().toW(), 7);
+ sat_s_h(dst.fp().toW(), rhs.fp().toW(), 7);
+ pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
}
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_splat");
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ max_s_h(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW());
+ sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
+ max_s_h(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW());
+ sat_u_h(dst.fp().toW(), dst.fp().toW(), 7);
+ pckev_b(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ sat_s_w(kSimd128ScratchReg, lhs.fp().toW(), 15);
+ sat_s_w(dst.fp().toW(), rhs.fp().toW(), 15);
+ pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ max_s_w(kSimd128ScratchReg, kSimd128RegZero, lhs.fp().toW());
+ sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
+ max_s_w(dst.fp().toW(), kSimd128RegZero, rhs.fp().toW());
+ sat_u_w(dst.fp().toW(), dst.fp().toW(), 15);
+ pckev_h(dst.fp().toW(), dst.fp().toW(), kSimd128ScratchReg);
+}
+
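+// The widening conversions interleave the source with itself (ilvr_* for the
+// low half, ilvl_* for the high half) and sign-extend via a shift-left /
+// arithmetic-shift-right pair; the unsigned forms interleave with a zeroed
+// register instead.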
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvr_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_h(dst.fp().toW(), kSimd128ScratchReg, 8);
+ srai_h(dst.fp().toW(), dst.fp().toW(), 8);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvl_b(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_h(dst.fp().toW(), kSimd128ScratchReg, 8);
+ srai_h(dst.fp().toW(), dst.fp().toW(), 8);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvl_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvr_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_w(dst.fp().toW(), kSimd128ScratchReg, 16);
+ srai_w(dst.fp().toW(), dst.fp().toW(), 16);
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvl_h(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_w(dst.fp().toW(), kSimd128ScratchReg, 16);
+ srai_w(dst.fp().toW(), dst.fp().toW(), 16);
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvl_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ aver_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ aver_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ asub_s_b(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ asub_s_h(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ asub_s_w(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
+}
+
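+// Lane extraction moves the selected element into a GP register with
+// copy_s_* / copy_u_*; the floating-point variants go through kScratchReg and
+// then into an FP register with FmoveLow / Move.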
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_s_b(dst.gp(), lhs.fp().toW(), imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_exract_lane_u");
+ copy_u_b(dst.gp(), lhs.fp().toW(), imm_lane_idx);
}
-void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_s_h(dst.gp(), lhs.fp().toW(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_exract_lane_s");
+ copy_u_h(dst.gp(), lhs.fp().toW(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_s_w(dst.gp(), lhs.fp().toW(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_s_d(dst.gp(), lhs.fp().toW(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx);
+ TurboAssembler::FmoveLow(dst.fp(), kScratchReg);
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx);
+ TurboAssembler::Move(dst.fp(), kScratchReg);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_replace_lane");
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_b(dst.fp().toW(), imm_lane_idx, src2.gp());
}
-void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_add");
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_h(dst.fp().toW(), imm_lane_idx, src2.gp());
}
-void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_sub");
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_w(dst.fp().toW(), imm_lane_idx, src2.gp());
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- // TODO(mips): Support this on loongson 3a4000. Currently, the main MIPS
- // CPU, Loongson 3a3000 does not support MSA(simd128), but the upcoming
- // 3a4000 support MSA.
- bailout(kSimd, "emit_i8x16_mul");
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_d(dst.fp().toW(), imm_lane_idx, src2.gp());
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ TurboAssembler::FmoveLow(kScratchReg, src2.fp());
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_w(dst.fp().toW(), imm_lane_idx, kScratchReg);
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ TurboAssembler::Move(kScratchReg, src2.fp());
+ if (dst != src1) {
+ move_v(dst.fp().toW(), src1.fp().toW());
+ }
+ insert_d(dst.fp().toW(), imm_lane_idx, kScratchReg);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -1679,15 +2222,20 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
- daddiu(sp, sp, -(num_fp_regs * kStackSlotSize));
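+ // With MSA enabled, FP registers are spilled as full 128-bit values, so each
+ // save slot is 16 bytes instead of the 8 bytes used for plain doubles.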
+ unsigned slot_size = IsEnabled(MIPS_SIMD) ? 16 : 8;
+ daddiu(sp, sp, -(num_fp_regs * slot_size));
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
+ if (IsEnabled(MIPS_SIMD)) {
+ TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
+ } else {
+ TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
+ }
fp_regs.clear(reg);
- offset += sizeof(double);
+ offset += slot_size;
}
- DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
}
}
@@ -1696,9 +2244,13 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
unsigned fp_offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
+ if (IsEnabled(MIPS_SIMD)) {
+ TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
+ } else {
+ TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
+ }
fp_regs.clear(reg);
- fp_offset += sizeof(double);
+ fp_offset += (IsEnabled(MIPS_SIMD) ? 16 : 8);
}
if (fp_offset) daddiu(sp, sp, fp_offset);
LiftoffRegList gp_regs = regs & kGpCacheRegList;
@@ -1796,8 +2348,15 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
- asm_->push(kScratchReg);
+ if (src.type() != kWasmS128) {
+ asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ } else {
+ asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->push(kScratchReg);
+ asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ }
break;
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 3b5a5f3a40..e02ab95ae4 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -26,7 +26,7 @@ namespace liftoff {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 (high) | ^
@@ -142,37 +142,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -191,6 +192,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
@@ -264,22 +271,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
Register rhs) { \
bailout(kUnsupportedArchitecture, "i32 binop:: " #name); \
}
-#define UNIMPLEMENTED_I32_BINOP_I(name) \
- UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
+#define UNIMPLEMENTED_I32_BINOP_I(name) \
+ UNIMPLEMENTED_I32_BINOP(name) \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
-#define UNIMPLEMENTED_I64_BINOP_I(name) \
- UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
+#define UNIMPLEMENTED_I64_BINOP_I(name) \
+ UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister lhs, int32_t imm) { \
+ bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src) { \
@@ -299,22 +306,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount) { \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
+ } \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
+ int32_t amount) { \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount) { \
bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
} \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- int32_t amount) { \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int32_t amount) { \
bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
@@ -550,6 +557,21 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_f64x2replacelane");
}
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2sqrt");
+}
+
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2add");
@@ -565,6 +587,21 @@ void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_f64x2mul");
}
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2max");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -583,6 +620,21 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_f32x4replacelane");
}
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4sqrt");
+}
+
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4add");
@@ -598,6 +650,21 @@ void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_f32x4mul");
}
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4max");
+}
+
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i64x2splat");
@@ -616,6 +683,21 @@ void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2replacelane");
}
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2neg");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i64x2_shli");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -649,6 +731,21 @@ void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4replacelane");
}
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4neg");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i32x4_shli");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -664,26 +761,113 @@ void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4mul");
}
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
}
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8neg");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i16x8_shli");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
+}
+
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
+}
+
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_max_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -721,6 +905,21 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16replacelane");
}
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16neg");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i8x16_shli");
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -732,16 +931,317 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_32x4gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kUnsupportedArchitecture, "emit_s128select");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
+}
+
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
+}
+
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index e311677e79..704fcb81d7 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -26,7 +26,7 @@ namespace liftoff {
// 1 | return addr (lr) |
// 0 | previous frame (fp)|
// -----+--------------------+ <-- frame ptr (fp)
-// -1 | 0xa: WASM_COMPILED |
+// -1 | 0xa: WASM |
// -2 | instance |
// -----+--------------------+---------------------------
// -3 | slot 0 (high) | ^
@@ -141,37 +141,38 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
@@ -190,6 +191,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
@@ -268,22 +275,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
Register rhs) { \
bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
}
-#define UNIMPLEMENTED_I32_BINOP_I(name) \
- UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
+#define UNIMPLEMENTED_I32_BINOP_I(name) \
+ UNIMPLEMENTED_I32_BINOP(name) \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
-#define UNIMPLEMENTED_I64_BINOP_I(name) \
- UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
+#define UNIMPLEMENTED_I64_BINOP_I(name) \
+ UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister lhs, int32_t imm) { \
+ bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src) { \
@@ -303,22 +310,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount) { \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
+ } \
+ void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
+ int32_t amount) { \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount) { \
bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
} \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- int32_t amount) { \
+ void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int32_t amount) { \
bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
@@ -554,6 +561,21 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_f64x2replacelane");
}
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2sqrt");
+}
+
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2add");
@@ -569,6 +591,21 @@ void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_f64x2mul");
}
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2max");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -587,6 +624,21 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_f32x4replacelane");
}
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4sqrt");
+}
+
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4add");
@@ -602,6 +654,21 @@ void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_f32x4mul");
}
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4max");
+}
+
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i64x2splat");
@@ -620,6 +687,21 @@ void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2replacelane");
}
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i64x2neg");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i64x2_shli");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -653,6 +735,21 @@ void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4replacelane");
}
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4neg");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i32x4_shli");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -668,26 +765,113 @@ void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4mul");
}
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
}
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8neg");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i16x8_shli");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
+}
+
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
+}
+
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_max_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -731,21 +915,337 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16replacelane");
}
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16neg");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "i8x16_shli");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
+}
+
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
+}
+
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kUnsupportedArchitecture, "emit_s128select");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 6c58625536..7638c4f9cc 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -36,7 +36,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
// rbp-8 holds the stack marker, rbp-16 is the instance parameter.
constexpr int kInstanceOffset = 16;
-inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
+inline Operand GetStackSlot(int offset) {
+ return Operand(offset > 0 ? rbp : rsp, -offset);
+}
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
@@ -122,13 +124,6 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
-template <typename... Regs>
-inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
- for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
- if (assm->cache_state()->is_used(r)) assm->SpillRegister(r);
- }
-}
-
constexpr int kSubSpSize = 7; // 7 bytes for "subq rsp, <imm32>"
} // namespace liftoff
@@ -390,8 +385,15 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- DCHECK(!cache_state()->is_used(value));
+ LiftoffRegister result, StoreType type) {
+ DCHECK(!cache_state()->is_used(result));
+ if (cache_state()->is_used(value)) {
+ // We cannot overwrite {value}, but the {value} register is changed in the
+ // code we generate. Therefore we copy {value} to {result} and use the
+ // {result} register in the code below.
+ movq(result.gp(), value.gp());
+ value = result;
+ }
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
@@ -401,19 +403,25 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
case StoreType::kI32Store8:
case StoreType::kI64Store8:
xaddb(dst_op, value.gp());
- movzxbq(value.gp(), value.gp());
+ movzxbq(result.gp(), value.gp());
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
xaddw(dst_op, value.gp());
- movzxwq(value.gp(), value.gp());
+ movzxwq(result.gp(), value.gp());
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
xaddl(dst_op, value.gp());
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
case StoreType::kI64Store:
xaddq(dst_op, value.gp());
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
default:
UNREACHABLE();
@@ -422,8 +430,15 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- DCHECK(!cache_state()->is_used(value));
+ LiftoffRegister result, StoreType type) {
+ DCHECK(!cache_state()->is_used(result));
+ if (cache_state()->is_used(value)) {
+ // We cannot overwrite {value}, but the {value} register is changed in the
+ // code we generate. Therefore we copy {value} to {result} and use the
+ // {result} register in the code below.
+ movq(result.gp(), value.gp());
+ value = result;
+ }
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
@@ -434,25 +449,31 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
negb(value.gp());
lock();
xaddb(dst_op, value.gp());
- movzxbq(value.gp(), value.gp());
+ movzxbq(result.gp(), value.gp());
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
negw(value.gp());
lock();
xaddw(dst_op, value.gp());
- movzxwq(value.gp(), value.gp());
+ movzxwq(result.gp(), value.gp());
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
negl(value.gp());
lock();
xaddl(dst_op, value.gp());
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
case StoreType::kI64Store:
negq(value.gp());
lock();
xaddq(dst_op, value.gp());
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
default:
UNREACHABLE();
@@ -461,40 +482,21 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
namespace liftoff {
#define __ lasm->
-// Checks if a register in {possible_uses} uses {reg}. If so, it allocates a
-// replacement register for that use, and moves the content of {reg} to {use}.
-// The replacement register is written into the pointer stored in
-// {possible_uses}.
-inline void ClearRegister(LiftoffAssembler* lasm, Register reg,
- std::initializer_list<Register*> possible_uses,
- LiftoffRegList pinned) {
- liftoff::SpillRegisters(lasm, reg);
- Register replacement = no_reg;
- for (Register* use : possible_uses) {
- if (reg != *use) continue;
- if (replacement == no_reg) {
- replacement = __ GetUnusedRegister(kGpReg, pinned).gp();
- __ movq(replacement, reg);
- }
- // We cannot leave this loop early. There may be multiple uses of {reg}.
- *use = replacement;
- }
-}
inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
- DCHECK(!__ cache_state()->is_used(value));
+ LiftoffRegister result, StoreType type) {
+ DCHECK(!__ cache_state()->is_used(result));
Register value_reg = value.gp();
// The cmpxchg instruction uses rax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
- ClearRegister(lasm, rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
+ __ ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (__ emit_debug_code() && offset_reg != no_reg) {
__ AssertZeroExtended(offset_reg);
}
@@ -554,8 +556,8 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
UNREACHABLE();
}
- if (value.gp() != rax) {
- __ movq(value.gp(), rax);
+ if (result.gp() != rax) {
+ __ movq(result.gp(), rax);
}
}
#undef __
@@ -563,29 +565,37 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
- offset_reg, offset_imm, value, type);
+ offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
- offset_reg, offset_imm, value, type);
+ offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
- StoreType type) {
+ LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
- offset_reg, offset_imm, value, type);
+ offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
- LiftoffRegister value, StoreType type) {
- DCHECK(!cache_state()->is_used(value));
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ DCHECK(!cache_state()->is_used(result));
+ if (cache_state()->is_used(value)) {
+ // We cannot overwrite {value}, but the {value} register is changed in the
+ // code we generate. Therefore we copy {value} to {result} and use the
+ // {result} register in the code below.
+ movq(result.gp(), value.gp());
+ value = result;
+ }
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
@@ -594,19 +604,25 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
case StoreType::kI32Store8:
case StoreType::kI64Store8:
xchgb(value.gp(), dst_op);
- movzxbq(value.gp(), value.gp());
+ movzxbq(result.gp(), value.gp());
break;
case StoreType::kI32Store16:
case StoreType::kI64Store16:
xchgw(value.gp(), dst_op);
- movzxwq(value.gp(), value.gp());
+ movzxwq(result.gp(), value.gp());
break;
case StoreType::kI32Store:
case StoreType::kI64Store32:
xchgl(value.gp(), dst_op);
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
case StoreType::kI64Store:
xchgq(value.gp(), dst_op);
+ if (value != result) {
+ movq(result.gp(), value.gp());
+ }
break;
default:
UNREACHABLE();
@@ -623,8 +639,7 @@ void LiftoffAssembler::AtomicCompareExchange(
// move any use to another register.
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, value_reg);
- liftoff::ClearRegister(this, rax, {&dst_addr, &offset_reg, &value_reg},
- pinned);
+ ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (expected.gp() != rax) {
movq(rax, expected.gp());
}
@@ -682,6 +697,13 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Store(this, dst, src, type);
+}
+
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
@@ -840,7 +862,7 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_addi(Register dst, Register lhs, int32_t imm) {
if (lhs != dst) {
leal(dst, Operand(lhs, imm));
} else {
@@ -916,7 +938,7 @@ void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// another temporary register.
// Do all this before any branch, such that the code is executed
// unconditionally, as the cache state will also be modified unconditionally.
- liftoff::SpillRegisters(assm, rdx, rax);
+ assm->SpillRegisters(rdx, rax);
if (rhs == rax || rhs == rdx) {
iop(mov, kScratchRegister, rhs);
rhs = kScratchRegister;
@@ -1006,7 +1028,7 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
lhs, rhs);
}
-void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_andi(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::andl, &Assembler::movl>(
this, dst, lhs, imm);
}
@@ -1016,7 +1038,7 @@ void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
lhs, rhs);
}
-void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_ori(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::orl, &Assembler::movl>(this, dst,
lhs, imm);
}
@@ -1026,7 +1048,7 @@ void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
lhs, rhs);
}
-void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
+void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xorl, &Assembler::movl>(
this, dst, lhs, imm);
}
@@ -1071,8 +1093,8 @@ void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
&Assembler::shll_cl);
}
-void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
+ int32_t amount) {
if (dst != src) movl(dst, src);
shll(dst, Immediate(amount & 31));
}
@@ -1083,8 +1105,8 @@ void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
&Assembler::sarl_cl);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
+ int32_t amount) {
if (dst != src) movl(dst, src);
sarl(dst, Immediate(amount & 31));
}
@@ -1095,8 +1117,8 @@ void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
&Assembler::shrl_cl);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
- int32_t amount) {
+void LiftoffAssembler::emit_i32_shri(Register dst, Register src,
+ int32_t amount) {
if (dst != src) movl(dst, src);
shrl(dst, Immediate(amount & 31));
}
@@ -1125,8 +1147,8 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
if (lhs.gp() != dst.gp()) {
leaq(dst.gp(), Operand(lhs.gp(), imm));
} else {
@@ -1191,8 +1213,8 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
-void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::andq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
@@ -1203,8 +1225,8 @@ void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
-void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::orq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
@@ -1215,8 +1237,8 @@ void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
-void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xorq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
@@ -1227,8 +1249,8 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
&Assembler::shlq_cl);
}
-void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
if (dst.gp() != src.gp()) movq(dst.gp(), src.gp());
shlq(dst.gp(), Immediate(amount & 63));
}
@@ -1239,8 +1261,8 @@ void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
&Assembler::sarq_cl);
}
-void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
if (dst.gp() != src.gp()) movq(dst.gp(), src.gp());
sarq(dst.gp(), Immediate(amount & 63));
}
@@ -1251,8 +1273,8 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&Assembler::shrq_cl);
}
-void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
- int32_t amount) {
+void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
+ int32_t amount) {
if (dst != src) movq(dst.gp(), src.gp());
shrq(dst.gp(), Immediate(amount & 63));
}
@@ -1900,14 +1922,19 @@ void EmitSimdCommutativeBinOp(
template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
-void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
+void EmitSimdNonCommutativeBinOp(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs, base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
(assm->*avx_op)(dst.fp(), lhs.fp(), rhs.fp());
- } else if (lhs.fp() == rhs.fp()) {
- assm->pxor(dst.fp(), dst.fp());
- } else if (dst.fp() == rhs.fp()) {
+ return;
+ }
+
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
+ if (dst.fp() == rhs.fp()) {
assm->movaps(kScratchDoubleReg, rhs.fp());
assm->movaps(dst.fp(), lhs.fp());
(assm->*sse_op)(dst.fp(), kScratchDoubleReg);
@@ -1916,138 +1943,733 @@ void EmitSimdSub(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), rhs.fp());
}
}
+
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
+ void (Assembler::*sse_op)(XMMRegister, XMMRegister), uint8_t width>
+void EmitSimdShiftOp(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister operand, LiftoffRegister count) {
+ constexpr int mask = (1 << width) - 1;
+ assm->movq(kScratchRegister, count.gp());
+ assm->andq(kScratchRegister, Immediate(mask));
+ assm->Movq(kScratchDoubleReg, kScratchRegister);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), operand.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != operand.fp()) assm->movaps(dst.fp(), operand.fp());
+ (assm->*sse_op)(dst.fp(), kScratchDoubleReg);
+ }
+}
+
+template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, byte),
+ void (Assembler::*sse_op)(XMMRegister, byte), uint8_t width>
+void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister operand, int32_t count) {
+ constexpr int mask = (1 << width) - 1;
+ byte shift = static_cast<byte>(count & mask);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx_op)(dst.fp(), operand.fp(), shift);
+ } else {
+ if (dst.fp() != operand.fp()) assm->movaps(dst.fp(), operand.fp());
+ (assm->*sse_op)(dst.fp(), shift);
+ }
+}
} // namespace liftoff
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pshufb(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movd(dst.fp(), src.gp());
+ Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movq(dst.fp(), src.gp());
+ Movddup(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() != src.fp()) {
+ Movss(dst.fp(), src.fp());
+ }
+ Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movddup(dst.fp(), src.fp());
}
-void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrq(kScratchRegister, lhs.fp(), static_cast<int8_t>(imm_lane_idx));
- Movq(dst.fp(), kScratchRegister);
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqb, &Assembler::pcmpeqb>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqb, &Assembler::pcmpeqb>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtb,
+ &Assembler::pcmpgtb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxub, &Assembler::pmaxub>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(dst.fp(), ref);
+ Pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsb, &Assembler::pminsb>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqb(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminub, &Assembler::pminub>(
+ this, dst, lhs, rhs);
+ Pcmpeqb(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqw, &Assembler::pcmpeqw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqw, &Assembler::pcmpeqw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtw,
+ &Assembler::pcmpgtw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxuw, &Assembler::pmaxuw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(dst.fp(), ref);
+ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsw, &Assembler::pminsw>(
+ this, dst, lhs, rhs);
+ Pcmpeqw(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminuw, &Assembler::pminuw>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqw(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqd, &Assembler::pcmpeqd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqd, &Assembler::pcmpeqd>(
+ this, dst, lhs, rhs);
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpcmpgtd,
+ &Assembler::pcmpgtd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxud, &Assembler::pmaxud>(
+ this, dst, lhs, rhs);
+ Pcmpeqd(dst.fp(), ref);
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsd, &Assembler::pminsd>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqd(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ DoubleRegister ref = rhs.fp();
+ if (dst == rhs) {
+ Movaps(kScratchDoubleReg, rhs.fp());
+ ref = kScratchDoubleReg;
+ }
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminud, &Assembler::pminud>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqd(dst.fp(), ref);
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpneqps,
+ &Assembler::cmpneqps>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpltps,
+ &Assembler::cmpltps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpleps,
+ &Assembler::cmpleps>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqpd, &Assembler::cmpeqpd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpneqpd,
+ &Assembler::cmpneqpd>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmpltpd,
+ &Assembler::cmpltpd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vcmplepd,
+ &Assembler::cmplepd>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ if (dst.fp() != src.fp()) {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Pxor(dst.fp(), src.fp());
+ } else {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+ }
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpand, &Assembler::pand>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpor, &Assembler::por>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpxor, &Assembler::pxor>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vpblendw(dst.fp(), src1.fp(), src2.fp(), 0b00001111);
- } else {
- vmovlhps(dst.fp(), src1.fp(), src2.fp());
- }
+ vxorps(kScratchDoubleReg, src1.fp(), src2.fp());
+ vandps(kScratchDoubleReg, kScratchDoubleReg, mask.fp());
+ vxorps(dst.fp(), kScratchDoubleReg, src2.fp());
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- pblendw(dst.fp(), src2.fp(), 0b00001111);
- } else {
- movlhps(dst.fp(), src2.fp());
- }
+ movaps(kScratchDoubleReg, src1.fp());
+ xorps(kScratchDoubleReg, src2.fp());
+ andps(kScratchDoubleReg, mask.fp());
+ if (dst.fp() != src2.fp()) movaps(dst.fp(), src2.fp());
+ xorps(dst.fp(), kScratchDoubleReg);
}
}
-void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psignb(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pxor(dst.fp(), dst.fp());
+ Psubb(dst.fp(), src.fp());
+ }
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
- this, dst, lhs, rhs);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp_simd =
+ GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
+ // Mask off the unwanted bits before word-shifting.
+ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ movq(kScratchRegister, rhs.gp());
+ andq(kScratchRegister, Immediate(7));
+ addq(kScratchRegister, Immediate(8));
+ Movq(tmp_simd.fp(), kScratchRegister);
+ Psrlw(kScratchDoubleReg, tmp_simd.fp());
+ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ pand(dst.fp(), kScratchDoubleReg);
+ }
+ subq(kScratchRegister, Immediate(8));
+ Movq(tmp_simd.fp(), kScratchRegister);
+ Psllw(dst.fp(), tmp_simd.fp());
}
-void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ byte shift = static_cast<byte>(rhs & 0x7);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsllw(dst.fp(), lhs.fp(), shift);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ psllw(dst.fp(), shift);
+ }
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ movl(kScratchRegister, Immediate(mask));
+ Movd(kScratchDoubleReg, kScratchRegister);
+ Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
+ Pand(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vsubpd, &Assembler::subpd>(this, dst, lhs,
- rhs);
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubb, &Assembler::psubb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Movss(dst.fp(), src.fp());
- }
- Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
+ &Assembler::psubusb>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+    // I16x8 view of I8x16
+    // left = AAaa AAaa ... AAaa AAaa
+    // right= BBbb BBbb ... BBbb BBbb
+    // t = 00AA 00AA ... 00AA 00AA
+    // s = 00BB 00BB ... 00BB 00BB
+    vpsrlw(tmp.fp(), lhs.fp(), 8);
+    vpsrlw(kScratchDoubleReg, rhs.fp(), 8);
+    // t = I16x8Mul(t0, t1)
+    //    => __PP __PP ...  __PP  __PP
+    vpmullw(tmp.fp(), tmp.fp(), kScratchDoubleReg);
+    // s = left * 256
+    vpsllw(kScratchDoubleReg, lhs.fp(), 8);
+    // dst = I16x8Mul(left * 256, right)
+    //    => pp__ pp__ ...  pp__  pp__
+    vpmullw(dst.fp(), kScratchDoubleReg, rhs.fp());
+    // dst = I16x8Shr(dst, 8)
+    //    => 00pp 00pp ...  00pp  00pp
+    vpsrlw(dst.fp(), dst.fp(), 8);
+    // t = I16x8Shl(t, 8)
+    //    => PP00 PP00 ...  PP00  PP00
+    vpsllw(tmp.fp(), tmp.fp(), 8);
+    // dst = I16x8Or(dst, t)
+    //    => PPpp PPpp ...  PPpp  PPpp
+    vpor(dst.fp(), dst.fp(), tmp.fp());
} else {
if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+    // I16x8 view of I8x16
+    // left = AAaa AAaa ... AAaa AAaa
+    // right= BBbb BBbb ... BBbb BBbb
+    // t = 00AA 00AA ... 00AA 00AA
+    // s = 00BB 00BB ... 00BB 00BB
+    movaps(tmp.fp(), dst.fp());
+    movaps(kScratchDoubleReg, rhs.fp());
+    psrlw(tmp.fp(), 8);
+    psrlw(kScratchDoubleReg, 8);
+    // dst = left * 256
+    psllw(dst.fp(), 8);
+    // t = I16x8Mul(t, s)
+    //    => __PP __PP ...  __PP  __PP
+    pmullw(tmp.fp(), kScratchDoubleReg);
+    // dst = I16x8Mul(left * 256, right)
+    //    => pp__ pp__ ...  pp__  pp__
+    pmullw(dst.fp(), rhs.fp());
+    // t = I16x8Shl(t, 8)
+    //    => PP00 PP00 ...  PP00  PP00
+    psllw(tmp.fp(), 8);
+    // dst = I16x8Shr(dst, 8)
+    //    => 00pp 00pp ...  00pp  00pp
+    psrlw(dst.fp(), 8);
+    // dst = I16x8Or(dst, t)
+    //    => PPpp PPpp ...  PPpp  PPpp
+    por(dst.fp(), tmp.fp());
}
}
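
Since SSE/AVX have no packed byte multiply, the i8x16 multiply above works on the 16-bit view described in the comments. A scalar sketch of what one 16-bit lane ends up holding, assuming left = (A << 8) | a and right = (B << 8) | b; the function name is illustrative only.

#include <cstdint>

// One 16-bit lane after the vpmullw/psllw/psrlw sequence: the high byte is the
// low 8 bits of A*B, the low byte is the low 8 bits of a*b.
uint16_t i8x16_mul_lane_model(uint16_t left, uint16_t right) {
  uint16_t t = uint16_t((left >> 8) * (right >> 8));       // A*B         (__PP)
  uint16_t s = uint16_t(left << 8);                        // left * 256
  uint16_t d = uint16_t((uint32_t{s} * right) & 0xFFFFu);  // (a*b) << 8  (pp__)
  d = uint16_t(d >> 8);                                    //             (00pp)
  t = uint16_t(t << 8);                                    //             (PP00)
  return uint16_t(t | d);                                  //             (PPpp)
}
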
-void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsb, &Assembler::pminsb>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminub, &Assembler::pminub>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsb, &Assembler::pmaxsb>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxub, &Assembler::pmaxub>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psignw(dst.fp(), kScratchDoubleReg);
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ Pxor(dst.fp(), dst.fp());
+ Psubw(dst.fp(), src.fp());
}
}
-void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsllw, &Assembler::psllw, 4>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vsubps, &Assembler::subps>(this, dst, lhs,
- rhs);
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubw, &Assembler::psubw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movq(dst.fp(), src.gp());
- Movddup(dst.fp(), dst.fp());
+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrq(dst.gp(), lhs.fp(), static_cast<int8_t>(imm_lane_idx));
+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
+ &Assembler::psubusw>(this, dst, lhs,
+ rhs);
}
-void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsw, &Assembler::pminsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminuw, &Assembler::pminuw>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsw, &Assembler::pmaxsw>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxuw, &Assembler::pmaxuw>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psignd(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pxor(dst.fp(), dst.fp());
+ Psubd(dst.fp(), src.fp());
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpslld, &Assembler::pslld, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubd, &Assembler::psubd>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminsd, &Assembler::pminsd>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpminud, &Assembler::pminud>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxsd, &Assembler::pmaxsd>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaxud, &Assembler::pmaxud>(
+ this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
+ Pxor(reg, reg);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrq(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ vpsubq(dst.fp(), reg, src.fp());
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrq(dst.fp(), src2.gp(), imm_lane_idx);
+ psubq(reg, src.fp());
+ if (dst.fp() != reg) movapd(dst.fp(), reg);
}
}
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsllq, &Assembler::psllq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2056,8 +2678,8 @@ void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubq, &Assembler::psubq>(this, dst, lhs,
- rhs);
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubq, &Assembler::psubq>(
+ this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2087,119 +2709,401 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Paddq(dst.fp(), tmp2.fp());
}
-void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psrld(kScratchDoubleReg, static_cast<byte>(1));
+ Andps(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psrld(dst.fp(), static_cast<byte>(1));
+ Andps(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Pslld(kScratchDoubleReg, static_cast<byte>(31));
+ Xorps(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Pslld(dst.fp(), static_cast<byte>(31));
+ Xorps(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sqrtps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddps, &Assembler::addps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vsubps, &Assembler::subps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulps, &Assembler::mulps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vdivps, &Assembler::divps>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // The minps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minps in both orders, merge the results, and adjust.
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ vminps(kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vminps(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movaps(kScratchDoubleReg, src);
+ minps(kScratchDoubleReg, dst.fp());
+ minps(dst.fp(), src);
} else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ movaps(kScratchDoubleReg, lhs.fp());
+ minps(kScratchDoubleReg, rhs.fp());
+ movaps(dst.fp(), rhs.fp());
+ minps(dst.fp(), lhs.fp());
}
+ // propagate -0's and NaNs, which may be non-canonical.
+ Orps(kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Orps(kScratchDoubleReg, dst.fp());
+ Psrld(dst.fp(), byte{10});
+ Andnps(dst.fp(), kScratchDoubleReg);
}
-void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
- this, dst, lhs, rhs);
+ // The maxps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vmaxps(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movaps(kScratchDoubleReg, src);
+ maxps(kScratchDoubleReg, dst.fp());
+ maxps(dst.fp(), src);
+ } else {
+ movaps(kScratchDoubleReg, lhs.fp());
+ maxps(kScratchDoubleReg, rhs.fp());
+ movaps(dst.fp(), rhs.fp());
+ maxps(dst.fp(), lhs.fp());
+ }
+ // Find discrepancies.
+ Xorps(dst.fp(), kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ Orps(kScratchDoubleReg, dst.fp());
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subps(kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Psrld(dst.fp(), byte{10});
+ Andnps(dst.fp(), kScratchDoubleReg);
+}
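
The adjustment code in both f32x4 min and max exists because minps/maxps simply return their second operand on NaN or on (+0, -0) inputs, while WebAssembly requires NaN propagation with a canonical (quiet, zero-payload) NaN and orders -0 below +0. A scalar statement of the semantics being targeted, as a sketch only (this is not the lowering itself):

#include <cmath>
#include <limits>

// Wasm-level semantics that the two-order minps + NaN canonicalization emulate.
float wasm_f32_min_model(float a, float b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<float>::quiet_NaN();  // canonical NaN
  if (a == 0.0f && b == 0.0f)
    return std::signbit(a) ? a : b;                  // -0 is the minimum
  return a < b ? a : b;
}
// max mirrors this: NaN still wins, and +0 is preferred over -0.
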
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psrlq(kScratchDoubleReg, static_cast<byte>(1));
+ Andpd(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psrlq(dst.fp(), static_cast<byte>(1));
+ Andpd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubd, &Assembler::psubd>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ if (dst.fp() == src.fp()) {
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psllq(kScratchDoubleReg, static_cast<byte>(63));
+ Xorpd(dst.fp(), kScratchDoubleReg);
+ } else {
+ Pcmpeqd(dst.fp(), dst.fp());
+ Psllq(dst.fp(), static_cast<byte>(63));
+ Xorpd(dst.fp(), src.fp());
+ }
}
-void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sqrtpd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmulld, &Assembler::pmulld>(
- this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vaddpd, &Assembler::addpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vsubpd, &Assembler::subpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vmulpd, &Assembler::mulpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- uint8_t imm_lane_idx) {
- Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
- movsxwl(dst.gp(), dst.gp());
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vdivpd, &Assembler::divpd>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
- LiftoffRegister src1,
- LiftoffRegister src2,
- uint8_t imm_lane_idx) {
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minpd in both orders, merge the results, and adjust.
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ vminpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vminpd(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movapd(kScratchDoubleReg, src);
+ minpd(kScratchDoubleReg, dst.fp());
+ minpd(dst.fp(), src);
} else {
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ movapd(kScratchDoubleReg, lhs.fp());
+ minpd(kScratchDoubleReg, rhs.fp());
+ movapd(dst.fp(), rhs.fp());
+ minpd(dst.fp(), lhs.fp());
}
+ // propagate -0's and NaNs, which may be non-canonical.
+ Orpd(kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Orpd(kScratchDoubleReg, dst.fp());
+ Psrlq(dst.fp(), 13);
+ Andnpd(dst.fp(), kScratchDoubleReg);
}
-void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
- this, dst, lhs, rhs);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxpd in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
+ vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
+ } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
+ XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
+ movapd(kScratchDoubleReg, src);
+ maxpd(kScratchDoubleReg, dst.fp());
+ maxpd(dst.fp(), src);
+ } else {
+ movapd(kScratchDoubleReg, lhs.fp());
+ maxpd(kScratchDoubleReg, rhs.fp());
+ movapd(dst.fp(), rhs.fp());
+ maxpd(dst.fp(), lhs.fp());
+ }
+ // Find discrepancies.
+ Xorpd(dst.fp(), kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ Orpd(kScratchDoubleReg, dst.fp());
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subpd(kScratchDoubleReg, dst.fp());
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Psrlq(dst.fp(), 13);
+ Andnpd(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpacksswb,
+ &Assembler::packsswb>(this, dst, lhs,
+ rhs);
}
-void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubw, &Assembler::psubw>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackuswb,
+ &Assembler::packuswb>(this, dst, lhs,
+ rhs);
}
-void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmullw, &Assembler::pmullw>(
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackssdw,
+ &Assembler::packssdw>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpackusdw,
+ &Assembler::packusdw>(this, dst, lhs,
+ rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxbw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovsxbw(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxbw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovzxbw(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxwd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovsxwd(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxwd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
+ Pmovzxwd(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vandnps, &Assembler::andnps>(
+ this, dst, rhs, lhs);
+}
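
The operand order in emit_s128_and_not is deliberately swapped: wasm's v128.andnot computes lhs & ~rhs, whereas x86 andnps negates its first operand (~first & second), so the helper is handed (rhs, lhs). A one-line scalar model, for illustration only:

#include <cstdint>

// Per-lane meaning of v128.andnot; passing (rhs, lhs) to andnps yields exactly this.
uint32_t s128_andnot_lane_model(uint32_t lhs, uint32_t rhs) {
  return lhs & ~rhs;
}
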
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpavgb, &Assembler::pavgb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
- LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpavgw, &Assembler::pavgw>(
+ this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsb(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsw(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pabsd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsxbl(dst.gp(), dst.gp());
}
-void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
Pextrb(dst.gp(), lhs.fp(), imm_lane_idx);
- movsxbl(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+ movsxwl(dst.gp(), dst.gp());
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrw(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrd(dst.gp(), lhs.fp(), imm_lane_idx);
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrq(dst.gp(), lhs.fp(), static_cast<int8_t>(imm_lane_idx));
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
+ if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ Pextrq(kScratchRegister, lhs.fp(), static_cast<int8_t>(imm_lane_idx));
+ Movq(dst.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -2216,77 +3120,80 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
- this, dst, lhs, rhs);
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrw(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrw(dst.fp(), src2.gp(), imm_lane_idx);
+ }
}
-void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- liftoff::EmitSimdSub<&Assembler::vpsubb, &Assembler::psubb>(this, dst, lhs,
- rhs);
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpinsrd(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrd(dst.fp(), src2.gp(), imm_lane_idx);
+ }
}
-void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
- LiftoffRegister tmp =
- GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
-    // I16x8 view of I8x16
-    // left = AAaa AAaa ... AAaa AAaa
-    // right= BBbb BBbb ... BBbb BBbb
-    // t = 00AA 00AA ... 00AA 00AA
-    // s = 00BB 00BB ... 00BB 00BB
-    vpsrlw(tmp.fp(), lhs.fp(), 8);
-    vpsrlw(kScratchDoubleReg, rhs.fp(), 8);
-    // t = I16x8Mul(t0, t1)
-    //    => __PP __PP ...  __PP  __PP
-    vpmullw(tmp.fp(), tmp.fp(), kScratchDoubleReg);
-    // s = left * 256
-    vpsllw(kScratchDoubleReg, lhs.fp(), 8);
-    // dst = I16x8Mul(left * 256, right)
-    //    => pp__ pp__ ...  pp__  pp__
-    vpmullw(dst.fp(), kScratchDoubleReg, rhs.fp());
-    // dst = I16x8Shr(dst, 8)
-    //    => 00pp 00pp ...  00pp  00pp
-    vpsrlw(dst.fp(), dst.fp(), 8);
-    // t = I16x8Shl(t, 8)
-    //    => PP00 PP00 ...  PP00  PP00
-    vpsllw(tmp.fp(), tmp.fp(), 8);
-    // dst = I16x8Or(dst, t)
-    //    => PPpp PPpp ...  PPpp  PPpp
-    vpor(dst.fp(), dst.fp(), tmp.fp());
+ vpinsrq(dst.fp(), src1.fp(), src2.gp(), imm_lane_idx);
} else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
-    // I16x8 view of I8x16
-    // left = AAaa AAaa ... AAaa AAaa
-    // right= BBbb BBbb ... BBbb BBbb
-    // t = 00AA 00AA ... 00AA 00AA
-    // s = 00BB 00BB ... 00BB 00BB
-    movaps(tmp.fp(), dst.fp());
-    movaps(kScratchDoubleReg, rhs.fp());
-    psrlw(tmp.fp(), 8);
-    psrlw(kScratchDoubleReg, 8);
-    // dst = left * 256
-    psllw(dst.fp(), 8);
-    // t = I16x8Mul(t, s)
-    //    => __PP __PP ...  __PP  __PP
-    pmullw(tmp.fp(), kScratchDoubleReg);
-    // dst = I16x8Mul(left * 256, right)
-    //    => pp__ pp__ ...  pp__  pp__
-    pmullw(dst.fp(), rhs.fp());
-    // t = I16x8Shl(t, 8)
-    //    => PP00 PP00 ...  PP00  PP00
-    psllw(tmp.fp(), 8);
-    // dst = I16x8Shr(dst, 8)
-    //    => 00pp 00pp ...  00pp  00pp
-    psrlw(dst.fp(), 8);
-    // dst = I16x8Or(dst, t)
-    //    => PPpp PPpp ...  PPpp  PPpp
-    por(dst.fp(), tmp.fp());
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ pinsrq(dst.fp(), src2.gp(), imm_lane_idx);
+ }
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vinsertps(dst.fp(), src1.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ insertps(dst.fp(), src2.fp(), (imm_lane_idx << 4) & 0x30);
+ }
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (imm_lane_idx == 0) {
+ vpblendw(dst.fp(), src1.fp(), src2.fp(), 0b00001111);
+ } else {
+ vmovlhps(dst.fp(), src1.fp(), src2.fp());
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
+ if (imm_lane_idx == 0) {
+ pblendw(dst.fp(), src2.fp(), 0b00001111);
+ } else {
+ movlhps(dst.fp(), src2.fp());
+ }
}
}
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index d098a5f57f..cd5d04bd2d 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -265,8 +265,8 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
StoreImpl::~StoreImpl() {
#ifdef DEBUG
reinterpret_cast<i::Isolate*>(isolate_)->heap()->PreciseCollectAllGarbage(
- i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
- v8::kGCCallbackFlagForced);
+ i::Heap::kForcedGC, i::GarbageCollectionReason::kTesting,
+ v8::kNoGCCallbackFlags);
#endif
context()->Exit();
isolate_->Dispose();
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index be60dfd519..1b2710666e 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -60,15 +60,11 @@ struct CompilationEnv {
const LowerSimd lower_simd;
- // Whether the debugger is active.
- const bool debug;
-
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
RuntimeExceptionSupport runtime_exception_support,
const WasmFeatures& enabled_features,
- LowerSimd lower_simd = kNoLowerSimd,
- bool debug = false)
+ LowerSimd lower_simd = kNoLowerSimd)
: module(module),
use_trap_handler(use_trap_handler),
runtime_exception_support(runtime_exception_support),
@@ -79,8 +75,7 @@ struct CompilationEnv {
: max_initial_mem_pages()) *
uint64_t{kWasmPageSize}),
enabled_features(enabled_features),
- lower_simd(lower_simd),
- debug(debug) {}
+ lower_simd(lower_simd) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -129,12 +124,12 @@ class CompilationState {
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
+ CompilationState() = delete;
+
private:
// NativeModule is allowed to call the static {New} method.
friend class NativeModule;
- CompilationState() = delete;
-
// The CompilationState keeps a {std::weak_ptr} back to the {NativeModule}
// such that it can keep it alive (by regaining a {std::shared_ptr}) in
// certain scopes.
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 3c0c0493b0..695960086e 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -15,6 +15,7 @@
#include "src/flags/flags.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone-containers.h"
@@ -126,6 +127,40 @@ class Decoder {
name);
}
+ // Reads a prefixed-opcode, possibly with variable-length index.
+ // The length param is set to the number of bytes this index is encoded with.
+ // For most cases (non variable-length), it will be 1.
+ template <ValidateFlag validate>
+ WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length = nullptr,
+ const char* name = "prefixed opcode") {
+ uint32_t unused_length;
+ if (length == nullptr) {
+ length = &unused_length;
+ }
+ DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(*pc)));
+ uint32_t index;
+ if (*pc == WasmOpcode::kSimdPrefix) {
+ // SIMD opcodes can be multiple bytes (when LEB128 encoded).
+ index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
+ // Only support SIMD opcodes that go up to 0xFF (when decoded). Anything
+ // bigger will need 1 more byte, and the '<< 8' below will be wrong.
+ if (validate && V8_UNLIKELY(index > 0xff)) {
+ errorf(pc, "Invalid SIMD opcode %d", index);
+ }
+ } else {
+ if (!validate || validate_size(pc, 2, "expected 2 bytes")) {
+ DCHECK(validate_size(pc, 2, "expected 2 bytes"));
+ index = *(pc + 1);
+ *length = 1;
+ } else {
+ // If kValidate and size validation fails.
+ index = 0;
+ *length = 0;
+ }
+ }
+ return static_cast<WasmOpcode>((*pc) << 8 | index);
+ }
+
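
For reference, the value read_prefixed_opcode produces is the prefix byte folded with the decoded index; for SIMD the index is LEB128-encoded after the 0xfd prefix and only indices up to 0xff are accepted, so the fold is well defined. A small sketch assuming a one-byte index (the common case); the helper name is illustrative.

#include <cstdint>

// (prefix << 8) | index, e.g. prefix 0xfd with index 0x0f folds to 0xfd0f.
uint16_t fold_prefixed_opcode(uint8_t prefix, uint8_t index) {
  return static_cast<uint16_t>((prefix << 8) | index);
}
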
// Reads a 8-bit unsigned integer (byte) and advances {pc_}.
uint8_t consume_u8(const char* name = "uint8_t") {
return consume_little_endian<uint8_t>(name);
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 9752d4ef0c..48b804a3a9 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -41,22 +41,18 @@ struct WasmException;
return true; \
}())
-#define RET_ON_PROTOTYPE_OPCODE(feat) \
+#define CHECK_PROTOTYPE_OPCODE_GEN(feat, opt_break) \
DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
if (!this->enabled_.has_##feat()) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
+ opt_break \
} else { \
this->detected_->Add(kFeature_##feat); \
}
-#define CHECK_PROTOTYPE_OPCODE(feat) \
- DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
- if (!this->enabled_.has_##feat()) { \
- this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
- break; \
- } else { \
- this->detected_->Add(kFeature_##feat); \
- }
+#define CHECK_PROTOTYPE_OPCODE(feat) CHECK_PROTOTYPE_OPCODE_GEN(feat, break;)
+
+#define RET_ON_PROTOTYPE_OPCODE(feat) CHECK_PROTOTYPE_OPCODE_GEN(feat, )
#define OPCODE_ERROR(opcode, message) \
(this->errorf(this->pc_, "%s: %s", WasmOpcodes::OpcodeName(opcode), \
@@ -207,54 +203,142 @@ struct GlobalIndexImmediate {
}
};
-namespace function_body_decoder {
-// Decode a byte representing a local type. Return {false} if the encoded
-// byte was invalid or the start of a type index.
-inline bool decode_local_type(uint8_t val, ValueType* result) {
- switch (static_cast<ValueTypeCode>(val)) {
- case kLocalVoid:
- *result = kWasmStmt;
- return true;
+namespace value_type_reader {
+
+// Read a value type starting at address 'pc' in 'decoder'.
+// No bytes are consumed. The result is written into the 'result' parameter.
+// Returns the amount of bytes read, or 0 if decoding failed.
+// Registers an error if the type opcode is invalid iff validate is set.
+template <Decoder::ValidateFlag validate>
+uint32_t read_value_type(Decoder* decoder, const byte* pc, ValueType* result,
+ const WasmFeatures& enabled) {
+ byte val = decoder->read_u8<validate>(pc, "value type opcode");
+ if (decoder->failed()) return 0;
+
+ ValueTypeCode code = static_cast<ValueTypeCode>(val);
+ switch (code) {
case kLocalI32:
*result = kWasmI32;
- return true;
+ return 1;
case kLocalI64:
*result = kWasmI64;
- return true;
+ return 1;
case kLocalF32:
*result = kWasmF32;
- return true;
+ return 1;
case kLocalF64:
*result = kWasmF64;
- return true;
- case kLocalS128:
- *result = kWasmS128;
- return true;
- case kLocalFuncRef:
- *result = kWasmFuncRef;
- return true;
+ return 1;
case kLocalAnyRef:
- *result = kWasmAnyRef;
- return true;
+ if (enabled.has_anyref()) {
+ *result = kWasmAnyRef;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'anyref', enable with "
+ "--experimental-wasm-anyref");
+ return 0;
+ case kLocalFuncRef:
+ if (enabled.has_anyref()) {
+ *result = kWasmFuncRef;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'funcref', enable with "
+ "--experimental-wasm-anyref");
+ return 0;
case kLocalNullRef:
- *result = kWasmNullRef;
- return true;
+ if (enabled.has_anyref()) {
+ *result = kWasmNullRef;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'nullref', enable with "
+ "--experimental-wasm-anyref");
+ return 0;
case kLocalExnRef:
- *result = kWasmExnRef;
- return true;
+ if (enabled.has_eh()) {
+ *result = kWasmExnRef;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'exception ref', enable with "
+ "--experimental-wasm-eh");
+ return 0;
+ case kLocalRef:
+ if (enabled.has_gc()) {
+ uint32_t length;
+ uint32_t type_index =
+ decoder->read_u32v<validate>(pc + 1, &length, "type index");
+ *result = ValueType(ValueType::kRef, type_index);
+ return length + 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'ref', enable with "
+ "--experimental-wasm-gc");
+ return 0;
+ case kLocalOptRef:
+ if (enabled.has_gc()) {
+ uint32_t length;
+ uint32_t type_index =
+ decoder->read_u32v<validate>(pc + 1, &length, "type index");
+ *result = ValueType(ValueType::kOptRef, type_index);
+ return length + 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'optref', enable with "
+ "--experimental-wasm-gc");
+ return 0;
+ case kLocalEqRef:
+ if (enabled.has_gc()) {
+ *result = kWasmEqRef;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'eqref', enable with "
+                     "--experimental-wasm-gc");
+ return 0;
+ case kLocalI31Ref:
+ if (enabled.has_gc()) {
+ // TODO(7748): Implement
+ decoder->error(pc, "'i31ref' is unimplemented");
+ }
+ decoder->error(pc,
+ "invalid value type 'i31ref', enable with "
+                     "--experimental-wasm-gc");
+ return 0;
+ case kLocalRttRef:
+ if (enabled.has_gc()) {
+ // TODO(7748): Implement
+ decoder->error(pc, "'rttref' is unimplemented");
+ }
+ decoder->error(pc,
+ "invalid value type 'rttref', enable with "
+                     "--experimental-wasm-gc");
+ return 0;
+ case kLocalS128:
+ if (enabled.has_simd()) {
+ *result = kWasmS128;
+ return 1;
+ }
+ decoder->error(pc,
+ "invalid value type 'Simd128', enable with "
+ "--experimental-wasm-simd");
+ return 0;
default:
*result = kWasmBottom;
- return false;
+ return 0;
}
}
-} // namespace function_body_decoder
+} // namespace value_type_reader
template <Decoder::ValidateFlag validate>
struct SelectTypeImmediate {
uint32_t length;
ValueType type;
- inline SelectTypeImmediate(Decoder* decoder, const byte* pc) {
+ inline SelectTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc) {
uint8_t num_types =
decoder->read_u32v<validate>(pc + 1, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
@@ -262,12 +346,11 @@ struct SelectTypeImmediate {
pc + 1, "Invalid number of types. Select accepts exactly one type");
return;
}
- uint8_t val = decoder->read_u8<validate>(pc + length + 1, "select type");
- length++;
- if (!function_body_decoder::decode_local_type(val, &type) ||
- type == kWasmStmt) {
+ uint32_t type_length = value_type_reader::read_value_type<validate>(
+ decoder, pc + length + 1, &type, enabled);
+ length += type_length;
+ if (type_length == 0) {
decoder->error(pc + 1, "invalid select type");
- return;
}
}
};
@@ -281,22 +364,30 @@ struct BlockTypeImmediate {
inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
const byte* pc) {
- uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
- if (!function_body_decoder::decode_local_type(val, &type)) {
- // Handle multi-value blocks.
- if (!VALIDATE(enabled.has_mv())) {
- decoder->error(pc + 1, "invalid block type");
- return;
- }
- if (!VALIDATE(decoder->ok())) return;
- int32_t index =
- decoder->read_i32v<validate>(pc + 1, &length, "block arity");
- if (!VALIDATE(length > 0 && index >= 0)) {
- decoder->error(pc + 1, "invalid block type index");
- return;
- }
- sig_index = static_cast<uint32_t>(index);
+ if (decoder->read_u8<validate>(pc + 1, "block type") == kLocalVoid) {
+ // 1st case: void block. Struct fields stay at default values.
+ return;
+ }
+ length = value_type_reader::read_value_type<validate>(decoder, pc + 1,
+ &type, enabled);
+ if (length > 0) {
+ // 2nd case: block with val type immediate.
+ return;
+ }
+ // It has to be the 3rd case: multi-value block,
+ // which is represented by a type index.
+ if (!VALIDATE(enabled.has_mv())) {
+ decoder->error(pc + 1, "invalid block type");
+ return;
}
+ if (!VALIDATE(decoder->ok())) return;
+ int32_t index =
+ decoder->read_i32v<validate>(pc + 1, &length, "block type index");
+ if (!VALIDATE(length > 0 && index >= 0)) {
+ decoder->error(pc + 1, "invalid block type index");
+ return;
+ }
+ sig_index = static_cast<uint32_t>(index);
}
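
The constructor above distinguishes three block-type encodings. A short sketch of the raw immediate byte for each case (values are illustrative; the multi-value form is a signed LEB128 type index and needs --experimental-wasm-mv):

// 0x40                   -> void block (kLocalVoid); fields keep their defaults
// 0x7f                   -> a single value type, here i32
// 0x02 (not a type code) -> multi-value block: index 2 into the module's type section
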
uint32_t in_arity() const {
@@ -374,6 +465,39 @@ struct TableIndexImmediate {
};
template <Decoder::ValidateFlag validate>
+struct StructIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 0;
+ const StructType* struct_type = nullptr;
+ inline StructIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc, &length, "struct index");
+ }
+};
+
+template <Decoder::ValidateFlag validate>
+struct FieldIndexImmediate {
+ StructIndexImmediate<validate> struct_index;
+ uint32_t index = 0;
+ uint32_t length = 0;
+ inline FieldIndexImmediate(Decoder* decoder, const byte* pc)
+ : struct_index(decoder, pc) {
+ index = decoder->read_u32v<validate>(pc + struct_index.length, &length,
+ "field index");
+ length += struct_index.length;
+ }
+};
+
+template <Decoder::ValidateFlag validate>
+struct ArrayIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 0;
+ const ArrayType* array_type = nullptr;
+ inline ArrayIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc, &length, "array index");
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
uint32_t table_index;
uint32_t sig_index;
@@ -485,8 +609,12 @@ struct SimdLaneImmediate {
uint8_t lane;
uint32_t length = 1;
- inline SimdLaneImmediate(Decoder* decoder, const byte* pc) {
- lane = decoder->read_u8<validate>(pc + 2, "lane");
+ inline SimdLaneImmediate(Decoder* decoder, const byte* pc,
+ uint32_t opcode_length) {
+ // Callers should pass in pc unchanged from where the decoding happens. 1 is
+ // added to account for the SIMD prefix byte, and opcode_length is the
+ // number of bytes the LEB encoding of the SIMD opcode takes.
+ lane = decoder->read_u8<validate>(pc + 1 + opcode_length, "lane");
}
};
@@ -495,9 +623,14 @@ template <Decoder::ValidateFlag validate>
struct Simd8x16ShuffleImmediate {
uint8_t shuffle[kSimd128Size] = {0};
- inline Simd8x16ShuffleImmediate(Decoder* decoder, const byte* pc) {
+ inline Simd8x16ShuffleImmediate(Decoder* decoder, const byte* pc,
+ uint32_t opcode_length) {
+ // Callers should pass in pc unchanged from where the decoding happens. 1 is
+ // added to account for the SIMD prefix byte, and opcode_length is the
+ // number of bytes the LEB encoding of the SIMD opcode takes.
for (uint32_t i = 0; i < kSimd128Size; ++i) {
- shuffle[i] = decoder->read_u8<validate>(pc + 2 + i, "shuffle");
+ shuffle[i] =
+ decoder->read_u8<validate>(pc + 1 + opcode_length + i, "shuffle");
}
}
};
@@ -710,6 +843,7 @@ enum class LoadTransformationKind : uint8_t {
F(F64Const, Value* result, double value) \
F(RefNull, Value* result) \
F(RefFunc, uint32_t function_index, Value* result) \
+ F(RefAsNonNull, const Value& arg, Value* result) \
F(Drop, const Value& value) \
F(DoReturn, Vector<Value> values) \
F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
@@ -746,6 +880,7 @@ enum class LoadTransformationKind : uint8_t {
const Value args[]) \
F(ReturnCallIndirect, const Value& index, \
const CallIndirectImmediate<validate>& imm, const Value args[]) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth) \
F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
const Vector<Value> inputs, Value* result) \
@@ -774,7 +909,23 @@ enum class LoadTransformationKind : uint8_t {
const Value& delta, Value* result) \
F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
- const Value& value, const Value& count)
+ const Value& value, const Value& count) \
+ F(StructNew, const StructIndexImmediate<validate>& imm, const Value args[], \
+ Value* result) \
+ F(StructGet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, Value* result) \
+ F(StructSet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, const Value& field_value) \
+ F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
+ const Value& initial_value, Value* result) \
+ F(ArrayGet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ Value* result) \
+ F(ArraySet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ const Value& value) \
+ F(ArrayLen, const Value& array_obj, Value* result) \
+ F(PassThrough, const Value& from, Value* to)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
@@ -826,73 +977,15 @@ class WasmDecoder : public Decoder {
decoder->error(decoder->pc() - 1, "local count too large");
return false;
}
- byte code = decoder->consume_u8("local type");
- if (decoder->failed()) return false;
-
ValueType type;
- switch (code) {
- case kLocalI32:
- type = kWasmI32;
- break;
- case kLocalI64:
- type = kWasmI64;
- break;
- case kLocalF32:
- type = kWasmF32;
- break;
- case kLocalF64:
- type = kWasmF64;
- break;
- case kLocalAnyRef:
- if (enabled.has_anyref()) {
- type = kWasmAnyRef;
- break;
- }
- decoder->error(decoder->pc() - 1,
- "invalid local type 'anyref', enable with "
- "--experimental-wasm-anyref");
- return false;
- case kLocalFuncRef:
- if (enabled.has_anyref()) {
- type = kWasmFuncRef;
- break;
- }
- decoder->error(decoder->pc() - 1,
- "invalid local type 'funcref', enable with "
- "--experimental-wasm-anyref");
- return false;
- case kLocalNullRef:
- if (enabled.has_anyref()) {
- type = kWasmNullRef;
- break;
- }
- decoder->error(decoder->pc() - 1,
- "invalid local type 'nullref', enable with "
- "--experimental-wasm-anyref");
- return false;
- case kLocalExnRef:
- if (enabled.has_eh()) {
- type = kWasmExnRef;
- break;
- }
- decoder->error(decoder->pc() - 1,
- "invalid local type 'exception ref', enable with "
- "--experimental-wasm-eh");
- return false;
- case kLocalS128:
- if (enabled.has_simd()) {
- type = kWasmS128;
- break;
- }
- decoder->error(decoder->pc() - 1,
- "invalid local type 'Simd128', enable with "
- "--experimental-wasm-simd");
- return false;
- default:
- decoder->error(decoder->pc() - 1, "invalid local type");
- return false;
+ uint32_t type_length = value_type_reader::read_value_type<validate>(
+ decoder, decoder->pc(), &type, enabled);
+ if (type_length == 0) {
+ decoder->error(decoder->pc(), "invalid local type");
+ return false;
}
type_list->insert(type_list->end(), count, type);
+ decoder->consume_bytes(type_length);
}
DCHECK(decoder->ok());
return true;
@@ -989,6 +1082,41 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Complete(const byte* pc, StructIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && module_->has_struct(imm.index))) {
+ return false;
+ }
+ imm.struct_type = module_->struct_type(imm.index);
+ return true;
+ }
+
+ inline bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
+ if (Complete(pc, imm)) return true;
+ errorf(pc, "invalid struct index: %u", imm.index);
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, FieldIndexImmediate<validate>& imm) {
+ if (!Validate(pc, imm.struct_index)) return false;
+ if (imm.index < imm.struct_index.struct_type->field_count()) return true;
+ errorf(pc + imm.struct_index.length, "invalid field index: %u", imm.index);
+ return false;
+ }
+
+ inline bool Complete(const byte* pc, ArrayIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && module_->has_array(imm.index))) {
+ return false;
+ }
+ imm.array_type = module_->array_type(imm.index);
+ return true;
+ }
+
+ inline bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
+ if (Complete(pc, imm)) return true;
+ errorf(pc, "invalid array index: %u", imm.index);
+ return false;
+ }
+
inline bool CanReturnCall(const FunctionSig* target_sig) {
if (target_sig == nullptr) return false;
size_t num_returns = sig_->return_count();
@@ -1018,10 +1146,10 @@ class WasmDecoder : public Decoder {
inline bool Complete(const byte* pc, CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
- imm.sig_index < module_->signatures.size())) {
+ module_->has_signature(imm.sig_index))) {
return false;
}
- imm.sig = module_->signatures[imm.sig_index];
+ imm.sig = module_->signature(imm.sig_index);
return true;
}
@@ -1116,17 +1244,17 @@ class WasmDecoder : public Decoder {
inline bool Complete(BlockTypeImmediate<validate>& imm) {
if (imm.type != kWasmBottom) return true;
- if (!VALIDATE(module_ && imm.sig_index < module_->signatures.size())) {
+ if (!VALIDATE(module_ && module_->has_signature(imm.sig_index))) {
return false;
}
- imm.sig = module_->signatures[imm.sig_index];
+ imm.sig = module_->signature(imm.sig_index);
return true;
}
inline bool Validate(BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc_, "block type index %u out of bounds (%zu signatures)",
- imm.sig_index, module_ ? module_->signatures.size() : 0);
+ errorf(pc_, "block type index %u out of bounds (%zu types)",
+ imm.sig_index, module_ ? module_->types.size() : 0);
return false;
}
return true;
@@ -1285,6 +1413,11 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
+ case kExprBrOnNull: {
+ BranchDepthImmediate<validate> imm(decoder, pc);
+ return 1 + imm.length;
+ }
+
case kExprLocalGet:
case kExprLocalSet:
case kExprLocalTee: {
@@ -1292,7 +1425,7 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
case kExprSelectWithType: {
- SelectTypeImmediate<validate> imm(decoder, pc);
+ SelectTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc);
return 1 + imm.length;
}
case kExprBrTable: {
@@ -1379,31 +1512,31 @@ class WasmDecoder : public Decoder {
}
}
case kSimdPrefix: {
- byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 2;
+ return 1 + length;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 3;
+ return 2 + length;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
- return 2 + imm.length;
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return 1 + length + imm.length;
}
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS8x16Shuffle:
- return 2 + kSimd128Size;
+ return 1 + length + kSimd128Size;
default:
decoder->error(pc, "invalid SIMD opcode");
- return 2;
+ return 1 + length;
}
}
case kAtomicPrefix: {
@@ -1429,6 +1562,48 @@ class WasmDecoder : public Decoder {
return 2;
}
}
+ case kGCPrefix: {
+ byte gc_index = decoder->read_u8<validate>(pc + 1, "gc_index");
+ WasmOpcode opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ switch (opcode) {
+ case kExprStructNew:
+ case kExprStructNewSub:
+ case kExprStructNewDefault: {
+ StructIndexImmediate<validate> imm(decoder, pc + 2);
+ return 2 + imm.length;
+ }
+ case kExprStructGet:
+ case kExprStructGetS:
+ case kExprStructGetU:
+ case kExprStructSet: {
+ FieldIndexImmediate<validate> imm(decoder, pc + 2);
+ return 2 + imm.length;
+ }
+ case kExprArrayNew:
+ case kExprArrayNewSub:
+ case kExprArrayNewDefault:
+ case kExprArrayGet:
+ case kExprArrayGetS:
+ case kExprArrayGetU:
+ case kExprArraySet:
+ case kExprArrayLen: {
+ ArrayIndexImmediate<validate> imm(decoder, pc + 2);
+ return 2 + imm.length;
+ }
+ case kExprBrOnCast: {
+ BranchDepthImmediate<validate> imm(decoder, pc + 2);
+ return 2 + imm.length;
+ }
+ case kExprRttGet:
+ case kExprRttSub: {
+ // TODO(7748): Implement.
+ UNIMPLEMENTED();
+ }
+
+ default:
+ return 2;
+ }
+ }
default:
return 1;
}
@@ -1454,6 +1629,8 @@ class WasmDecoder : public Decoder {
case kExprTableGet:
case kExprLocalTee:
case kExprMemoryGrow:
+ case kExprRefAsNonNull:
+ case kExprBrOnNull:
return {1, 1};
case kExprLocalSet:
case kExprGlobalSet:
@@ -1508,7 +1685,7 @@ class WasmDecoder : public Decoder {
case kNumericPrefix:
case kAtomicPrefix:
case kSimdPrefix: {
- opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+ opcode = this->read_prefixed_opcode<validate>(pc);
switch (opcode) {
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(DECLARE_OPCODE_CASE)
return {1, 1};
@@ -1622,12 +1799,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- // We need one more byte.
- ++pc;
- if (pc >= this->end_) return "<end>";
- byte sub_opcode = *pc;
- opcode = static_cast<WasmOpcode>(opcode << 8 | sub_opcode);
- return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
+ opcode = this->template read_prefixed_opcode<Decoder::kValidate>(pc);
+ return WasmOpcodes::OpcodeName(opcode);
}
inline Zone* zone() const { return zone_; }
@@ -1858,6 +2031,51 @@ class WasmFullDecoder : public WasmDecoder<validate> {
*pexception = exception;
break;
}
+ case kExprBrOnNull: {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ len = 1 + imm.length;
+ Value ref_object = Pop();
+ if (this->failed()) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ switch (ref_object.type.kind()) {
+ case ValueType::kRef: {
+ auto* result = Push(
+ ValueType(ValueType::kRef, ref_object.type.ref_index()));
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ break;
+ }
+ case ValueType::kOptRef: {
+ // We need to Push the result value after calling BrOnNull on
+ // the interface. Therefore we must sync the ref_object and
+ // result nodes afterwards (in PassThrough).
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ auto* result = Push(
+ ValueType(ValueType::kRef, ref_object.type.ref_index()));
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ c->br_merge()->reached = true;
+ break;
+ }
+ case ValueType::kNullRef:
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
+ EndControl();
+ break;
+ default:
+ this->error(this->pc_,
+ "invalid agrument type to ref.as_non_null");
+ break;
+ }
+ }
+ break;
+ }
case kExprLoop: {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
@@ -1957,7 +2175,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprSelectWithType: {
CHECK_PROTOTYPE_OPCODE(anyref);
- SelectTypeImmediate<validate> imm(this, this->pc_);
+ SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (this->failed()) break;
auto cond = Pop(2, kWasmI32);
auto fval = Pop(1, imm.type);
@@ -2063,7 +2281,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Since we deal with unreachable code, we do not have to keep the
// values.
int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = 0; i < num_returns; ++i) {
+ for (int i = num_returns - 1; i >= 0; --i) {
Pop(i, this->sig_->GetReturn(i));
}
}
@@ -2120,6 +2338,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + imm.length;
break;
}
+ case kExprRefAsNonNull: {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ auto value = Pop();
+ switch (value.type.kind()) {
+ case ValueType::kRef: {
+ auto* result =
+ Push(ValueType(ValueType::kRef, value.type.ref_index()));
+ CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
+ break;
+ }
+ case ValueType::kOptRef: {
+ auto* result =
+ Push(ValueType(ValueType::kRef, value.type.ref_index()));
+ CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
+ break;
+ }
+ case ValueType::kNullRef:
+ // TODO(7748): Fix this once the standard clears up (see
+ // https://github.com/WebAssembly/function-references/issues/21).
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ EndControl();
+ break;
+ default:
+ this->error(this->pc_ + 1,
+ "invalid agrument type to ref.as_non_null");
+ break;
+ }
+ break;
+ }
case kExprLocalGet: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
@@ -2355,13 +2602,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kSimdPrefix: {
CHECK_PROTOTYPE_OPCODE(simd);
- len++;
- byte simd_index =
- this->template read_u8<validate>(this->pc_ + 1, "simd index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
+ uint32_t length = 0;
+ opcode =
+ this->template read_prefixed_opcode<validate>(this->pc_, &length);
+ if (!VALIDATE(this->ok())) break;
+ len += length;
+
TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
WasmOpcodes::OpcodeName(opcode));
- len += DecodeSimdOpcode(opcode);
+ len += DecodeSimdOpcode(opcode, length);
break;
}
case kAtomicPrefix: {
@@ -2375,6 +2624,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len += DecodeAtomicOpcode(opcode);
break;
}
+ case kGCPrefix: {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ byte gc_index =
+ this->template read_u8<validate>(this->pc_ + 1, "gc index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | gc_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len = DecodeGCOpcode(opcode);
+ break;
+ }
// Note that prototype opcodes are not handled in the fastpath
// above this switch, to avoid checking a feature flag.
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
@@ -2426,7 +2685,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto& val = stack_[i];
WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = static_cast<WasmOpcode>(opcode << 8 | *(val.pc + 1));
+ opcode = this->template read_prefixed_opcode<Decoder::kNoValidate>(
+ val.pc);
}
TRACE_PART(" %c@%d:%s", val.type.short_name(),
static_cast<int>(val.pc - this->start_),
@@ -2507,6 +2767,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return args;
}
+ V8_INLINE ArgVector PopArgs(const StructType* type) {
+ int count = static_cast<int>(type->field_count());
+ ArgVector args(count);
+ for (int i = count - 1; i >= 0; i--) {
+ args[i] = Pop(i, type->field(i));
+ }
+ return args;
+ }
+
ValueType GetReturnType(const FunctionSig* sig) {
DCHECK_GE(1, sig->return_count());
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
@@ -2546,9 +2815,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- int DecodeLoadTransformMem(LoadType type, LoadTransformationKind transform) {
+ int DecodeLoadTransformMem(LoadType type, LoadTransformationKind transform,
+ uint32_t opcode_length) {
if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> imm(this, this->pc_ + 1, type.size_log_2());
+ // Load extends always load 64 bits.
+ uint32_t max_alignment =
+ transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
+ MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
+ max_alignment);
auto index = Pop(0, kWasmI32);
auto* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
@@ -2649,8 +2923,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return this->ok();
}
- uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type) {
- SimdLaneImmediate<validate> imm(this, this->pc_);
+ uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type,
+ uint32_t opcode_length) {
+ SimdLaneImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, opcode, imm)) {
Value inputs[] = {Pop(0, kWasmS128)};
auto* result = Push(type);
@@ -2660,8 +2935,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- uint32_t SimdReplaceLane(WasmOpcode opcode, ValueType type) {
- SimdLaneImmediate<validate> imm(this, this->pc_);
+ uint32_t SimdReplaceLane(WasmOpcode opcode, ValueType type,
+ uint32_t opcode_length) {
+ SimdLaneImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, opcode, imm)) {
Value inputs[2] = {UnreachableValue(this->pc_),
UnreachableValue(this->pc_)};
@@ -2674,8 +2950,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
- uint32_t Simd8x16ShuffleOp() {
- Simd8x16ShuffleImmediate<validate> imm(this, this->pc_);
+ uint32_t Simd8x16ShuffleOp(uint32_t opcode_length) {
+ Simd8x16ShuffleImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, imm)) {
auto input1 = Pop(1, kWasmS128);
auto input0 = Pop(0, kWasmS128);
@@ -2686,19 +2962,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 16;
}
- uint32_t DecodeSimdOpcode(WasmOpcode opcode) {
+ uint32_t DecodeSimdOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ // opcode_length is the number of bytes that this SIMD-specific opcode takes
+ // up in the LEB128 encoded form.
uint32_t len = 0;
switch (opcode) {
case kExprF64x2ExtractLane: {
- len = SimdExtractLane(opcode, kWasmF64);
+ len = SimdExtractLane(opcode, kWasmF64, opcode_length);
break;
}
case kExprF32x4ExtractLane: {
- len = SimdExtractLane(opcode, kWasmF32);
+ len = SimdExtractLane(opcode, kWasmF32, opcode_length);
break;
}
case kExprI64x2ExtractLane: {
- len = SimdExtractLane(opcode, kWasmI64);
+ len = SimdExtractLane(opcode, kWasmI64, opcode_length);
break;
}
case kExprI32x4ExtractLane:
@@ -2706,76 +2984,84 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprI16x8ExtractLaneU:
case kExprI8x16ExtractLaneS:
case kExprI8x16ExtractLaneU: {
- len = SimdExtractLane(opcode, kWasmI32);
+ len = SimdExtractLane(opcode, kWasmI32, opcode_length);
break;
}
case kExprF64x2ReplaceLane: {
- len = SimdReplaceLane(opcode, kWasmF64);
+ len = SimdReplaceLane(opcode, kWasmF64, opcode_length);
break;
}
case kExprF32x4ReplaceLane: {
- len = SimdReplaceLane(opcode, kWasmF32);
+ len = SimdReplaceLane(opcode, kWasmF32, opcode_length);
break;
}
case kExprI64x2ReplaceLane: {
- len = SimdReplaceLane(opcode, kWasmI64);
+ len = SimdReplaceLane(opcode, kWasmI64, opcode_length);
break;
}
case kExprI32x4ReplaceLane:
case kExprI16x8ReplaceLane:
case kExprI8x16ReplaceLane: {
- len = SimdReplaceLane(opcode, kWasmI32);
+ len = SimdReplaceLane(opcode, kWasmI32, opcode_length);
break;
}
case kExprS8x16Shuffle: {
- len = Simd8x16ShuffleOp();
+ len = Simd8x16ShuffleOp(opcode_length);
break;
}
case kExprS128LoadMem:
- len = DecodeLoadMem(LoadType::kS128Load, 1);
+ len = DecodeLoadMem(LoadType::kS128Load, opcode_length);
break;
case kExprS128StoreMem:
- len = DecodeStoreMem(StoreType::kS128Store, 1);
+ len = DecodeStoreMem(StoreType::kS128Store, opcode_length);
break;
case kExprS8x16LoadSplat:
len = DecodeLoadTransformMem(LoadType::kI32Load8S,
- LoadTransformationKind::kSplat);
+ LoadTransformationKind::kSplat,
+ opcode_length);
break;
case kExprS16x8LoadSplat:
len = DecodeLoadTransformMem(LoadType::kI32Load16S,
- LoadTransformationKind::kSplat);
+ LoadTransformationKind::kSplat,
+ opcode_length);
break;
case kExprS32x4LoadSplat:
- len = DecodeLoadTransformMem(LoadType::kI32Load,
- LoadTransformationKind::kSplat);
+ len = DecodeLoadTransformMem(
+ LoadType::kI32Load, LoadTransformationKind::kSplat, opcode_length);
break;
case kExprS64x2LoadSplat:
- len = DecodeLoadTransformMem(LoadType::kI64Load,
- LoadTransformationKind::kSplat);
+ len = DecodeLoadTransformMem(
+ LoadType::kI64Load, LoadTransformationKind::kSplat, opcode_length);
break;
case kExprI16x8Load8x8S:
len = DecodeLoadTransformMem(LoadType::kI32Load8S,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
case kExprI16x8Load8x8U:
len = DecodeLoadTransformMem(LoadType::kI32Load8U,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
case kExprI32x4Load16x4S:
len = DecodeLoadTransformMem(LoadType::kI32Load16S,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
case kExprI32x4Load16x4U:
len = DecodeLoadTransformMem(LoadType::kI32Load16U,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
case kExprI64x2Load32x2S:
len = DecodeLoadTransformMem(LoadType::kI64Load32S,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
case kExprI64x2Load32x2U:
len = DecodeLoadTransformMem(LoadType::kI64Load32U,
- LoadTransformationKind::kExtend);
+ LoadTransformationKind::kExtend,
+ opcode_length);
break;
default: {
if (!FLAG_wasm_simd_post_mvp &&
@@ -2798,6 +3084,88 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
+ uint32_t DecodeGCOpcode(WasmOpcode opcode) {
+ uint32_t len = 2;
+ switch (opcode) {
+ case kExprStructNew: {
+ StructIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto args = PopArgs(imm.struct_type);
+ auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ CALL_INTERFACE_IF_REACHABLE(StructNew, imm, args.begin(), value);
+ break;
+ }
+ case kExprStructGet: {
+ FieldIndexImmediate<validate> field(this, this->pc_ + len);
+ if (!this->Validate(this->pc_ + len, field)) break;
+ len += field.length;
+ auto struct_obj =
+ Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
+ auto* value = Push(field.struct_index.struct_type->field(field.index));
+ CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, value);
+ break;
+ }
+ case kExprStructSet: {
+ FieldIndexImmediate<validate> field(this, this->pc_ + len);
+ if (!this->Validate(this->pc_ + len, field)) break;
+ len += field.length;
+ auto field_value = Pop(
+ 0, ValueType(field.struct_index.struct_type->field(field.index)));
+ auto struct_obj =
+ Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
+ CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
+ break;
+ }
+ case kExprArrayNew: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto length = Pop(0, kWasmI32);
+ auto initial_value = Pop(0, imm.array_type->element_type());
+ auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ CALL_INTERFACE_IF_REACHABLE(ArrayNew, imm, length, initial_value,
+ value);
+ break;
+ }
+ case kExprArrayGet: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ auto index = Pop(0, kWasmI32);
+ auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
+ auto* value = Push(imm.array_type->element_type());
+ // TODO(7748): Optimize this when array_obj is non-nullable ref.
+ CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, value);
+ break;
+ }
+ case kExprArraySet: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ auto value = Pop(0, imm.array_type->element_type());
+ auto index = Pop(0, kWasmI32);
+ auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
+ // TODO(7748): Optimize this when array_obj is non-nullable ref.
+ CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
+ break;
+ }
+ case kExprArrayLen: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
+ auto* value = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
+ break;
+ }
+ default:
+ this->error("invalid gc opcode");
+ return 0;
+ }
+ return len;
+ }
+
uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
uint32_t len = 0;
ValueType ret_type;
@@ -3205,8 +3573,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void BuildSimplePrototypeOperator(WasmOpcode opcode) {
- if (WasmOpcodes::IsAnyRefOpcode(opcode)) {
+ if (opcode == kExprRefIsNull) {
RET_ON_PROTOTYPE_OPCODE(anyref);
+ } else if (opcode == kExprRefEq) {
+ RET_ON_PROTOTYPE_OPCODE(gc);
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
BuildSimpleOperator(opcode, sig);
@@ -3278,6 +3648,8 @@ class EmptyInterface {
#undef TRACE_INST_FORMAT
#undef VALIDATE
#undef CHECK_PROTOTYPE_OPCODE
+#undef RET_ON_PROTOTYPE_OPCODE
+#undef CHECK_PROTOTYPE_OPCODE_GEN
#undef OPCODE_ERROR
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 63788bcc8d..8b2b027b13 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -154,7 +154,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
++line_nr;
}
- os << "// body: " << std::endl;
+ os << "// body:" << std::endl;
if (line_numbers) line_numbers->push_back(kNoByteCode);
++line_nr;
unsigned control_depth = 0;
@@ -187,6 +187,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
opcode == kExprTry) {
DCHECK_EQ(2, length);
+ // TODO(7748): Update this for gc and ref types if needed.
switch (i.pc()[1]) {
#define CASE_LOCAL_TYPE(local_name, type_name) \
case kLocal##local_name: \
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index f542540233..4fab50817c 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -173,9 +173,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- byte prefix = read_u8<Decoder::kNoValidate>(pc_, "expected prefix");
- byte index = read_u8<Decoder::kNoValidate>(pc_ + 1, "expected index");
- return static_cast<WasmOpcode>(prefix << 8 | index);
+ return read_prefixed_opcode<Decoder::kNoValidate>(pc_);
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 1b6b83a3b1..6b25520d84 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -115,7 +115,6 @@ ExecutionTier WasmCompilationUnit::GetBaselineExecutionTier(
// Liftoff does not support the special asm.js opcodes, thus always compile
// asm.js modules with TurboFan.
if (is_asmjs_module(module)) return ExecutionTier::kTurbofan;
- if (FLAG_wasm_interpret_all) return ExecutionTier::kInterpreter;
return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
@@ -131,7 +130,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
counters, detected);
}
- if (result.succeeded()) {
+ if (result.succeeded() && counters) {
counters->wasm_generated_code_size()->Increment(
result.code_desc.instr_size);
counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
@@ -164,12 +163,16 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
wasm::FunctionBody func_body{func->sig, func->code.offset(), code.begin(),
code.end()};
- auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin, wasm,
- function_size_bytes);
- size_histogram->AddSample(static_cast<int>(func_body.end - func_body.start));
- auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
- wasm_compile, function_time);
- TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
+ base::Optional<TimedHistogramScope> wasm_compile_function_time_scope;
+ if (counters) {
+ auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
+ wasm, function_size_bytes);
+ size_histogram->AddSample(
+ static_cast<int>(func_body.end - func_body.start));
+ auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
+ wasm_compile, function_time);
+ wasm_compile_function_time_scope.emplace(timed_histogram);
+ }
if (FLAG_trace_wasm_compiler) {
PrintF("Compiling wasm function %d with %s\n", func_index_,
@@ -188,9 +191,9 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
if (V8_LIKELY(FLAG_wasm_tier_mask_for_testing == 0) ||
func_index_ >= 32 ||
((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0)) {
- result =
- ExecuteLiftoffCompilation(wasm_engine->allocator(), env, func_body,
- func_index_, counters, detected);
+ result = ExecuteLiftoffCompilation(wasm_engine->allocator(), env,
+ func_body, func_index_,
+ for_debugging_, counters, detected);
if (result.succeeded()) break;
}
@@ -202,12 +205,11 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
wasm_engine, env, func_body, func_index_, counters, detected);
+ result.for_debugging = for_debugging_;
break;
case ExecutionTier::kInterpreter:
- result = compiler::ExecuteInterpreterEntryCompilation(
- wasm_engine, env, func_body, func_index_, counters, detected);
- break;
+ UNREACHABLE();
}
return result;
@@ -250,7 +252,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
DCHECK_LE(native_module->num_imported_functions(), function->func_index);
DCHECK_LT(function->func_index, native_module->num_functions());
- WasmCompilationUnit unit(function->func_index, tier);
+ WasmCompilationUnit unit(function->func_index, tier, kNoDebugging);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
isolate->wasm_engine(), &env,
@@ -258,7 +260,8 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
isolate->counters(), detected);
if (result.succeeded()) {
WasmCodeRefScope code_ref_scope;
- native_module->AddCompiledCode(std::move(result));
+ native_module->PublishCode(
+ native_module->AddCompiledCode(std::move(result)));
} else {
native_module->compilation_state()->SetError();
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index a511f19b76..c66c748064 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -55,7 +55,6 @@ struct WasmCompilationResult {
enum Kind : int8_t {
kFunction,
kWasmToJsWrapper,
- kInterpreterEntry,
};
bool succeeded() const { return code_desc.buffer != nullptr; }
@@ -72,14 +71,15 @@ struct WasmCompilationResult {
ExecutionTier requested_tier;
ExecutionTier result_tier;
Kind kind = kFunction;
+ ForDebugging for_debugging = kNoDebugging;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
public:
static ExecutionTier GetBaselineExecutionTier(const WasmModule*);
- WasmCompilationUnit(int index, ExecutionTier tier)
- : func_index_(index), tier_(tier) {}
+ WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
+ : func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
WasmCompilationResult ExecuteCompilation(
WasmEngine*, CompilationEnv*, const std::shared_ptr<WireBytesStorage>&,
@@ -103,6 +103,7 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
int func_index_;
ExecutionTier tier_;
+ ForDebugging for_debugging_;
};
// {WasmCompilationUnit} should be trivially copyable and small enough so we can
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 5f73f27200..5d23dbf183 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -12,6 +12,7 @@
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
@@ -259,6 +260,10 @@ class WasmGraphBuildingInterface {
result->node = BUILD(RefFunc, function_index);
}
+ void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
+ result->node = BUILD(RefAsNonNull, arg.node, decoder->position());
+ }
+
void Drop(FullDecoder* decoder, const Value& value) {}
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
@@ -307,7 +312,7 @@ class WasmGraphBuildingInterface {
}
void Unreachable(FullDecoder* decoder) {
- BUILD(Unreachable, decoder->position());
+ BUILD(Trap, wasm::TrapReason::kTrapUnreachable, decoder->position());
}
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
@@ -445,6 +450,18 @@ class WasmGraphBuildingInterface {
args);
}
+ void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
+ SsaEnv* non_null_env = ssa_env_;
+ SsaEnv* null_env = Split(decoder, non_null_env);
+ non_null_env->SetNotMerged();
+ BUILD(BrOnNull, ref_object.node, &null_env->control,
+ &non_null_env->control);
+ builder_->SetControl(non_null_env->control);
+ SetEnv(null_env);
+ BrOrRet(decoder, depth);
+ SetEnv(non_null_env);
+ }
+
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
base::SmallVector<TFNode*, 8> inputs(args.size());
@@ -600,6 +617,69 @@ class WasmGraphBuildingInterface {
BUILD(TableFill, imm.index, start.node, value.node, count.node);
}
+ void StructNew(FullDecoder* decoder,
+ const StructIndexImmediate<validate>& imm, const Value args[],
+ Value* result) {
+ uint32_t field_count = imm.struct_type->field_count();
+ base::SmallVector<TFNode*, 16> arg_nodes(field_count);
+ for (uint32_t i = 0; i < field_count; i++) {
+ arg_nodes[i] = args[i].node;
+ }
+ result->node =
+ BUILD(StructNew, imm.index, imm.struct_type, VectorOf(arg_nodes));
+ }
+
+ void StructGet(FullDecoder* decoder, const Value& struct_object,
+ const FieldIndexImmediate<validate>& field, Value* result) {
+ using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
+ CheckForNull null_check = struct_object.type.kind() == ValueType::kRef
+ ? CheckForNull::kWithoutNullCheck
+ : CheckForNull::kWithNullCheck;
+ result->node =
+ BUILD(StructGet, struct_object.node, field.struct_index.struct_type,
+ field.index, null_check, decoder->position());
+ }
+
+ void StructSet(FullDecoder* decoder, const Value& struct_object,
+ const FieldIndexImmediate<validate>& field,
+ const Value& field_value) {
+ using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
+ CheckForNull null_check = struct_object.type.kind() == ValueType::kRef
+ ? CheckForNull::kWithoutNullCheck
+ : CheckForNull::kWithNullCheck;
+ BUILD(StructSet, struct_object.node, field.struct_index.struct_type,
+ field.index, field_value.node, null_check, decoder->position());
+ }
+
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ const Value& length, const Value& initial_value,
+ Value* result) {
+ result->node = BUILD(ArrayNew, imm.index, imm.array_type, length.node,
+ initial_value.node);
+ }
+
+ void ArrayGet(FullDecoder* decoder, const Value& array_obj,
+ const ArrayIndexImmediate<validate>& imm, const Value& index,
+ Value* result) {
+ result->node = BUILD(ArrayGet, array_obj.node, imm.array_type, index.node,
+ decoder->position());
+ }
+
+ void ArraySet(FullDecoder* decoder, const Value& array_obj,
+ const ArrayIndexImmediate<validate>& imm, const Value& index,
+ const Value& value) {
+ BUILD(ArraySet, array_obj.node, imm.array_type, index.node, value.node,
+ decoder->position());
+ }
+
+ void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
+ result->node = BUILD(ArrayLen, array_obj.node, decoder->position());
+ }
+
+ void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
+ to->node = from.node;
+ }
+
private:
SsaEnv* ssa_env_ = nullptr;
compiler::WasmGraphBuilder* builder_;
@@ -711,6 +791,8 @@ class WasmGraphBuildingInterface {
case ValueType::kFuncRef:
case ValueType::kNullRef:
case ValueType::kExnRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef:
return builder_->RefNull();
default:
UNREACHABLE();
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index 37a210086b..257f384bef 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -31,6 +31,9 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
LEBHelper::write_u32v(&pos, local_decl.first);
*pos = local_decl.second.value_type_code();
++pos;
+ if (local_decl.second.has_immediate()) {
+ LEBHelper::write_u32v(&pos, local_decl.second.ref_index());
+ }
}
DCHECK_EQ(Size(), pos - buffer);
return static_cast<size_t>(pos - buffer);
@@ -48,9 +51,17 @@ uint32_t LocalDeclEncoder::AddLocals(uint32_t count, ValueType type) {
return result;
}
+// Size = (size of locals count) +
+// (for each local pair <reps, type>, (size of reps) + (size of type))
size_t LocalDeclEncoder::Size() const {
size_t size = LEBHelper::sizeof_u32v(local_decls.size());
- for (auto p : local_decls) size += 1 + LEBHelper::sizeof_u32v(p.first);
+ for (auto p : local_decls) {
+ size +=
+ LEBHelper::sizeof_u32v(p.first) + // number of locals
+ 1 + // Opcode
+ (p.second.has_immediate() ? LEBHelper::sizeof_u32v(p.second.ref_index())
+ : 0); // immediate
+ }
return size;
}
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 369dcfd9f7..9f6e91c73e 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -90,21 +90,59 @@ class BackgroundCompileToken {
: native_module_(native_module) {}
void Cancel() {
- base::SharedMutexGuard<base::kExclusive> mutex_guard(&mutex_);
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ &compilation_scope_mutex_);
native_module_.reset();
}
private:
friend class BackgroundCompileScope;
- base::SharedMutex mutex_;
- std::weak_ptr<NativeModule> native_module_;
std::shared_ptr<NativeModule> StartScope() {
- mutex_.LockShared();
+ compilation_scope_mutex_.LockShared();
return native_module_.lock();
}
- void ExitScope() { mutex_.UnlockShared(); }
+ // This private method can only be called via {BackgroundCompileScope}.
+ void SchedulePublishCode(NativeModule* native_module,
+ std::vector<std::unique_ptr<WasmCode>> codes) {
+ {
+ base::MutexGuard guard(&publish_mutex_);
+ if (publisher_running_) {
+ // Add new code to the queue and return.
+ publish_queue_.reserve(publish_queue_.size() + codes.size());
+ for (auto& c : codes) publish_queue_.emplace_back(std::move(c));
+ return;
+ }
+ publisher_running_ = true;
+ }
+ while (true) {
+ PublishCode(native_module, VectorOf(codes));
+ codes.clear();
+
+ // Keep publishing new code that came in.
+ base::MutexGuard guard(&publish_mutex_);
+ DCHECK(publisher_running_);
+ if (publish_queue_.empty()) {
+ publisher_running_ = false;
+ return;
+ }
+ codes.swap(publish_queue_);
+ }
+ }
+
+ void PublishCode(NativeModule*, Vector<std::unique_ptr<WasmCode>>);
+
+ void ExitScope() { compilation_scope_mutex_.UnlockShared(); }
+
+ // {compilation_scope_mutex_} protects {native_module_}.
+ base::SharedMutex compilation_scope_mutex_;
+ std::weak_ptr<NativeModule> native_module_;
+
+ // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
+ base::Mutex publish_mutex_;
+ std::vector<std::unique_ptr<WasmCode>> publish_queue_;
+ bool publisher_running_ = false;
};
class CompilationStateImpl;
@@ -129,6 +167,12 @@ class BackgroundCompileScope {
inline CompilationStateImpl* compilation_state();
+ // Call {SchedulePublishCode} via the {BackgroundCompileScope} to guarantee
+ // that the {NativeModule} stays alive.
+ void SchedulePublishCode(std::vector<std::unique_ptr<WasmCode>> codes) {
+ token_->SchedulePublishCode(native_module_.get(), std::move(codes));
+ }
+
private:
BackgroundCompileToken* const token_;
// Keep the native module alive while in this scope.
@@ -386,7 +430,7 @@ class CompilationStateImpl {
// called immediately if no recompilation is needed, or called later
// otherwise.
void InitializeRecompilation(
- ExecutionTier tier,
+ TieringState new_tiering_state,
CompilationState::callback_t recompilation_finished_callback);
// Add the callback function to be called on compilation events. Needs to be
@@ -409,7 +453,7 @@ class CompilationStateImpl {
void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray>* export_wrappers_out);
- void OnFinishedUnits(Vector<WasmCode*>, Vector<WasmCompilationResult>);
+ void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
@@ -556,7 +600,8 @@ class CompilationStateImpl {
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
- ExecutionTier recompilation_tier_;
+ TieringState tiering_state_ = kTieredUp;
+
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -564,7 +609,7 @@ class CompilationStateImpl {
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
- using ReachedRecompilationTierField = base::BitField8<ExecutionTier, 6, 2>;
+ using MissingRecompilationField = base::BitField8<bool, 6, 1>;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -578,10 +623,23 @@ CompilationStateImpl* BackgroundCompileScope::compilation_state() {
return Impl(native_module()->compilation_state());
}
+void BackgroundCompileToken::PublishCode(
+ NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> published_code = native_module->PublishCode(code);
+ native_module->engine()->LogCode(VectorOf(published_code));
+
+ Impl(native_module->compilation_state())
+ ->OnFinishedUnits(VectorOf(published_code));
+}
+
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
if (detected.has_threads()) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
}
+ if (detected.has_simd()) {
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSimdOpcodes);
+ }
}
} // namespace
@@ -641,8 +699,6 @@ ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
switch (hint) {
case WasmCompilationHintTier::kDefault:
return default_tier;
- case WasmCompilationHintTier::kInterpreter:
- return ExecutionTier::kInterpreter;
case WasmCompilationHintTier::kBaseline:
return ExecutionTier::kLiftoff;
case WasmCompilationHintTier::kOptimized:
@@ -739,15 +795,16 @@ class CompilationUnitBuilder {
void AddUnits(uint32_t func_index) {
if (func_index < native_module_->module()->num_imported_functions) {
- baseline_units_.emplace_back(func_index, ExecutionTier::kNone);
+ baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
+ kNoDebugging);
return;
}
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
- baseline_units_.emplace_back(func_index, tiers.baseline_tier);
+ baseline_units_.emplace_back(func_index, tiers.baseline_tier, kNoDebugging);
if (tiers.baseline_tier != tiers.top_tier) {
- tiering_units_.emplace_back(func_index, tiers.top_tier);
+ tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
}
}
@@ -770,12 +827,14 @@ class CompilationUnitBuilder {
GetCompileStrategy(module, native_module_->enabled_features(),
func_index, lazy_module));
#endif
- tiering_units_.emplace_back(func_index, tiers.top_tier);
+ tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
}
void AddRecompilationUnit(int func_index, ExecutionTier tier) {
// For recompilation, just treat all units like baseline units.
- baseline_units_.emplace_back(func_index, tier);
+ baseline_units_.emplace_back(
+ func_index, tier,
+ tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging);
}
bool Commit() {
@@ -898,7 +957,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
- WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
+ WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
+ kNoDebugging};
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
@@ -923,7 +983,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
}
WasmCodeRefScope code_ref_scope;
- WasmCode* code = native_module->AddCompiledCode(std::move(result));
+ WasmCode* code = native_module->PublishCode(
+ native_module->AddCompiledCode(std::move(result)));
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
@@ -934,7 +995,7 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
if (GetCompileStrategy(module, enabled_features, func_index, lazy_module) ==
CompileStrategy::kLazy &&
tiers.baseline_tier < tiers.top_tier) {
- WasmCompilationUnit tiering_unit{func_index, tiers.top_tier};
+ WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
compilation_state->AddTopTierCompilationUnit(tiering_unit);
}
@@ -977,9 +1038,7 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
return true;
}
-bool NeedsDeterministicCompile() {
- return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
-}
+bool NeedsDeterministicCompile() { return FLAG_single_threaded; }
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
@@ -1052,17 +1111,16 @@ bool ExecuteCompilationUnits(
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "PublishResults",
"num_results", results_to_publish.size());
if (results_to_publish.empty()) return;
- WasmCodeRefScope code_ref_scope;
- std::vector<WasmCode*> code_vector =
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
+ results_to_publish.clear();
// For import wrapper compilation units, add result to the cache.
const NativeModule* native_module = compile_scope->native_module();
int num_imported_functions = native_module->num_imported_functions();
- DCHECK_EQ(code_vector.size(), results_to_publish.size());
WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
- for (WasmCode* code : code_vector) {
+ for (const auto& code : unpublished_code) {
int func_index = code->index();
DCHECK_LE(0, func_index);
DCHECK_LT(func_index, native_module->num_functions());
@@ -1075,16 +1133,12 @@ bool ExecuteCompilationUnits(
// have been added as a compilation unit. So it is always the first time
// we compile a wrapper for this key here.
DCHECK_NULL((*cache)[key]);
- (*cache)[key] = code;
+ (*cache)[key] = code.get();
code->IncRef();
}
}
- native_module->engine()->LogCode(VectorOf(code_vector));
-
- compile_scope->compilation_state()->OnFinishedUnits(
- VectorOf(code_vector), VectorOf(results_to_publish));
- results_to_publish.clear();
+ compile_scope->SchedulePublishCode(std::move(unpublished_code));
};
bool compilation_failed = false;
@@ -1273,6 +1327,7 @@ class CompilationTimeCallback {
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
const WasmModule* wasm_module,
NativeModule* native_module) {
+ CHECK(!FLAG_jitless);
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const bool lazy_module = IsLazyModule(wasm_module);
if (!FLAG_wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
@@ -1314,15 +1369,11 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// are part of initial compilation). Otherwise, just execute baseline units.
bool is_tiering = compilation_state->compile_mode() == CompileMode::kTiering;
auto baseline_only = is_tiering ? kBaselineOnly : kBaselineOrTopTier;
- // The main threads contributes to the compilation, except if we need
- // deterministic compilation; in that case, the single background task will
- // execute all compilation.
- if (!NeedsDeterministicCompile()) {
- while (ExecuteCompilationUnits(
- compilation_state->background_compile_token(), isolate->counters(),
- kMainThreadTaskId, baseline_only)) {
- // Continue executing compilation units.
- }
+ // The main thread contributes to the compilation.
+ while (ExecuteCompilationUnits(compilation_state->background_compile_token(),
+ isolate->counters(), kMainThreadTaskId,
+ baseline_only)) {
+ // Continue executing compilation units.
}
// Now wait until baseline compilation finished.
@@ -1415,32 +1466,29 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
-void RecompileNativeModule(Isolate* isolate, NativeModule* native_module,
- ExecutionTier tier) {
+void RecompileNativeModule(NativeModule* native_module,
+ TieringState tiering_state) {
// Install a callback to notify us once background recompilation finished.
auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
auto* compilation_state = Impl(native_module->compilation_state());
- DCHECK(tier == ExecutionTier::kTurbofan || tier == ExecutionTier::kLiftoff);
// The callback captures a shared ptr to the semaphore.
// Initialize the compilation units and kick off background compile tasks.
compilation_state->InitializeRecompilation(
- tier, [recompilation_finished_semaphore](CompilationEvent event) {
+ tiering_state,
+ [recompilation_finished_semaphore](CompilationEvent event) {
if (event == CompilationEvent::kFinishedRecompilation) {
recompilation_finished_semaphore->Signal();
}
});
- // For tier down only.
- if (tier == ExecutionTier::kLiftoff) {
- // The main thread contributes to the compilation, except if we need
- // deterministic compilation; in that case, the single background task will
- // execute all compilation.
- if (!NeedsDeterministicCompile()) {
- while (ExecuteCompilationUnits(
- compilation_state->background_compile_token(), isolate->counters(),
- kMainThreadTaskId, kBaselineOnly)) {
- // Continue executing compilation units.
- }
+ // We only wait for tier down. Tier up can happen in the background.
+ if (tiering_state == kTieredDown) {
+ // The main thread contributes to the compilation.
+ constexpr Counters* kNoCounters = nullptr;
+ while (ExecuteCompilationUnits(
+ compilation_state->background_compile_token(), kNoCounters,
+ kMainThreadTaskId, kBaselineOnly)) {
+ // Continue executing compilation units.
}
// Now wait until baseline recompilation finished.
@@ -1596,12 +1644,9 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// Create heap objects for script and module bytes to be stored in the
// module object. Asm.js is not compiled asynchronously.
DCHECK(module_object_.is_null());
- const WasmModule* module = native_module_->module();
auto source_url = stream_ ? stream_->url() : Vector<const char>();
- Handle<Script> script = CreateWasmScript(
- isolate_, native_module_->wire_bytes(), VectorOf(module->source_map_url),
- module->name, source_url);
-
+ auto script = isolate_->wasm_engine()->GetOrCreateScript(
+ isolate_, native_module_, source_url);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate_, native_module_, script);
@@ -1633,10 +1678,13 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
DCHECK(!isolate_->context().is_null());
// Finish the wasm script now and make it public to the debugger.
Handle<Script> script(module_object_->script(), isolate_);
+ const WasmModule* module = module_object_->module();
if (script->type() == Script::TYPE_WASM &&
- module_object_->module()->source_map_url.size() != 0) {
+ module->debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
+ !module->debug_symbols.external_url.is_empty()) {
+ ModuleWireBytes wire_bytes(module_object_->native_module()->wire_bytes());
MaybeHandle<String> src_map_str = isolate_->factory()->NewStringFromUtf8(
- CStrVector(module_object_->module()->source_map_url.c_str()),
+ wire_bytes.GetNameOrNull(module->debug_symbols.external_url),
AllocationType::kOld);
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
@@ -1651,11 +1699,10 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
Handle<FixedArray> export_wrappers;
if (is_after_cache_hit) {
// TODO(thibaudm): Look into sharing wrappers.
- CompileJsToWasmWrappers(isolate_, module_object_->module(),
- &export_wrappers);
+ CompileJsToWasmWrappers(isolate_, module, &export_wrappers);
} else {
- compilation_state->FinalizeJSToWasmWrappers(
- isolate_, module_object_->module(), &export_wrappers);
+ compilation_state->FinalizeJSToWasmWrappers(isolate_, module,
+ &export_wrappers);
}
module_object_->set_export_wrappers(*export_wrappers);
}
@@ -2456,11 +2503,9 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
for (int func_index = start; func_index < end; func_index++) {
if (prefer_liftoff) {
constexpr uint8_t kLiftoffOnlyFunctionProgress =
- RequiredTopTierField::update(
- RequiredBaselineTierField::update(
- ReachedTierField::encode(ExecutionTier::kNone),
- ExecutionTier::kLiftoff),
- ExecutionTier::kLiftoff);
+ RequiredTopTierField::encode(ExecutionTier::kLiftoff) |
+ RequiredBaselineTierField::encode(ExecutionTier::kLiftoff) |
+ ReachedTierField::encode(ExecutionTier::kNone);
compilation_progress_.push_back(kLiftoffOnlyFunctionProgress);
outstanding_baseline_units_++;
outstanding_top_tier_functions_++;
@@ -2517,7 +2562,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
}
void CompilationStateImpl::InitializeRecompilation(
- ExecutionTier tier,
+ TieringState new_tiering_state,
CompilationState::callback_t recompilation_finished_callback) {
DCHECK(!failed());
@@ -2529,27 +2574,21 @@ void CompilationStateImpl::InitializeRecompilation(
// Restart recompilation if another recompilation is already happening.
outstanding_recompilation_functions_ = 0;
- // If compilation hasn't started yet then code would be keep as tiered-down
+ // If compilation hasn't started yet then code would be kept as tiered-down
// and don't need to recompile.
if (compilation_progress_.size() > 0) {
- int start = native_module_->module()->num_imported_functions;
- int end = start + native_module_->module()->num_declared_functions;
- for (int function_index = start; function_index < end; function_index++) {
- int slot_index = function_index - start;
- DCHECK_LT(slot_index, compilation_progress_.size());
- ExecutionTier reached_tier =
- ReachedTierField::decode(compilation_progress_[slot_index]);
- bool has_correct_tier =
- reached_tier == tier &&
- native_module_->HasCodeWithTier(function_index, tier);
- compilation_progress_[slot_index] =
- ReachedRecompilationTierField::update(
- compilation_progress_[slot_index],
- has_correct_tier ? tier : ExecutionTier::kNone);
- if (!has_correct_tier) {
- outstanding_recompilation_functions_++;
- builder.AddRecompilationUnit(function_index, tier);
- }
+ const WasmModule* module = native_module_->module();
+ int imported = module->num_imported_functions;
+ int declared = module->num_declared_functions;
+ outstanding_recompilation_functions_ = declared;
+ DCHECK_EQ(declared, compilation_progress_.size());
+ for (int slot_index = 0; slot_index < declared; ++slot_index) {
+ compilation_progress_[slot_index] = MissingRecompilationField::update(
+ compilation_progress_[slot_index], true);
+ builder.AddRecompilationUnit(imported + slot_index,
+ new_tiering_state == kTieredDown
+ ? ExecutionTier::kLiftoff
+ : ExecutionTier::kTurbofan);
}
}
@@ -2559,7 +2598,7 @@ void CompilationStateImpl::InitializeRecompilation(
recompilation_finished_callback(CompilationEvent::kFinishedRecompilation);
} else {
callbacks_.emplace_back(std::move(recompilation_finished_callback));
- recompilation_tier_ = tier;
+ tiering_state_ = new_tiering_state;
}
}
@@ -2640,8 +2679,7 @@ CompilationStateImpl::GetNextCompilationUnit(
return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
}
-void CompilationStateImpl::OnFinishedUnits(
- Vector<WasmCode*> code_vector, Vector<WasmCompilationResult> results) {
+void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "OnFinishedUnits",
"num_units", code_vector.size());
@@ -2706,21 +2744,19 @@ void CompilationStateImpl::OnFinishedUnits(
outstanding_top_tier_functions_--;
}
- // If there is recompilation in progress, we would only count the
- // functions which are not Liftoff already, and would only decrement the
- // counter once a function reaches Liftoff.
- if (outstanding_recompilation_functions_ > 0) {
- // TODO(duongn): extend this logic for tier up.
- ExecutionTier recompilation_tier =
- ReachedRecompilationTierField::decode(function_progress);
- if (results[i].requested_tier == recompilation_tier_ &&
- recompilation_tier == ExecutionTier::kNone) {
- DCHECK(code->tier() >= recompilation_tier_);
+ if (V8_UNLIKELY(MissingRecompilationField::decode(function_progress))) {
+ DCHECK_LT(0, outstanding_recompilation_functions_);
+ // If tiering up, accept any TurboFan code. For tiering down, look at
+ // the {for_debugging} flag. The tier can be Liftoff or TurboFan and is
+ // irrelevant here. In particular, we want to ignore any outstanding
+ // non-debugging units.
+ bool matches = tiering_state_ == kTieredDown
+ ? code->for_debugging()
+ : code->tier() == ExecutionTier::kTurbofan;
+ if (matches) {
outstanding_recompilation_functions_--;
- // Update function's recompilation progress.
- compilation_progress_[slot_index] =
- ReachedRecompilationTierField::update(
- compilation_progress_[slot_index], code->tier());
+ compilation_progress_[slot_index] = MissingRecompilationField::update(
+ compilation_progress_[slot_index], false);
if (outstanding_recompilation_functions_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedRecompilation);
}
@@ -2979,7 +3015,7 @@ WasmCode* CompileImportWrapper(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone);
+ ExecutionTier::kNone, kNoDebugging);
WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
(*cache_scope)[key] = published_code;
published_code->IncRef();
@@ -2989,71 +3025,6 @@ WasmCode* CompileImportWrapper(
return published_code;
}
-Handle<Script> CreateWasmScript(Isolate* isolate,
- Vector<const uint8_t> wire_bytes,
- Vector<const char> source_map_url,
- WireBytesRef name,
- Vector<const char> source_url) {
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- script->set_context_data(isolate->native_context()->debug_context_id());
- script->set_type(Script::TYPE_WASM);
-
- int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
- kZeroHashSeed);
-
- const int kBufferSize = 32;
- char buffer[kBufferSize];
-
- // Script name is "<module_name>-hash" if name is available and "hash"
- // otherwise.
- Handle<String> name_str;
- if (name.is_set()) {
- int name_chars = SNPrintF(ArrayVector(buffer), "-%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- Handle<String> name_hash =
- isolate->factory()
- ->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
- AllocationType::kOld)
- .ToHandleChecked();
- Handle<String> module_name =
- WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate, wire_bytes, name, kNoInternalize);
- name_str = isolate->factory()
- ->NewConsString(module_name, name_hash)
- .ToHandleChecked();
- } else {
- int name_chars = SNPrintF(ArrayVector(buffer), "%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- name_str = isolate->factory()
- ->NewStringFromOneByte(
- VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
- AllocationType::kOld)
- .ToHandleChecked();
- }
- script->set_name(*name_str);
- MaybeHandle<String> url_str;
- if (!source_url.empty()) {
- url_str =
- isolate->factory()->NewStringFromUtf8(source_url, AllocationType::kOld);
- } else {
- Handle<String> url_prefix =
- isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
- url_str = isolate->factory()->NewConsString(url_prefix, name_str);
- }
- script->set_source_url(*url_str.ToHandleChecked());
-
- if (!source_map_url.empty()) {
- MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
- source_map_url, AllocationType::kOld);
- script->set_source_mapping_url(*src_map_str.ToHandleChecked());
- }
- return script;
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 4c6acd9aa9..a3fc4037a2 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -44,8 +44,8 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out);
-void RecompileNativeModule(Isolate* isolate, NativeModule* native_module,
- ExecutionTier tier);
+void RecompileNativeModule(NativeModule* native_module,
+ TieringState new_tiering_state);
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
@@ -60,11 +60,6 @@ WasmCode* CompileImportWrapper(
compiler::WasmImportCallKind kind, const FunctionSig* sig,
WasmImportWrapperCache::ModificationScope* cache_scope);
-V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
- Isolate* isolate, Vector<const uint8_t> wire_bytes,
- Vector<const char> source_map_url, WireBytesRef name,
- Vector<const char> source_url = {});
-
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
// also lazy.
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 58be26f845..e7ecd1396b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -13,6 +13,8 @@
#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/struct-types.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -31,11 +33,7 @@ constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
constexpr char kDebugInfoString[] = ".debug_info";
-
-template <size_t N>
-constexpr size_t num_chars(const char (&)[N]) {
- return N - 1; // remove null character at end.
-}
+constexpr char kExternalDebugInfoString[] = ".external_debug_info";
const char* ExternalKindName(ImportExportKindCode kind) {
switch (kind) {
@@ -91,6 +89,8 @@ const char* SectionName(SectionCode code) {
return kSourceMappingURLString;
case kDebugInfoSectionCode:
return kDebugInfoString;
+ case kExternalDebugInfoSectionCode:
+ return kExternalDebugInfoString;
case kCompilationHintsSectionCode:
return kCompilationHintsString;
default:
@@ -162,25 +162,21 @@ SectionCode IdentifyUnknownSectionInternal(Decoder* decoder) {
static_cast<int>(section_name_start - decoder->start()),
string.length() < 20 ? string.length() : 20, section_name_start);
- if (string.length() == num_chars(kNameString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start), kNameString,
- num_chars(kNameString)) == 0) {
- return kNameSectionCode;
- } else if (string.length() == num_chars(kSourceMappingURLString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kSourceMappingURLString,
- num_chars(kSourceMappingURLString)) == 0) {
- return kSourceMappingURLSectionCode;
- } else if (string.length() == num_chars(kCompilationHintsString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kCompilationHintsString,
- num_chars(kCompilationHintsString)) == 0) {
- return kCompilationHintsSectionCode;
- } else if (string.length() == num_chars(kDebugInfoString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kDebugInfoString, num_chars(kDebugInfoString)) == 0) {
- return kDebugInfoSectionCode;
+ using SpecialSectionPair = std::pair<Vector<const char>, SectionCode>;
+ static constexpr SpecialSectionPair kSpecialSections[]{
+ {StaticCharVector(kNameString), kNameSectionCode},
+ {StaticCharVector(kSourceMappingURLString), kSourceMappingURLSectionCode},
+ {StaticCharVector(kCompilationHintsString), kCompilationHintsSectionCode},
+ {StaticCharVector(kDebugInfoString), kDebugInfoSectionCode},
+ {StaticCharVector(kExternalDebugInfoString),
+ kExternalDebugInfoSectionCode}};
+
+ auto name_vec =
+ Vector<const char>::cast(VectorOf(section_name_start, string.length()));
+ for (auto& special_section : kSpecialSections) {
+ if (name_vec == special_section.first) return special_section.second;
}
+
return kUnknownSectionCode;
}
} // namespace
@@ -450,6 +446,9 @@ class ModuleDecoderImpl : public Decoder {
// .debug_info is a custom section containing core DWARF information
// if produced by compiler. Its presence likely means that Wasm was
// built in a debug mode.
+ case kExternalDebugInfoSectionCode:
+ // .external_debug_info is a custom section containing a reference to an
+ // external symbol file.
case kCompilationHintsSectionCode:
// TODO(frgossen): report out of place compilation hints section as a
// warning.
@@ -506,11 +505,14 @@ class ModuleDecoderImpl : public Decoder {
break;
case kDebugInfoSectionCode:
// If there is an explicit source map, prefer it over DWARF info.
- if (!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
- module_->source_map_url.assign("wasm://dwarf");
+ if (module_->debug_symbols.type == WasmDebugSymbols::Type::None) {
+ module_->debug_symbols = {WasmDebugSymbols::Type::EmbeddedDWARF, {}};
}
consume_bytes(static_cast<uint32_t>(end_ - start_), ".debug_info");
break;
+ case kExternalDebugInfoSectionCode:
+ DecodeExternalDebugInfoSection();
+ break;
case kCompilationHintsSectionCode:
if (enabled_features_.has_compilation_hints()) {
DecodeCompilationHintsSection();
@@ -550,14 +552,41 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTypeSection() {
uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
- module_->signatures.reserve(signatures_count);
+ module_->types.reserve(signatures_count);
for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->signatures.push_back(s);
- uint32_t id = s ? module_->signature_map.FindOrInsert(*s) : 0;
- module_->signature_ids.push_back(id);
+ uint8_t kind = consume_u8("type kind");
+ switch (kind) {
+ case kWasmFunctionTypeCode: {
+ const FunctionSig* s = consume_sig(module_->signature_zone.get());
+ module_->add_signature(s);
+ break;
+ }
+ case kWasmStructTypeCode: {
+ if (!enabled_features_.has_gc()) {
+ errorf(pc(), "struct types are part of the GC proposal");
+ break;
+ }
+ const StructType* s = consume_struct(module_->signature_zone.get());
+ module_->add_struct_type(s);
+ // TODO(7748): Should we canonicalize struct types, like
+ // {signature_map} does for function signatures?
+ break;
+ }
+ case kWasmArrayTypeCode: {
+ if (!enabled_features_.has_gc()) {
+ errorf(pc(), "array types are part of the GC proposal");
+ break;
+ }
+ const ArrayType* type = consume_array(module_->signature_zone.get());
+ module_->add_array_type(type);
+ break;
+ }
+ default:
+ errorf(pc(), "unknown type form: %d", kind);
+ break;
+ }
}
module_->signature_map.Freeze();
}
@@ -1034,12 +1063,22 @@ class ModuleDecoderImpl : public Decoder {
Decoder inner(start_, pc_, end_, buffer_offset_);
WireBytesRef url = wasm::consume_string(&inner, true, "module name");
if (inner.ok() &&
- !has_seen_unordered_section(kSourceMappingURLSectionCode)) {
- const byte* url_start =
- inner.start() + inner.GetBufferRelativeOffset(url.offset());
- module_->source_map_url.assign(reinterpret_cast<const char*>(url_start),
- url.length());
- set_seen_unordered_section(kSourceMappingURLSectionCode);
+ module_->debug_symbols.type != WasmDebugSymbols::Type::SourceMap) {
+ module_->debug_symbols = {WasmDebugSymbols::Type::SourceMap, url};
+ }
+ set_seen_unordered_section(kSourceMappingURLSectionCode);
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+
+ void DecodeExternalDebugInfoSection() {
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ WireBytesRef url =
+ wasm::consume_string(&inner, true, "external symbol file");
+ // If there is an explicit source map, prefer it over DWARF info.
+ if (inner.ok() &&
+ module_->debug_symbols.type != WasmDebugSymbols::Type::SourceMap) {
+ module_->debug_symbols = {WasmDebugSymbols::Type::ExternalDWARF, url};
+ set_seen_unordered_section(kExternalDebugInfoSectionCode);
}
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
@@ -1257,6 +1296,8 @@ class ModuleDecoderImpl : public Decoder {
const WasmModule* module,
std::unique_ptr<WasmFunction> function) {
pc_ = start_;
+ expect_u8("type form", kWasmFunctionTypeCode);
+ if (!ok()) return FunctionResult{std::move(intermediate_error_)};
function->sig = consume_sig(zone);
function->code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
@@ -1274,6 +1315,7 @@ class ModuleDecoderImpl : public Decoder {
// Decodes a single function signature at {start}.
const FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
pc_ = start;
+ if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
const FunctionSig* result = consume_sig(zone);
return ok() ? result : nullptr;
}
@@ -1438,13 +1480,13 @@ class ModuleDecoderImpl : public Decoder {
uint32_t consume_sig_index(WasmModule* module, const FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
- if (sig_index >= module->signatures.size()) {
+ if (!module->has_signature(sig_index)) {
errorf(pos, "signature index %u out of bounds (%d signatures)", sig_index,
- static_cast<int>(module->signatures.size()));
+ static_cast<int>(module->types.size()));
*sig = nullptr;
return 0;
}
- *sig = module->signatures[sig_index];
+ *sig = module->signature(sig_index);
return sig_index;
}
@@ -1680,44 +1722,14 @@ class ModuleDecoderImpl : public Decoder {
return val != 0;
}
- // Reads a single 8-bit integer, interpreting it as a local type.
ValueType consume_value_type() {
- byte val = consume_u8("value type");
- ValueTypeCode t = static_cast<ValueTypeCode>(val);
- switch (t) {
- case kLocalI32:
- return kWasmI32;
- case kLocalI64:
- return kWasmI64;
- case kLocalF32:
- return kWasmF32;
- case kLocalF64:
- return kWasmF64;
- default:
- if (origin_ == kWasmOrigin) {
- switch (t) {
- case kLocalS128:
- if (enabled_features_.has_simd()) return kWasmS128;
- break;
- case kLocalFuncRef:
- if (enabled_features_.has_anyref()) return kWasmFuncRef;
- break;
- case kLocalAnyRef:
- if (enabled_features_.has_anyref()) return kWasmAnyRef;
- break;
- case kLocalNullRef:
- if (enabled_features_.has_anyref()) return kWasmNullRef;
- break;
- case kLocalExnRef:
- if (enabled_features_.has_eh()) return kWasmExnRef;
- break;
- default:
- break;
- }
- }
- error(pc_ - 1, "invalid local type");
- return kWasmStmt;
- }
+ ValueType result;
+ uint32_t type_length = value_type_reader::read_value_type<kValidate>(
+ this, this->pc(), &result,
+ origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
+ if (type_length == 0) error(pc_, "invalid value type");
+ consume_bytes(type_length);
+ return result;
}
// Reads a single 8-bit integer, interpreting it as a reference type.
@@ -1754,8 +1766,7 @@ class ModuleDecoderImpl : public Decoder {
}
const FunctionSig* consume_sig(Zone* zone) {
- if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
- // parse parameter types
+ // Parse parameter types.
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
if (failed()) return nullptr;
@@ -1765,7 +1776,7 @@ class ModuleDecoderImpl : public Decoder {
params.push_back(param);
}
std::vector<ValueType> returns;
- // parse return types
+ // Parse return types.
const size_t max_return_count = enabled_features_.has_mv()
? kV8MaxWasmFunctionMultiReturns
: kV8MaxWasmFunctionReturns;
@@ -1787,6 +1798,28 @@ class ModuleDecoderImpl : public Decoder {
return new (zone) FunctionSig(return_count, param_count, buffer);
}
+ const StructType* consume_struct(Zone* zone) {
+ // TODO(7748): Introduce a proper maximum.
+ uint32_t field_count = consume_count("field count", 999);
+ if (failed()) return nullptr;
+ std::vector<ValueType> fields;
+ for (uint32_t i = 0; ok() && i < field_count; ++i) {
+ ValueType field = consume_value_type();
+ fields.push_back(field);
+ }
+ if (failed()) return nullptr;
+ ValueType* buffer = zone->NewArray<ValueType>(field_count);
+ for (uint32_t i = 0; i < field_count; i++) buffer[i] = fields[i];
+ uint32_t* offsets = zone->NewArray<uint32_t>(field_count);
+ return new (zone) StructType(field_count, offsets, buffer);
+ }
+
+ const ArrayType* consume_array(Zone* zone) {
+ ValueType field = consume_value_type();
+ if (failed()) return nullptr;
+ return new (zone) ArrayType(field);
+ }
+
// Consume the attribute field of an exception.
uint32_t consume_exception_attribute() {
const byte* pos = pc_;
@@ -2081,8 +2114,8 @@ FunctionResult DecodeWasmFunctionForTesting(
const byte* function_end, Counters* counters) {
size_t size = function_end - function_start;
CHECK_LE(function_start, function_end);
- auto size_histogram = SELECT_WASM_COUNTER(counters, module->origin, wasm,
- function_size_bytes);
+ auto size_histogram =
+ SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes);
// TODO(bradnelson): Improve histogram handling of ptrdiff_t.
size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize) {
@@ -2257,17 +2290,18 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
-void DecodeGlobalNames(
- const Vector<const WasmImport> import_table,
+void GenerateNamesFromImportsAndExports(
+ ImportExportKindCode kind, const Vector<const WasmImport> import_table,
const Vector<const WasmExport> export_table,
std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>*
names) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
// Extract from import table.
for (const WasmImport& imp : import_table) {
- if (imp.kind != kExternalGlobal) continue;
+ if (imp.kind != kind) continue;
if (!imp.module_name.is_set() || !imp.field_name.is_set()) continue;
if (names->count(imp.index) == 0) {
names->insert(std::make_pair(
@@ -2277,7 +2311,7 @@ void DecodeGlobalNames(
// Extract from export table.
for (const WasmExport& exp : export_table) {
- if (exp.kind != kExternalGlobal) continue;
+ if (exp.kind != kind) continue;
if (!exp.name.is_set()) continue;
if (names->count(exp.index) == 0) {
names->insert(
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 6feeebb41d..4b157a4ab5 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -168,10 +168,10 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
std::unordered_map<uint32_t, WireBytesRef>* names,
const Vector<const WasmExport> export_table);
-// Decode the global names from import table and export table. Returns the
-// result as an unordered map.
-void DecodeGlobalNames(
- const Vector<const WasmImport> import_table,
+// Decode the global or memory names from import table and export table. Returns
+// the result as an unordered map.
+void GenerateNamesFromImportsAndExports(
+ ImportExportKindCode kind, const Vector<const WasmImport> import_table,
const Vector<const WasmExport> export_table,
std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>* names);
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 08bd8ff871..9dfc1e1608 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -11,6 +11,7 @@
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
@@ -84,6 +85,39 @@ class CompileImportWrapperTask final : public CancelableTask {
WasmImportWrapperCache::ModificationScope* const cache_scope_;
};
+Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
+ int struct_index) {
+ const wasm::StructType* type = module->struct_type(struct_index);
+ int inobject_properties = 0;
+ DCHECK_LE(type->total_fields_size(), kMaxInt - WasmStruct::kHeaderSize);
+ int instance_size =
+ WasmStruct::kHeaderSize + static_cast<int>(type->total_fields_size());
+ InstanceType instance_type = WASM_STRUCT_TYPE;
+ // TODO(jkummerow): If NO_ELEMENTS were supported, we could use that here.
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ Handle<Foreign> type_info =
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(type));
+ Handle<Map> map = isolate->factory()->NewMap(
+ instance_type, instance_size, elements_kind, inobject_properties);
+ map->set_wasm_type_info(*type_info);
+ return map;
+}
+
+Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
+ int array_index) {
+ const wasm::ArrayType* type = module->array_type(array_index);
+ int inobject_properties = 0;
+ int instance_size = kVariableSizeSentinel;
+ InstanceType instance_type = WASM_ARRAY_TYPE;
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ Handle<Foreign> type_info =
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(type));
+ Handle<Map> map = isolate->factory()->NewMap(
+ instance_type, instance_size, elements_kind, inobject_properties);
+ map->set_wasm_type_info(*type_info);
+ return map;
+}
+
} // namespace
// A helper class to simplify instantiating a module from a module object.
@@ -534,11 +568,30 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Debugging support.
+ // Create maps for managed objects (GC proposal).
//--------------------------------------------------------------------------
- // Set all breakpoints that were set on the shared module.
- WasmScript::SetBreakpointsOnNewInstance(
- handle(module_object_->script(), isolate_), instance);
+ if (enabled_.has_gc()) {
+ int count = 0;
+ for (uint8_t type_kind : module_->type_kinds) {
+ if (type_kind == kWasmStructTypeCode || type_kind == kWasmArrayTypeCode) {
+ count++;
+ }
+ }
+ Handle<FixedArray> maps =
+ isolate_->factory()->NewUninitializedFixedArray(count);
+ for (int i = 0; i < static_cast<int>(module_->type_kinds.size()); i++) {
+ int index = 0;
+ if (module_->type_kinds[i] == kWasmStructTypeCode) {
+ Handle<Map> map = CreateStructMap(isolate_, module_, i);
+ maps->set(index++, *map);
+ }
+ if (module_->type_kinds[i] == kWasmArrayTypeCode) {
+ Handle<Map> map = CreateArrayMap(isolate_, module_, i);
+ maps->set(index++, *map);
+ }
+ }
+ instance->set_managed_object_maps(*maps);
+ }
//--------------------------------------------------------------------------
// Create a wrapper for the start function.
@@ -751,7 +804,10 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
case ValueType::kAnyRef:
case ValueType::kFuncRef:
case ValueType::kNullRef:
- case ValueType::kExnRef: {
+ case ValueType::kExnRef:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef: {
DCHECK_IMPLIES(global.type == kWasmNullRef, value->GetRef()->IsNull());
tagged_globals_->set(global.offset, *value->GetRef());
break;
diff --git a/deps/v8/src/wasm/struct-types.h b/deps/v8/src/wasm/struct-types.h
new file mode 100644
index 0000000000..6cd4271c24
--- /dev/null
+++ b/deps/v8/src/wasm/struct-types.h
@@ -0,0 +1,116 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_STRUCT_TYPES_H_
+#define V8_WASM_STRUCT_TYPES_H_
+
+#include "src/base/iterator.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/wasm/value-type.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class StructType : public ZoneObject {
+ public:
+ StructType(uint32_t field_count, uint32_t* field_offsets,
+ const ValueType* reps)
+ : field_count_(field_count), field_offsets_(field_offsets), reps_(reps) {
+ InitializeOffsets();
+ }
+
+ uint32_t field_count() const { return field_count_; }
+
+ ValueType field(uint32_t index) const {
+ DCHECK_LT(index, field_count_);
+ return reps_[index];
+ }
+
+ // Iteration support.
+ base::iterator_range<const ValueType*> fields() const {
+ return {reps_, reps_ + field_count_};
+ }
+
+ bool operator==(const StructType& other) const {
+ if (this == &other) return true;
+ if (field_count() != other.field_count()) return false;
+ return std::equal(fields().begin(), fields().end(), other.fields().begin());
+ }
+ bool operator!=(const StructType& other) const { return !(*this == other); }
+
+ uint32_t field_offset(uint32_t index) const {
+ DCHECK_LT(index, field_count());
+ if (index == 0) return 0;
+ return field_offsets_[index - 1];
+ }
+ uint32_t total_fields_size() const {
+ return field_offsets_[field_count() - 1];
+ }
+
+ void InitializeOffsets() {
+ uint32_t offset = field(0).element_size_bytes();
+ for (uint32_t i = 1; i < field_count(); i++) {
+ uint32_t field_size = field(i).element_size_bytes();
+ offset = RoundUp(offset, field_size);
+ field_offsets_[i - 1] = offset;
+ offset += field_size;
+ }
+ offset = RoundUp(offset, kTaggedSize);
+ field_offsets_[field_count() - 1] = offset;
+ }
+
+ // For incrementally building StructTypes.
+ class Builder {
+ public:
+ Builder(Zone* zone, uint32_t field_count)
+ : field_count_(field_count),
+ zone_(zone),
+ cursor_(0),
+ buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))) {}
+
+ void AddField(ValueType type) {
+ DCHECK_LT(cursor_, field_count_);
+ buffer_[cursor_++] = type;
+ }
+
+ StructType* Build() {
+ DCHECK_EQ(cursor_, field_count_);
+ uint32_t* offsets = zone_->NewArray<uint32_t>(field_count_);
+ return new (zone_) StructType(field_count_, offsets, buffer_);
+ }
+
+ private:
+ const uint32_t field_count_;
+ Zone* zone_;
+ uint32_t cursor_;
+ ValueType* buffer_;
+ };
+
+ private:
+ uint32_t field_count_;
+ uint32_t* field_offsets_;
+ const ValueType* reps_;
+};
+
+class ArrayType : public ZoneObject {
+ public:
+ constexpr explicit ArrayType(ValueType rep) : rep_(rep) {}
+
+ ValueType element_type() const { return rep_; }
+
+ bool operator==(const ArrayType& other) const { return rep_ == other.rep_; }
+ bool operator!=(const ArrayType& other) const { return rep_ != other.rep_; }
+
+ private:
+ const ValueType rep_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_STRUCT_TYPES_H_
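The offset computation in StructType::InitializeOffsets above aligns each field to its own size and rounds the total up to the tagged size. A minimal standalone sketch of that layout rule follows (the 4/8-byte field sizes and the 8-byte alignment are assumptions chosen for illustration, not values read out of V8):

#include <cstdint>
#include <cstdio>
#include <vector>

namespace {

constexpr uint32_t kAssumedTaggedSize = 8;  // illustrative stand-in for kTaggedSize

uint32_t RoundUpTo(uint32_t value, uint32_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// Mirrors InitializeOffsets: offsets[i - 1] is the byte offset of field i
// (field 0 is always at offset 0), and offsets[count - 1] is the total,
// tag-aligned size of all fields. Assumes at least one field, as the
// original does.
std::vector<uint32_t> ComputeOffsets(const std::vector<uint32_t>& field_sizes) {
  std::vector<uint32_t> offsets(field_sizes.size());
  uint32_t offset = field_sizes[0];
  for (size_t i = 1; i < field_sizes.size(); ++i) {
    offset = RoundUpTo(offset, field_sizes[i]);  // align field i to its size
    offsets[i - 1] = offset;
    offset += field_sizes[i];
  }
  offsets[field_sizes.size() - 1] = RoundUpTo(offset, kAssumedTaggedSize);
  return offsets;
}

}  // namespace

int main() {
  // A hypothetical struct (i32, i64, i32), i.e. field sizes 4, 8, 4 bytes.
  for (uint32_t o : ComputeOffsets({4, 8, 4})) std::printf("%u ", o);
  std::printf("\n");  // prints "8 16 24": field 1 at 8, field 2 at 16, size 24
  return 0;
}

With these inputs the second field is pushed to offset 8 for alignment, which matches what InitializeOffsets computes for the same field sizes.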
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 42230dfc06..357dafbe2c 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_VALUE_TYPE_H_
#define V8_WASM_VALUE_TYPE_H_
+#include "src/base/bit-field.h"
#include "src/codegen/machine-type.h"
#include "src/wasm/wasm-constants.h"
@@ -19,17 +20,28 @@ namespace wasm {
// Type for holding simd values, defined in wasm-value.h.
class Simd128;
-// Type lattice: For any two types connected by a line, the type at the bottom
-// is a subtype of the other type.
+// Type lattice: Given a fixed struct type S, the following lattice
+// defines the subtyping relation among types:
+// For every two types connected by a line, the top type is a
+// (direct) subtype of the bottom type.
//
-// AnyRef
-// / \
-// FuncRef ExnRef
-// \ /
-// I32 I64 F32 F64 NullRef
-// \ \ \ \ /
-// ------------ Bottom
+// AnyRef
+// / \
+// / EqRef
+// / / \
+// FuncRef ExnRef OptRef(S)
+// \ | / \
+// I32 I64 F32 F64 NullRef Ref(S)
+// \ \ \ \ | /
+// ---------------------- Bottom ---------
// Format: kind, log2Size, code, machineType, shortName, typeName
+//
+// Some of these types are from proposals that are not standardized yet:
+// - "ref" types per https://github.com/WebAssembly/function-references
+// - "optref"/"eqref" per https://github.com/WebAssembly/gc
+//
+// TODO(7748): Extend this with struct and function subtyping.
+// Keep up to date with funcref vs. anyref subtyping.
#define FOREACH_VALUE_TYPE(V) \
V(Stmt, -1, Void, None, 'v', "<stmt>") \
V(I32, 2, I32, Int32, 'i', "i32") \
@@ -41,6 +53,9 @@ class Simd128;
V(FuncRef, kSystemPointerSizeLog2, FuncRef, TaggedPointer, 'a', "funcref") \
V(NullRef, kSystemPointerSizeLog2, NullRef, TaggedPointer, 'n', "nullref") \
V(ExnRef, kSystemPointerSizeLog2, ExnRef, TaggedPointer, 'e', "exn") \
+ V(Ref, kSystemPointerSizeLog2, Ref, TaggedPointer, '*', "ref") \
+ V(OptRef, kSystemPointerSizeLog2, OptRef, TaggedPointer, 'o', "optref") \
+ V(EqRef, kSystemPointerSizeLog2, EqRef, TaggedPointer, 'q', "eqref") \
V(Bottom, -1, Void, None, '*', "<bot>")
class ValueType {
@@ -51,15 +66,36 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr ValueType() : kind_(kStmt) {}
- explicit constexpr ValueType(Kind kind) : kind_(kind) {}
+ constexpr bool has_immediate() const {
+ return kind() == kRef || kind() == kOptRef;
+ }
+
+ constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
+ explicit constexpr ValueType(Kind kind)
+ : bit_field_(KindField::encode(kind)) {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK(!has_immediate());
+#endif
+ }
+ constexpr ValueType(Kind kind, uint32_t ref_index)
+ : bit_field_(KindField::encode(kind) | RefIndexField::encode(ref_index)) {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK(has_immediate());
+#endif
+ }
- constexpr Kind kind() const { return kind_; }
+ constexpr Kind kind() const { return KindField::decode(bit_field_); }
+ constexpr uint32_t ref_index() const {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK(has_immediate());
+#endif
+ return RefIndexField::decode(bit_field_);
+ }
constexpr int element_size_log2() const {
#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kStmt, kind_);
- DCHECK_NE(kBottom, kind_);
+ DCHECK_NE(kStmt, kind());
+ DCHECK_NE(kBottom, kind());
#endif
constexpr int kElementSizeLog2[] = {
@@ -68,46 +104,57 @@ class ValueType {
#undef ELEM_SIZE_LOG2
};
- return kElementSizeLog2[kind_];
+ return kElementSizeLog2[kind()];
}
constexpr int element_size_bytes() const { return 1 << element_size_log2(); }
constexpr bool operator==(ValueType other) const {
- return kind_ == other.kind_;
+ return bit_field_ == other.bit_field_;
}
constexpr bool operator!=(ValueType other) const {
- return kind_ != other.kind_;
+ return bit_field_ != other.bit_field_;
}
- bool IsSubTypeOf(ValueType other) const {
- return (*this == other) || (kind_ == kNullRef && other.kind_ == kAnyRef) ||
- (kind_ == kFuncRef && other.kind_ == kAnyRef) ||
- (kind_ == kExnRef && other.kind_ == kAnyRef) ||
- (kind_ == kNullRef && other.kind_ == kFuncRef) ||
- (kind_ == kNullRef && other.kind_ == kExnRef);
+ // TODO(7748): Extend this with struct and function subtyping.
+ // Keep up to date with funcref vs. anyref subtyping.
+ constexpr bool IsSubTypeOf(ValueType other) const {
+ return (*this == other) || (other.kind() == kAnyRef && IsReferenceType()) ||
+ (kind() == kNullRef && other.kind() != kRef &&
+ other.IsReferenceType()) ||
+ (other.kind() == kEqRef &&
+ (kind() == kExnRef || kind() == kOptRef || kind() == kRef)) ||
+ (kind() == kRef && other.kind() == kOptRef &&
+ ref_index() == other.ref_index());
}
- bool IsReferenceType() const {
- return kind_ == kAnyRef || kind_ == kFuncRef || kind_ == kNullRef ||
- kind_ == kExnRef;
+ constexpr bool IsReferenceType() const {
+ return kind() == kAnyRef || kind() == kFuncRef || kind() == kNullRef ||
+ kind() == kExnRef || kind() == kRef || kind() == kOptRef ||
+ kind() == kEqRef;
}
+ // TODO(7748): Extend this with struct and function subtyping.
+ // Keep up to date with funcref vs. anyref subtyping.
static ValueType CommonSubType(ValueType a, ValueType b) {
- if (a.kind() == b.kind()) return a;
+ if (a == b) return a;
// The only sub type of any value type is {bot}.
if (!a.IsReferenceType() || !b.IsReferenceType()) {
return ValueType(kBottom);
}
if (a.IsSubTypeOf(b)) return a;
if (b.IsSubTypeOf(a)) return b;
- // {a} and {b} are not each other's subtype. The biggest sub-type of all
- // reference types is {kWasmNullRef}.
+ // {a} and {b} are not each other's subtype.
+ // If one of them is not nullable, their greatest subtype is bottom,
+ // otherwise null.
+ if (a.kind() == kRef || b.kind() == kRef) return ValueType(kBottom);
return ValueType(kNullRef);
}
- ValueTypeCode value_type_code() const {
- DCHECK_NE(kBottom, kind_);
+ constexpr ValueTypeCode value_type_code() const {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK_NE(kBottom, kind());
+#endif
constexpr ValueTypeCode kValueTypeCode[] = {
#define TYPE_CODE(kind, log2Size, code, ...) kLocal##code,
@@ -115,11 +162,13 @@ class ValueType {
#undef TYPE_CODE
};
- return kValueTypeCode[kind_];
+ return kValueTypeCode[kind()];
}
- MachineType machine_type() const {
- DCHECK_NE(kBottom, kind_);
+ constexpr MachineType machine_type() const {
+#if V8_HAS_CXX14_CONSTEXPR
+ DCHECK_NE(kBottom, kind());
+#endif
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
@@ -128,10 +177,10 @@ class ValueType {
#undef MACH_TYPE
};
- return kMachineType[kind_];
+ return kMachineType[kind()];
}
- MachineRepresentation machine_representation() {
+ constexpr MachineRepresentation machine_representation() const {
return machine_type().representation();
}
@@ -163,7 +212,7 @@ class ValueType {
#undef SHORT_NAME
};
- return kShortName[kind_];
+ return kShortName[kind()];
}
constexpr const char* type_name() const {
@@ -174,13 +223,14 @@ class ValueType {
#undef TYPE_NAME
};
- return kTypeName[kind_];
+ return kTypeName[kind()];
}
private:
- Kind kind_ : 8;
- // TODO(jkummerow): Add and use the following for reference types:
- // uint32_t ref_index_ : 24;
+ using KindField = base::BitField<Kind, 0, 8>;
+ using RefIndexField = base::BitField<uint32_t, 8, 24>;
+
+ uint32_t bit_field_;
};
static_assert(sizeof(ValueType) <= kUInt32Size,
@@ -200,6 +250,7 @@ constexpr ValueType kWasmI64 = ValueType(ValueType::kI64);
constexpr ValueType kWasmF32 = ValueType(ValueType::kF32);
constexpr ValueType kWasmF64 = ValueType(ValueType::kF64);
constexpr ValueType kWasmAnyRef = ValueType(ValueType::kAnyRef);
+constexpr ValueType kWasmEqRef = ValueType(ValueType::kEqRef);
constexpr ValueType kWasmExnRef = ValueType(ValueType::kExnRef);
constexpr ValueType kWasmFuncRef = ValueType(ValueType::kFuncRef);
constexpr ValueType kWasmNullRef = ValueType(ValueType::kNullRef);
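The ValueType change above replaces the plain kind_ member with a 32-bit bit field: the kind sits in the low 8 bits and, for ref/optref, a type index occupies the upper 24 bits. A minimal sketch of that packing, using a simplified stand-in for base::BitField (the enum values and helper names here are illustrative, not V8's):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for base::BitField<T, shift, size>.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t mask() {
    return ((uint32_t{1} << kSize) - 1) << kShift;
  }
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & mask();
  }
  static constexpr T decode(uint32_t bits) {
    return static_cast<T>((bits & mask()) >> kShift);
  }
};

enum Kind : uint8_t { kStmt, kI32, kI64, kRef, kOptRef };  // illustrative subset

using KindField = BitField<Kind, 0, 8>;           // low 8 bits: the kind
using RefIndexField = BitField<uint32_t, 8, 24>;  // upper 24 bits: type index

int main() {
  // Encode an "optref" pointing at type index 5, as ValueType(kOptRef, 5) would.
  uint32_t bits = KindField::encode(kOptRef) | RefIndexField::encode(5);
  assert(KindField::decode(bits) == kOptRef);
  assert(RefIndexField::decode(bits) == 5);
  std::printf("kind=%d ref_index=%u\n", KindField::decode(bits),
              RefIndexField::decode(bits));
  return 0;
}

Because operator== now compares the whole bit field, two optref types with different type indices compare unequal, which the ref_index comparison in IsSubTypeOf relies on.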
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 99cf484b17..5477a18f33 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -24,6 +24,7 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
@@ -46,47 +47,59 @@ namespace wasm {
using trap_handler::ProtectedInstructionData;
-base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
- auto dest_it = regions_.begin();
- auto dest_end = regions_.end();
-
- // Skip over dest regions strictly before {region}.
- while (dest_it != dest_end && dest_it->end() < region.begin()) ++dest_it;
-
- // After last dest region: insert and done.
- if (dest_it == dest_end) {
- regions_.push_back(region);
- return region;
- }
-
- // Adjacent (from below) to dest: merge and done.
- if (dest_it->begin() == region.end()) {
- base::AddressRegion merged_region{region.begin(),
- region.size() + dest_it->size()};
- DCHECK_EQ(merged_region.end(), dest_it->end());
- *dest_it = merged_region;
+base::AddressRegion DisjointAllocationPool::Merge(
+ base::AddressRegion new_region) {
+ // Find the possible insertion position by identifying the first region whose
+ // start address is not less than that of {new_region}. Since there cannot be
+ // any overlap between regions, this also means that the start of {above} is
+ // greater than or equal to the *end* of {new_region}.
+ auto above = regions_.lower_bound(new_region);
+ DCHECK(above == regions_.end() || above->begin() >= new_region.end());
+
+ // Check whether to merge with {above}.
+ if (above != regions_.end() && new_region.end() == above->begin()) {
+ base::AddressRegion merged_region{new_region.begin(),
+ new_region.size() + above->size()};
+ DCHECK_EQ(merged_region.end(), above->end());
+ // Check whether to also merge with the region below.
+ if (above != regions_.begin()) {
+ auto below = above;
+ --below;
+ if (below->end() == new_region.begin()) {
+ merged_region = {below->begin(), below->size() + merged_region.size()};
+ regions_.erase(below);
+ }
+ }
+ auto insert_pos = regions_.erase(above);
+ regions_.insert(insert_pos, merged_region);
return merged_region;
}
- // Before dest: insert and done.
- if (dest_it->begin() > region.end()) {
- regions_.insert(dest_it, region);
- return region;
+ // No element below, and not adjacent to {above}: insert and done.
+ if (above == regions_.begin()) {
+ regions_.insert(above, new_region);
+ return new_region;
}
- // Src is adjacent from above. Merge and check whether the merged region is
- // now adjacent to the next region.
- DCHECK_EQ(dest_it->end(), region.begin());
- dest_it->set_size(dest_it->size() + region.size());
- DCHECK_EQ(dest_it->end(), region.end());
- auto next_dest = dest_it;
- ++next_dest;
- if (next_dest != dest_end && dest_it->end() == next_dest->begin()) {
- dest_it->set_size(dest_it->size() + next_dest->size());
- DCHECK_EQ(dest_it->end(), next_dest->end());
- regions_.erase(next_dest);
+ auto below = above;
+ --below;
+ // Sanity check:
+ DCHECK(above == regions_.end() || below->end() < above->begin());
+
+ // Adjacent to {below}: merge and done.
+ if (below->end() == new_region.begin()) {
+ base::AddressRegion merged_region{below->begin(),
+ below->size() + new_region.size()};
+ DCHECK_EQ(merged_region.end(), new_region.end());
+ regions_.erase(below);
+ regions_.insert(above, merged_region);
+ return merged_region;
}
- return *dest_it;
+
+ // Not adjacent to any existing region: insert between {below} and {above}.
+ DCHECK_LT(below->end(), new_region.begin());
+ regions_.insert(above, new_region);
+ return new_region;
}
base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
@@ -96,24 +109,31 @@ base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
base::AddressRegion DisjointAllocationPool::AllocateInRegion(
size_t size, base::AddressRegion region) {
- for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
+ // Get an iterator to the first contained region whose start address is not
+ // smaller than the start address of {region}. Start the search from the
+ // region one before that (the last one whose start address is smaller).
+ auto it = regions_.lower_bound(region);
+ if (it != regions_.begin()) --it;
+
+ for (auto end = regions_.end(); it != end; ++it) {
base::AddressRegion overlap = it->GetOverlap(region);
if (size > overlap.size()) continue;
base::AddressRegion ret{overlap.begin(), size};
- if (size == it->size()) {
- // We use the full region --> erase the region from {regions_}.
- regions_.erase(it);
- } else if (ret.begin() == it->begin()) {
- // We return a region at the start --> shrink remaining region from front.
- *it = base::AddressRegion{it->begin() + size, it->size() - size};
- } else if (ret.end() == it->end()) {
+ base::AddressRegion old = *it;
+ auto insert_pos = regions_.erase(it);
+ if (size == old.size()) {
+ // We use the full region --> nothing to add back.
+ } else if (ret.begin() == old.begin()) {
+ // We return a region at the start --> shrink old region from front.
+ regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
+ } else if (ret.end() == old.end()) {
// We return a region at the end --> shrink remaining region.
- *it = base::AddressRegion{it->begin(), it->size() - size};
+ regions_.insert(insert_pos, {old.begin(), old.size() - size});
} else {
- // We return something in the middle --> split the remaining region.
- regions_.insert(
- it, base::AddressRegion{it->begin(), ret.begin() - it->begin()});
- *it = base::AddressRegion{ret.end(), it->end() - ret.end()};
+ // We return something in the middle --> split the remaining region
+ // (insert the region with smaller address first).
+ regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
+ regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
}
return ret;
}
@@ -195,24 +215,31 @@ void WasmCode::LogCode(Isolate* isolate) const {
if (IsAnonymous()) return;
ModuleWireBytes wire_bytes(native_module()->wire_bytes());
- WireBytesRef name_ref = native_module()->module()->function_names.Lookup(
- wire_bytes, index(), VectorOf(native_module()->module()->export_table));
+ WireBytesRef name_ref =
+ native_module()->module()->lazily_generated_names.LookupFunctionName(
+ wire_bytes, index(),
+ VectorOf(native_module()->module()->export_table));
WasmName name = wire_bytes.GetNameOrNull(name_ref);
- const std::string& source_map_url = native_module()->module()->source_map_url;
+ const WasmDebugSymbols& debug_symbols =
+ native_module()->module()->debug_symbols;
auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
auto source_map = native_module()->GetWasmSourceMap();
- if (!source_map && !source_map_url.empty() && load_wasm_source_map) {
+ if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
+ !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
+ WasmName external_url =
+ wire_bytes.GetNameOrNull(debug_symbols.external_url);
+ std::string external_url_string(external_url.data(), external_url.size());
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Local<v8::String> source_map_str =
- load_wasm_source_map(v8_isolate, source_map_url.c_str());
+ load_wasm_source_map(v8_isolate, external_url_string.c_str());
native_module()->SetWasmSourceMap(
std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
std::string name_buffer;
- if (kind_ == kWasmToJsWrapper) {
+ if (kind() == kWasmToJsWrapper) {
name_buffer = "wasm-to-js:";
size_t prefix_len = name_buffer.size();
constexpr size_t kMaxSigLength = 128;
@@ -245,6 +272,8 @@ void WasmCode::LogCode(Isolate* isolate) const {
void WasmCode::Validate() const {
#ifdef DEBUG
+ // Scope for foreign WasmCode pointers.
+ WasmCodeRefScope code_ref_scope;
// We expect certain relocation info modes to never appear in {WasmCode}
// objects or to be restricted to a small set of valid values. Hence the
// iteration below does not use a mask, but visits all relocation data.
@@ -305,7 +334,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
Address current_pc) const {
if (name) os << "name: " << name << "\n";
if (!IsAnonymous()) os << "index: " << index() << "\n";
- os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
+ os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
size_t padding = instructions().size() - unpadded_binary_size_;
os << "Body (size = " << instructions().size() << " = "
@@ -401,8 +430,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
return "wasm-to-capi";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
- case WasmCode::kInterpreterEntry:
- return "interpreter entry";
case WasmCode::kJumpTable:
return "jump table";
}
@@ -444,6 +471,16 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
if (engine) engine->FreeDeadCode(dead_code);
}
+int WasmCode::GetSourcePositionBefore(int offset) {
+ int position = kNoSourcePosition;
+ for (SourcePositionTableIterator iterator(source_positions());
+ !iterator.done() && iterator.code_offset() < offset;
+ iterator.Advance()) {
+ position = iterator.source_position().ScriptOffset();
+ }
+ return position;
+}
+
WasmCodeAllocator::OptionalLock::~OptionalLock() {
if (allocator_) allocator_->mutex_.Unlock();
}
@@ -800,10 +837,8 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- // Protect concurrent accesses to {tier_down_}.
- base::MutexGuard guard(&allocation_mutex_);
- return {module(), use_trap_handler_, kRuntimeExceptionSupport,
- enabled_features_, kNoLowerSimd, tier_down_};
+ return {module(), use_trap_handler_, kRuntimeExceptionSupport,
+ enabled_features_, kNoLowerSimd};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
@@ -886,7 +921,8 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
reloc_info.as_vector(), // reloc_info
source_pos.as_vector(), // source positions
WasmCode::kFunction, // kind
- ExecutionTier::kNone}}; // tier
+ ExecutionTier::kNone, // tier
+ kNoDebugging}}; // for_debugging
new_code->MaybePrint(nullptr);
new_code->Validate();
@@ -932,23 +968,23 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
int index, const CodeDesc& desc, int stack_slots,
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier) {
+ ExecutionTier tier, ForDebugging for_debugging) {
Vector<byte> code_space =
code_allocator_.AllocateForCode(this, desc.instr_size);
auto jump_table_ref =
FindJumpTablesForRegion(base::AddressRegionOf(code_space));
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
- source_position_table, kind, tier, code_space,
- jump_table_ref);
+ source_position_table, kind, tier, for_debugging,
+ code_space, jump_table_ref);
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
int index, const CodeDesc& desc, int stack_slots,
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> dst_code_bytes,
- const JumpTablesRef& jump_tables) {
+ ExecutionTier tier, ForDebugging for_debugging,
+ Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
static_cast<size_t>(desc.reloc_size)};
@@ -998,7 +1034,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
- source_position_table, kind, tier}};
+ source_position_table, kind, tier, for_debugging}};
code->MaybePrint();
code->Validate();
@@ -1010,12 +1046,22 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
return PublishCodeLocked(std::move(code));
}
+std::vector<WasmCode*> NativeModule::PublishCode(
+ Vector<std::unique_ptr<WasmCode>> codes) {
+ std::vector<WasmCode*> published_code;
+ published_code.reserve(codes.size());
+ base::MutexGuard lock(&allocation_mutex_);
+ // The published code is put into the top-most surrounding {WasmCodeRefScope}.
+ for (auto& code : codes) {
+ published_code.push_back(PublishCodeLocked(std::move(code)));
+ }
+ return published_code;
+}
+
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
switch (result.kind) {
case WasmCompilationResult::kWasmToJsWrapper:
return WasmCode::Kind::kWasmToJsWrapper;
- case WasmCompilationResult::kInterpreterEntry:
- return WasmCode::Kind::kInterpreterEntry;
case WasmCompilationResult::kFunction:
return WasmCode::Kind::kFunction;
default:
@@ -1040,16 +1086,16 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
"Assume an order on execution tiers");
- // Unless tier down to Liftoff: update code table but avoid to fall back to
- // less optimized code. We use the new code if it was compiled with a higher
- // tier.
uint32_t slot_idx = declared_function_index(module(), code->index());
WasmCode* prior_code = code_table_[slot_idx];
- // TODO(clemensb): Revisit this logic once tier down is fully working.
- const bool prefer_liftoff = tier_down_ || debug_info_;
+ // If we are tiered down, install all debugging code (except for stepping
+ // code, which is only used for a single frame and never installed in the
+ // code table or jump table). Otherwise, install code if it was compiled
+ // with a higher tier.
const bool update_code_table =
- prefer_liftoff ? !prior_code || code->tier() == ExecutionTier::kLiftoff
- : !prior_code || prior_code->tier() < code->tier();
+ tiering_state_ == kTieredDown
+ ? !prior_code || code->for_debugging() == kForDebugging
+ : !prior_code || prior_code->tier() < code->tier();
if (update_code_table) {
code_table_[slot_idx] = code.get();
if (prior_code) {
@@ -1058,21 +1104,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// count cannot drop to zero here.
CHECK(!prior_code->DecRef());
}
- }
-
- // Populate optimized code to the jump table unless there is an active
- // redirection to the interpreter that should be preserved.
- DCHECK_NOT_NULL(main_jump_table_);
- bool update_jump_table =
- update_code_table && !has_interpreter_redirection(code->index());
-
- // Ensure that interpreter entries always populate to the jump table.
- if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
- SetInterpreterRedirection(code->index());
- update_jump_table = true;
- }
- if (update_jump_table) {
PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
}
@@ -1098,7 +1130,7 @@ WasmCode* NativeModule::AddDeserializedCode(
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, unpadded_binary_size, protected_instructions_data,
- reloc_info, source_position_table, kind, tier}};
+ reloc_info, source_position_table, kind, tier, kNoDebugging}};
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -1150,21 +1182,22 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
DCHECK(!code_space.empty());
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
- new WasmCode{this, // native_module
- kAnonymousFuncIndex, // index
- code_space, // instructions
- 0, // stack_slots
- 0, // tagged_parameter_slots
- 0, // safepoint_table_offset
- jump_table_size, // handler_table_offset
- jump_table_size, // constant_pool_offset
- jump_table_size, // code_comments_offset
- jump_table_size, // unpadded_binary_size
- {}, // protected_instructions
- {}, // reloc_info
- {}, // source_pos
- WasmCode::kJumpTable, // kind
- ExecutionTier::kNone}}; // tier
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ code_space, // instructions
+ 0, // stack_slots
+ 0, // tagged_parameter_slots
+ 0, // safepoint_table_offset
+ jump_table_size, // handler_table_offset
+ jump_table_size, // constant_pool_offset
+ jump_table_size, // code_comments_offset
+ jump_table_size, // unpadded_binary_size
+ {}, // protected_instructions
+ {}, // reloc_info
+ {}, // source_pos
+ WasmCode::kJumpTable, // kind
+ ExecutionTier::kNone, // tier
+ kNoDebugging}}; // for_debugging
return PublishCode(std::move(code));
}
@@ -1452,22 +1485,6 @@ WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
return WasmCode::kRuntimeStubCount;
}
-const char* NativeModule::GetRuntimeStubName(Address target) const {
- WasmCode::RuntimeStubId stub_id = GetRuntimeStubId(target);
-
-#define RUNTIME_STUB_NAME(Name) #Name,
-#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
- constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
- RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
-#undef RUNTIME_STUB_NAME
-#undef RUNTIME_STUB_NAME_TRAP
- STATIC_ASSERT(arraysize(runtime_stub_names) ==
- WasmCode::kRuntimeStubCount + 1);
-
- DCHECK_GT(arraysize(runtime_stub_names), stub_id);
- return runtime_stub_names[stub_id];
-}
-
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", this);
// Cancel all background compilation before resetting any field of the
@@ -1779,11 +1796,13 @@ void NativeModule::SampleCodeSize(
histogram->AddSample(code_size_mb);
}
-WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
- return AddCompiledCode({&result, 1})[0];
+std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
+ WasmCompilationResult result) {
+ std::vector<std::unique_ptr<WasmCode>> code = AddCompiledCode({&result, 1});
+ return std::move(code[0]);
}
-std::vector<WasmCode*> NativeModule::AddCompiledCode(
+std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
DCHECK(!results.empty());
// First, allocate code space for all the results.
@@ -1811,68 +1830,37 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
- result.result_tier, this_code_space, jump_tables));
+ result.result_tier, result.for_debugging, this_code_space,
+ jump_tables));
}
DCHECK_EQ(0, code_space.size());
- // Under the {allocation_mutex_}, publish the code. The published code is put
- // into the top-most surrounding {WasmCodeRefScope} by {PublishCodeLocked}.
- std::vector<WasmCode*> code_vector;
- code_vector.reserve(results.size());
- {
- base::MutexGuard lock(&allocation_mutex_);
- for (auto& result : generated_code)
- code_vector.push_back(PublishCodeLocked(std::move(result)));
- }
-
- return code_vector;
+ return generated_code;
}
-bool NativeModule::IsRedirectedToInterpreter(uint32_t func_index) {
- base::MutexGuard lock(&allocation_mutex_);
- return has_interpreter_redirection(func_index);
-}
-
-bool NativeModule::SetTieredDown() {
- // Do not tier down asm.js.
- if (module()->origin != kWasmOrigin) return false;
+void NativeModule::SetTieringState(TieringState new_tiering_state) {
+ // Do not tier down asm.js (just never change the tiering state).
+ if (module()->origin != kWasmOrigin) return;
base::MutexGuard lock(&allocation_mutex_);
- if (tier_down_) return true;
- tier_down_ = true;
- return false;
+ tiering_state_ = new_tiering_state;
}
bool NativeModule::IsTieredDown() {
base::MutexGuard lock(&allocation_mutex_);
- return tier_down_;
+ return tiering_state_ == kTieredDown;
}
-void NativeModule::TierDown(Isolate* isolate) {
- // Do not tier down asm.js.
- if (module()->origin != kWasmOrigin) return;
-
- // Set the flag. Return if it is already set.
- if (SetTieredDown()) return;
-
- // Tier down all functions.
- isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
- ExecutionTier::kLiftoff);
-}
-
-void NativeModule::TierUp(Isolate* isolate) {
- // Do not tier up asm.js.
- if (module()->origin != kWasmOrigin) return;
-
- // Set the flag.
+void NativeModule::TriggerRecompilation() {
+ // Read the tiering state under the lock, then trigger recompilation after
+ // releasing the lock. If the tiering state was changed when the triggered
+ // compilation units finish, code installation will handle that correctly.
+ TieringState current_state;
{
base::MutexGuard lock(&allocation_mutex_);
- tier_down_ = false;
+ current_state = tiering_state_;
}
-
- // Tier up all functions.
- isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
- ExecutionTier::kTurbofan);
+ RecompileNativeModule(this, current_state);
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
@@ -1998,6 +1986,20 @@ void WasmCodeRefScope::AddRef(WasmCode* code) {
if (entry.second) code->IncRef();
}
+const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
+#define RUNTIME_STUB_NAME(Name) #Name,
+#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
+ constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
+ RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
+#undef RUNTIME_STUB_NAME
+#undef RUNTIME_STUB_NAME_TRAP
+ STATIC_ASSERT(arraysize(runtime_stub_names) ==
+ WasmCode::kRuntimeStubCount + 1);
+
+ DCHECK_GT(arraysize(runtime_stub_names), stub_id);
+ return runtime_stub_names[stub_id];
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
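DisjointAllocationPool::Merge above now keeps the free regions in a std::set ordered by start address, so the only merge candidates are the lower_bound neighbor and its predecessor. A condensed standalone sketch of that idea, with a plain begin/size pair standing in for base::AddressRegion (names and the example addresses are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>

struct Region {
  uintptr_t begin;
  size_t size;
  uintptr_t end() const { return begin + size; }
  // Order regions by start address, as the patch does with StartAddressLess.
  bool operator<(const Region& other) const { return begin < other.begin; }
};

// Merge {r} into {regions}, coalescing with adjacent neighbors. Assumes {r}
// does not overlap any existing region (the same precondition as in the patch).
Region Merge(std::set<Region>& regions, Region r) {
  auto above = regions.lower_bound(r);  // first region starting >= r.begin
  if (above != regions.end() && r.end() == above->begin) {
    r.size += above->size;              // coalesce with the region above
    above = regions.erase(above);
  }
  if (above != regions.begin()) {
    auto below = std::prev(above);
    if (below->end() == r.begin) {      // coalesce with the region below
      r = {below->begin, below->size + r.size};
      regions.erase(below);
    }
  }
  regions.insert(r);
  return r;
}

int main() {
  std::set<Region> pool;
  Merge(pool, {0x1000, 0x100});
  Merge(pool, {0x1200, 0x100});
  Region merged = Merge(pool, {0x1100, 0x100});  // fills the gap between the two
  assert(pool.size() == 1);
  assert(merged.begin == 0x1000 && merged.size == 0x300);
  return 0;
}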
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 4b176f3ba6..443f6f3605 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -6,14 +6,15 @@
#define V8_WASM_WASM_CODE_MANAGER_H_
#include <atomic>
-#include <list>
#include <map>
#include <memory>
+#include <set>
#include <unordered_set>
#include <utility>
#include <vector>
#include "src/base/address-region.h"
+#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
@@ -49,11 +50,18 @@ struct WasmModule;
FOREACH_WASM_TRAPREASON(VTRAP) \
V(WasmCompileLazy) \
V(WasmDebugBreak) \
+ V(WasmInt32ToHeapNumber) \
+ V(WasmTaggedNonSmiToInt32) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmTaggedToFloat64) \
+ V(WasmAllocateJSArray) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
+ V(WasmRefFunc) \
V(WasmMemoryGrow) \
V(WasmTableInit) \
V(WasmTableCopy) \
@@ -83,10 +91,9 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
- // Merge the parameter region into this object while preserving ordering of
- // the regions. The assumption is that the passed parameter is not
- // intersecting this object - for example, it was obtained from a previous
- // Allocate. Returns the merged region.
+ // Merge the parameter region into this object. The assumption is that the
+ // passed parameter is not intersecting this object - for example, it was
+ // obtained from a previous Allocate. Returns the merged region.
base::AddressRegion Merge(base::AddressRegion);
// Allocate a contiguous region of size {size}. Return an empty pool on
@@ -98,10 +105,11 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
bool IsEmpty() const { return regions_.empty(); }
- const std::list<base::AddressRegion>& regions() const { return regions_; }
+
+ const auto& regions() const { return regions_; }
private:
- std::list<base::AddressRegion> regions_;
+ std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
};
class V8_EXPORT_PRIVATE WasmCode final {
@@ -110,7 +118,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
kFunction,
kWasmToCapiWrapper,
kWasmToJsWrapper,
- kInterpreterEntry,
kJumpTable
};
@@ -125,9 +132,11 @@ class V8_EXPORT_PRIVATE WasmCode final {
kRuntimeStubCount
};
- Vector<byte> instructions() const { return instructions_; }
+ Vector<byte> instructions() const {
+ return VectorOf(instructions_, static_cast<size_t>(instructions_size_));
+ }
Address instruction_start() const {
- return reinterpret_cast<Address>(instructions_.begin());
+ return reinterpret_cast<Address>(instructions_);
}
Vector<const byte> reloc_info() const {
return {protected_instructions_data().end(),
@@ -144,9 +153,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
// Anonymous functions are functions that don't carry an index.
bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
- Kind kind() const { return kind_; }
+ Kind kind() const { return KindField::decode(flags_); }
NativeModule* native_module() const { return native_module_; }
- ExecutionTier tier() const { return tier_; }
+ ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
Address constant_pool() const;
Address handler_table() const;
int handler_table_size() const;
@@ -159,12 +168,16 @@ class V8_EXPORT_PRIVATE WasmCode final {
int unpadded_binary_size() const { return unpadded_binary_size_; }
int stack_slots() const { return stack_slots_; }
int tagged_parameter_slots() const { return tagged_parameter_slots_; }
- bool is_liftoff() const { return tier_ == ExecutionTier::kLiftoff; }
+ bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
bool contains(Address pc) const {
- return reinterpret_cast<Address>(instructions_.begin()) <= pc &&
- pc < reinterpret_cast<Address>(instructions_.end());
+ return reinterpret_cast<Address>(instructions_) <= pc &&
+ pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
}
+ // Only Liftoff code that was generated for debugging can be inspected
+ // (otherwise debug side table positions would not match up).
+ bool is_inspectable() const { return is_liftoff() && for_debugging(); }
+
Vector<const uint8_t> protected_instructions_data() const {
return {meta_data_.get(),
static_cast<size_t>(protected_instructions_size_)};
@@ -219,6 +232,16 @@ class V8_EXPORT_PRIVATE WasmCode final {
// belonging to different {NativeModule}s. Dead code will be deleted.
static void DecrementRefCount(Vector<WasmCode* const>);
+ // Returns the last source position before {offset}.
+ int GetSourcePositionBefore(int offset);
+
+ // Returns whether this code was generated for debugging. If this returns
+ // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
+ // bailed out.
+ ForDebugging for_debugging() const {
+ return ForDebuggingField::decode(flags_);
+ }
+
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
private:
@@ -232,24 +255,25 @@ class V8_EXPORT_PRIVATE WasmCode final {
Vector<const byte> protected_instructions_data,
Vector<const byte> reloc_info,
Vector<const byte> source_position_table, Kind kind,
- ExecutionTier tier)
- : instructions_(instructions),
- native_module_(native_module),
+ ExecutionTier tier, ForDebugging for_debugging)
+ : native_module_(native_module),
+ instructions_(instructions.begin()),
+ flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
+ ForDebuggingField::encode(for_debugging)),
meta_data_(ConcatenateBytes(
{protected_instructions_data, reloc_info, source_position_table})),
+ instructions_size_(instructions.length()),
reloc_info_size_(reloc_info.length()),
source_positions_size_(source_position_table.length()),
protected_instructions_size_(protected_instructions_data.length()),
index_(index),
- kind_(kind),
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
tagged_parameter_slots_(tagged_parameter_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
code_comments_offset_(code_comments_offset),
- unpadded_binary_size_(unpadded_binary_size),
- tier_(tier) {
+ unpadded_binary_size_(unpadded_binary_size) {
DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
DCHECK_LE(handler_table_offset, unpadded_binary_size);
DCHECK_LE(code_comments_offset, unpadded_binary_size);
@@ -279,32 +303,37 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Returns whether this code becomes dead and needs to be freed.
V8_NOINLINE bool DecRefOnPotentiallyDeadCode();
- Vector<byte> instructions_;
- NativeModule* native_module_ = nullptr;
+ NativeModule* const native_module_ = nullptr;
+ byte* const instructions_;
+ const uint8_t flags_; // Bit field, see below.
// {meta_data_} contains several byte vectors concatenated into one:
// - protected instructions data of size {protected_instructions_size_}
// - relocation info of size {reloc_info_size_}
// - source positions of size {source_positions_size_}
// Note that the protected instructions come first to ensure alignment.
std::unique_ptr<const byte[]> meta_data_;
+ const int instructions_size_;
const int reloc_info_size_;
const int source_positions_size_;
const int protected_instructions_size_;
- int index_;
- Kind kind_;
- int constant_pool_offset_ = 0;
- int stack_slots_ = 0;
+ const int index_;
+ const int constant_pool_offset_;
+ const int stack_slots_;
// Number of tagged parameters passed to this function via the stack. This
// value is used by the stack walker (e.g. GC) to find references.
- int tagged_parameter_slots_ = 0;
+ const int tagged_parameter_slots_;
// We care about safepoint data for wasm-to-js functions, since there may be
// stack/register tagged values for large number conversions.
- int safepoint_table_offset_ = 0;
- int handler_table_offset_ = 0;
- int code_comments_offset_ = 0;
- int unpadded_binary_size_ = 0;
+ const int safepoint_table_offset_;
+ const int handler_table_offset_;
+ const int code_comments_offset_;
+ const int unpadded_binary_size_;
int trap_handler_index_ = -1;
- ExecutionTier tier_;
+
+ // Bits encoded in {flags_}:
+ using KindField = base::BitField8<Kind, 0, 3>;
+ using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
+ using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;
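The {flags_} byte above packs three small enum fields via base::BitField8. Below is a minimal self-contained sketch of that packing pattern, not the actual base::BitField8 implementation; the field widths of 3, 2 and 2 bits are taken from the declarations above, while the enum values are placeholders for illustration only.

#include <cassert>
#include <cstdint>

// Minimal stand-in for the BitField pattern: each field owns a fixed range of
// bits inside a single uint8_t, so several small enums fit into one byte.
template <typename T, int kShift, int kSize>
struct TinyBitField {
  static constexpr uint8_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint8_t encode(T value) {
    return static_cast<uint8_t>(static_cast<unsigned>(value) << kShift);
  }
  static constexpr T decode(uint8_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
  // The next field starts where this one ends, mirroring KindField::Next<...>.
  template <typename U, int kNextSize>
  using Next = TinyBitField<U, kShift + kSize, kNextSize>;
};

enum class Kind : uint8_t { kFunction = 0, kJumpTable = 1 };
enum class Tier : uint8_t { kLiftoff = 0, kTurbofan = 1 };
enum class Debugging : uint8_t { kNoDebugging = 0, kForDebugging = 1 };

using KindField = TinyBitField<Kind, 0, 3>;
using TierField = KindField::Next<Tier, 2>;
using DebuggingField = TierField::Next<Debugging, 2>;

int main() {
  uint8_t flags = KindField::encode(Kind::kFunction) |
                  TierField::encode(Tier::kLiftoff) |
                  DebuggingField::encode(Debugging::kForDebugging);
  assert(KindField::decode(flags) == Kind::kFunction);
  assert(TierField::decode(flags) == Tier::kLiftoff);
  assert(DebuggingField::decode(flags) == Debugging::kForDebugging);
}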
// WasmCode is ref counted. Counters are held by:
// 1) The jump table / code table.
@@ -325,7 +354,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
-STATIC_ASSERT(sizeof(WasmCode) <= 96);
+STATIC_ASSERT(sizeof(WasmCode) <= 88);
WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
@@ -436,14 +465,14 @@ class V8_EXPORT_PRIVATE NativeModule final {
int stack_slots, int tagged_parameter_slots,
Vector<const byte> protected_instructions,
Vector<const byte> source_position_table,
- WasmCode::Kind kind, ExecutionTier tier);
+ WasmCode::Kind kind, ExecutionTier tier,
+ ForDebugging for_debugging);
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the
- // given {WasmCode} object.
+ // given {WasmCode} object. Ownership is transferred to the {NativeModule}.
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
- // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
- WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
+ std::vector<WasmCode*> PublishCode(Vector<std::unique_ptr<WasmCode>>);
WasmCode* AddDeserializedCode(
int index, Vector<const byte> instructions, int stack_slots,
@@ -561,27 +590,28 @@ class V8_EXPORT_PRIVATE NativeModule final {
// must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;
- const char* GetRuntimeStubName(Address runtime_stub_target) const;
-
// Sample the current code size of this module to the given counters.
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
void SampleCodeSize(Counters*, CodeSamplingTime) const;
- WasmCode* AddCompiledCode(WasmCompilationResult);
- std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
+ V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
+ WasmCompilationResult);
+ V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
+ Vector<WasmCompilationResult>);
- // Allows to check whether a function has been redirected to the interpreter
- // by publishing an entry stub with the {Kind::kInterpreterEntry} code kind.
- bool IsRedirectedToInterpreter(uint32_t func_index);
+ // Set a new tiering state, but don't trigger any recompilation yet; use
+ // {TriggerRecompilation} for that. The two steps are split because in some
+ // scenarios we need to drop locks before triggering recompilation.
+ void SetTieringState(TieringState);
- // Set {tier_down_} flag. Return previous state.
- bool SetTieredDown();
+ // Check whether this module is tiered down for debugging.
bool IsTieredDown();
- // Sets the flag, triggers recompilation of all methods to tier down or up,
- // waits for that to complete.
- void TierDown(Isolate* isolate);
- void TierUp(Isolate* isolate);
+ // Trigger a full recompilation of this module, in the tier set previously via
+ // {SetTieringState}. When tiering down, the calling thread contributes to
+ // compilation and only returns once recompilation is done. Tiering up happens
+ // concurrently, so this method might return before it is complete.
+ void TriggerRecompilation();
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
@@ -620,8 +650,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
int tagged_parameter_slots,
Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
- ExecutionTier tier, Vector<uint8_t> code_space,
- const JumpTablesRef& jump_tables_ref);
+ ExecutionTier tier, ForDebugging for_debugging,
+ Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableInRegion(
int jump_table_size, base::AddressRegion,
@@ -638,29 +668,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
void AddCodeSpace(base::AddressRegion,
const WasmCodeAllocator::OptionalLock&);
- // Hold the {allocation_mutex_} when calling this method.
- bool has_interpreter_redirection(uint32_t func_index) {
- DCHECK_LT(func_index, num_functions());
- DCHECK_LE(module_->num_imported_functions, func_index);
- if (!interpreter_redirections_) return false;
- uint32_t bitset_idx = declared_function_index(module(), func_index);
- uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
- return byte & (1 << (bitset_idx % kBitsPerByte));
- }
-
- // Hold the {allocation_mutex_} when calling this method.
- void SetInterpreterRedirection(uint32_t func_index) {
- DCHECK_LT(func_index, num_functions());
- DCHECK_LE(module_->num_imported_functions, func_index);
- if (!interpreter_redirections_) {
- interpreter_redirections_.reset(
- new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
- kBitsPerByte]{});
- }
- uint32_t bitset_idx = declared_function_index(module(), func_index);
- uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
- byte |= 1 << (bitset_idx % kBitsPerByte);
- }
+ // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
+ WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
// {WasmCodeAllocator} manages all code reservations and allocations for this
// {NativeModule}.
@@ -717,10 +726,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// imported functions.
std::unique_ptr<WasmCode*[]> code_table_;
- // Null if no redirections exist, otherwise a bitset over all functions in
- // this module marking those functions that have been redirected.
- std::unique_ptr<uint8_t[]> interpreter_redirections_;
-
// Data (especially jump table) per code space.
std::vector<CodeSpaceData> code_space_data_;
@@ -730,7 +735,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// mutex.
std::unique_ptr<DebugInfo> debug_info_;
- bool tier_down_ = false;
+ TieringState tiering_state_ = kTieredUp;
+
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -881,6 +887,8 @@ class GlobalWasmCodeRef {
DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
};
+const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index acce1eef67..b860ae692c 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -29,10 +29,17 @@ enum ValueTypeCode : uint8_t {
kLocalFuncRef = 0x70,
kLocalAnyRef = 0x6f,
kLocalNullRef = 0x6e,
+ kLocalRef = 0x6d, // GC proposal
+ kLocalOptRef = 0x6c, // GC proposal
+ kLocalEqRef = 0x6b, // GC proposal
+ kLocalI31Ref = 0x6a, // GC proposal
+ kLocalRttRef = 0x69, // GC proposal
kLocalExnRef = 0x68,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
+constexpr uint8_t kWasmStructTypeCode = 0x5f;
+constexpr uint8_t kWasmArrayTypeCode = 0x5e;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -80,10 +87,11 @@ enum SectionCode : int8_t {
// The following sections are custom sections, and are identified using a
// string rather than an integer. Their enumeration values are not guaranteed
// to be consistent.
- kNameSectionCode, // Name section (encoded as a string)
- kSourceMappingURLSectionCode, // Source Map URL section
- kDebugInfoSectionCode, // DWARF section .debug_info
- kCompilationHintsSectionCode, // Compilation hints section
+ kNameSectionCode, // Name section (encoded as a string)
+ kSourceMappingURLSectionCode, // Source Map URL section
+ kDebugInfoSectionCode, // DWARF section .debug_info
+ kExternalDebugInfoSectionCode, // Section encoding the external symbol path
+ kCompilationHintsSectionCode, // Compilation hints section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.cc b/deps/v8/src/wasm/wasm-debug-evaluate.cc
index 80f0b04e0e..019ae5f73e 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.cc
@@ -5,15 +5,19 @@
#include "src/wasm/wasm-debug-evaluate.h"
#include <algorithm>
+#include <limits>
#include "src/api/api-inl.h"
#include "src/codegen/machine-type.h"
+#include "src/execution/frames-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-value.h"
namespace v8 {
namespace internal {
@@ -76,7 +80,8 @@ static bool CheckRangeOutOfBounds(uint32_t offset, uint32_t size,
class DebugEvaluatorProxy {
public:
- explicit DebugEvaluatorProxy(Isolate* isolate) : isolate_(isolate) {}
+ explicit DebugEvaluatorProxy(Isolate* isolate, StandardFrame* frame)
+ : isolate_(isolate), frame_(frame) {}
static void GetMemoryTrampoline(
const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -89,12 +94,13 @@ class DebugEvaluatorProxy {
proxy.GetMemory(offset, size, result);
}
+ // void __getMemory(uint32_t offset, uint32_t size, void* result);
void GetMemory(uint32_t offset, uint32_t size, uint32_t result) {
wasm::ScheduledErrorThrower thrower(isolate_, "debug evaluate proxy");
// Check all overflows.
- if (CheckRangeOutOfBounds(result, size, debuggee_->memory_size(),
+ if (CheckRangeOutOfBounds(offset, size, debuggee_->memory_size(),
&thrower) ||
- CheckRangeOutOfBounds(offset, size, evaluator_->memory_size(),
+ CheckRangeOutOfBounds(result, size, evaluator_->memory_size(),
&thrower)) {
return;
}
@@ -103,17 +109,68 @@ class DebugEvaluatorProxy {
&debuggee_->memory_start()[offset], size);
}
- template <typename CallableT>
- Handle<JSReceiver> WrapAsV8Function(CallableT callback) {
- v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8::Local<v8::Context> context = api_isolate->GetCurrentContext();
- std::string data;
- v8::Local<v8::Function> func =
- v8::Function::New(context, callback,
- v8::External::New(api_isolate, this))
- .ToLocalChecked();
+ // void* __sbrk(intptr_t increment);
+ uint32_t Sbrk(uint32_t increment) {
+ if (increment > 0 && evaluator_->memory_size() <=
+ std::numeric_limits<uint32_t>::max() - increment) {
+ Handle<WasmMemoryObject> memory(evaluator_->memory_object(), isolate_);
+ uint32_t new_pages =
+ (increment - 1 + wasm::kWasmPageSize) / wasm::kWasmPageSize;
+ WasmMemoryObject::Grow(isolate_, memory, new_pages);
+ }
+ return static_cast<uint32_t>(evaluator_->memory_size());
+ }
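The page computation in {Sbrk} above is a plain ceiling division over the wasm page size. A minimal sketch of the same arithmetic, assuming the 64 KiB page granularity that kWasmPageSize denotes:

#include <cassert>
#include <cstdint>

// Ceiling division: how many 64 KiB wasm pages satisfy a byte-sized growth
// request. Mirrors (increment - 1 + kWasmPageSize) / kWasmPageSize above.
constexpr uint32_t kWasmPageSize = 64 * 1024;

constexpr uint32_t PagesToGrow(uint32_t increment_bytes) {
  return (increment_bytes - 1 + kWasmPageSize) / kWasmPageSize;
}

int main() {
  assert(PagesToGrow(1) == 1);                  // any non-zero request needs a page
  assert(PagesToGrow(kWasmPageSize) == 1);      // exactly one page
  assert(PagesToGrow(kWasmPageSize + 1) == 2);  // one byte over rounds up
}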
+
+ static void SbrkTrampoline(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ auto& proxy = GetProxy(args);
+ uint32_t size = proxy.GetArgAsUInt32(args, 0);
- return Utils::OpenHandle(*func);
+ uint32_t result = proxy.Sbrk(size);
+ args.GetReturnValue().Set(result);
+ }
+
+ template <typename T>
+ void write_result(const WasmValue& result, uint32_t result_offset) {
+ wasm::ScheduledErrorThrower thrower(isolate_, "debug evaluate proxy");
+ T val = result.to<T>();
+ static_assert(static_cast<uint32_t>(sizeof(T)) == sizeof(T),
+ "Unexpected size");
+ if (CheckRangeOutOfBounds(result_offset, sizeof(T),
+ evaluator_->memory_size(), &thrower)) {
+ return;
+ }
+ memcpy(&evaluator_->memory_start()[result_offset], &val, sizeof(T));
+ }
+
+ // void __getLocal(uint32_t local, void* result);
+ void GetLocal(uint32_t local, uint32_t result_offset) {
+ WasmValue result = LoadLocalValue(local);
+
+ switch (result.type().kind()) {
+ case ValueType::kI32:
+ write_result<uint32_t>(result, result_offset);
+ break;
+ case ValueType::kI64:
+ write_result<int64_t>(result, result_offset);
+ break;
+ case ValueType::kF32:
+ write_result<float>(result, result_offset);
+ break;
+ case ValueType::kF64:
+ write_result<double>(result, result_offset);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
+ static void GetLocalTrampoline(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ auto& proxy = GetProxy(args);
+ uint32_t local = proxy.GetArgAsUInt32(args, 0);
+ uint32_t result = proxy.GetArgAsUInt32(args, 1);
+
+ proxy.GetLocal(local, result);
}
Handle<JSObject> CreateImports() {
@@ -121,14 +178,16 @@ class DebugEvaluatorProxy {
isolate_->factory()->NewJSObject(isolate_->object_function());
Handle<JSObject> import_module_obj =
isolate_->factory()->NewJSObject(isolate_->object_function());
- Object::SetProperty(isolate_, imports_obj,
- isolate_->factory()->empty_string(), import_module_obj)
+ Object::SetProperty(isolate_, imports_obj, V8String(isolate_, "env"),
+ import_module_obj)
.Assert();
- Object::SetProperty(
- isolate_, import_module_obj, V8String(isolate_, "__getMemory"),
- WrapAsV8Function(DebugEvaluatorProxy::GetMemoryTrampoline))
- .Assert();
+ AddImport(import_module_obj, "__getLocal",
+ DebugEvaluatorProxy::GetLocalTrampoline);
+ AddImport(import_module_obj, "__getMemory",
+ DebugEvaluatorProxy::GetMemoryTrampoline);
+ AddImport(import_module_obj, "__sbrk", DebugEvaluatorProxy::SbrkTrampoline);
+
return imports_obj;
}
@@ -139,6 +198,14 @@ class DebugEvaluatorProxy {
}
private:
+ WasmValue LoadLocalValue(uint32_t local) {
+ DCHECK(frame_->is_wasm());
+ wasm::DebugInfo* debug_info =
+ WasmFrame::cast(frame_)->native_module()->GetDebugInfo();
+ return debug_info->GetLocalValue(local, isolate_, frame_->pc(),
+ frame_->fp(), frame_->callee_fp());
+ }
+
uint32_t GetArgAsUInt32(const v8::FunctionCallbackInfo<v8::Value>& args,
int index) {
// No type/range checks needed on this because it is only called for {args}
@@ -153,7 +220,26 @@ class DebugEvaluatorProxy {
args.Data().As<v8::External>()->Value());
}
+ template <typename CallableT>
+ void AddImport(Handle<JSObject> import_module_obj, const char* function_name,
+ CallableT callback) {
+ v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::Context> context = api_isolate->GetCurrentContext();
+ std::string data;
+ v8::Local<v8::Function> v8_function =
+ v8::Function::New(context, callback,
+ v8::External::New(api_isolate, this))
+ .ToLocalChecked();
+
+ auto wrapped_function = Utils::OpenHandle(*v8_function);
+
+ Object::SetProperty(isolate_, import_module_obj,
+ V8String(isolate_, function_name), wrapped_function)
+ .Assert();
+ }
+
Isolate* isolate_;
+ StandardFrame* frame_;
Handle<WasmInstanceObject> evaluator_;
Handle<WasmInstanceObject> debuggee_;
};
@@ -161,30 +247,62 @@ class DebugEvaluatorProxy {
static bool VerifyEvaluatorInterface(const WasmModule* raw_module,
const ModuleWireBytes& bytes,
ErrorThrower* thrower) {
- for (const WasmFunction& F : raw_module->functions) {
- WireBytesRef name_ref = raw_module->function_names.Lookup(
- bytes, F.func_index, VectorOf(raw_module->export_table));
- std::string name(bytes.start() + name_ref.offset(),
- bytes.start() + name_ref.end_offset());
- if (F.exported && name == "wasm_format") {
- if (!CheckSignature(kWasmI32, {}, F.sig, thrower)) return false;
- } else if (F.imported) {
- if (name == "__getMemory") {
- if (!CheckSignature(kWasmBottom, {kWasmI32, kWasmI32, kWasmI32}, F.sig,
- thrower)) {
- return false;
+ for (const WasmImport imported : raw_module->import_table) {
+ if (imported.kind != ImportExportKindCode::kExternalFunction) continue;
+ const WasmFunction& F = raw_module->functions.at(imported.index);
+ std::string module_name(bytes.start() + imported.module_name.offset(),
+ bytes.start() + imported.module_name.end_offset());
+ std::string field_name(bytes.start() + imported.field_name.offset(),
+ bytes.start() + imported.field_name.end_offset());
+
+ if (module_name == "env") {
+ if (field_name == "__getMemory") {
+ // void __getMemory(uint32_t offset, uint32_t size, void* result);
+ if (CheckSignature(kWasmBottom, {kWasmI32, kWasmI32, kWasmI32}, F.sig,
+ thrower)) {
+ continue;
+ }
+ } else if (field_name == "__getLocal") {
+ // void __getLocal(uint32_t local, void* result)
+ if (CheckSignature(kWasmBottom, {kWasmI32, kWasmI32}, F.sig, thrower)) {
+ continue;
+ }
+ } else if (field_name == "__debug") {
+ // void __debug(uint32_t flag, uint32_t value)
+ if (CheckSignature(kWasmBottom, {kWasmI32, kWasmI32}, F.sig, thrower)) {
+ continue;
+ }
+ } else if (field_name == "__sbrk") {
+ // uint32_t __sbrk(uint32_t increment)
+ if (CheckSignature(kWasmI32, {kWasmI32}, F.sig, thrower)) {
+ continue;
}
}
}
+
+ if (!thrower->error()) {
+ thrower->LinkError("Unknown import \"%s\" \"%s\"", module_name.c_str(),
+ field_name.c_str());
+ }
+
+ return false;
+ }
+ for (const WasmExport& exported : raw_module->export_table) {
+ if (exported.kind != ImportExportKindCode::kExternalFunction) continue;
+ const WasmFunction& F = raw_module->functions.at(exported.index);
+ std::string field_name(bytes.start() + exported.name.offset(),
+ bytes.start() + exported.name.end_offset());
+ if (field_name == "wasm_format") {
+ if (!CheckSignature(kWasmI32, {}, F.sig, thrower)) return false;
+ }
}
return true;
}
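Summarized from the checks above, the accepted evaluator interface can be written as plain C declarations; this is a recap derived from the signature checks, not code from the patch. All imports are resolved from wasm module "env", and wasm_format is the only export that is verified.

#include <cstdint>

// Imports the evaluator module may declare (module "env"):
extern "C" {
void __getMemory(uint32_t offset, uint32_t size, void* result);
void __getLocal(uint32_t local, void* result);
void __debug(uint32_t flag, uint32_t value);
uint32_t __sbrk(uint32_t increment);

// The one export that VerifyEvaluatorInterface checks:
uint32_t wasm_format();
}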
-
} // namespace
Maybe<std::string> DebugEvaluateImpl(
Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- WasmInterpreter::FramePtr frame) {
+ StandardFrame* frame) {
Isolate* isolate = debuggee_instance->GetIsolate();
HandleScope handle_scope(isolate);
WasmEngine* engine = isolate->wasm_engine();
@@ -206,7 +324,7 @@ Maybe<std::string> DebugEvaluateImpl(
}
// Set up imports.
- DebugEvaluatorProxy proxy(isolate);
+ DebugEvaluatorProxy proxy(isolate, frame);
Handle<JSObject> imports = proxy.CreateImports();
// Instantiate Module.
@@ -261,9 +379,9 @@ Maybe<std::string> DebugEvaluateImpl(
MaybeHandle<String> DebugEvaluate(Vector<const byte> snippet,
Handle<WasmInstanceObject> debuggee_instance,
- WasmInterpreter::FramePtr frame) {
+ StandardFrame* frame) {
Maybe<std::string> result =
- DebugEvaluateImpl(snippet, debuggee_instance, std::move(frame));
+ DebugEvaluateImpl(snippet, debuggee_instance, frame);
if (result.IsNothing()) return {};
std::string result_str = result.ToChecked();
return V8String(debuggee_instance->GetIsolate(), result_str.c_str());
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.h b/deps/v8/src/wasm/wasm-debug-evaluate.h
index 21543eb97a..31eba51a3c 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.h
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.h
@@ -16,7 +16,7 @@ namespace wasm {
MaybeHandle<String> V8_EXPORT_PRIVATE DebugEvaluate(
Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- WasmInterpreter::FramePtr frame);
+ StandardFrame* frame);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 10a2e194a7..a8fd6505f0 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -24,6 +24,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-value.h"
#include "src/zone/accounting-allocator.h"
namespace v8 {
@@ -90,8 +91,6 @@ class InterpreterHandle {
Isolate* isolate_;
const WasmModule* module_;
WasmInterpreter interpreter_;
- StepAction next_step_action_ = StepNone;
- int last_step_stack_depth_ = 0;
std::unordered_map<Address, uint32_t> activations_;
uint32_t StartActivation(Address frame_pointer) {
@@ -145,18 +144,6 @@ class InterpreterHandle {
WasmInterpreter* interpreter() { return &interpreter_; }
const WasmModule* module() const { return module_; }
- void PrepareStep(StepAction step_action) {
- next_step_action_ = step_action;
- last_step_stack_depth_ = CurrentStackDepth();
- }
-
- void ClearStepping() { next_step_action_ = StepNone; }
-
- int CurrentStackDepth() {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- return interpreter()->GetThread(0)->GetFrameCount();
- }
-
// Returns true if exited regularly, false if a trap/exception occurred and
// was not handled inside this activation. In the latter case, a pending
// exception will have been set on the isolate.
@@ -178,11 +165,10 @@ class InterpreterHandle {
bool finished = false;
while (!finished) {
// TODO(clemensb): Add occasional StackChecks.
- WasmInterpreter::State state = ContinueExecution(thread);
+ WasmInterpreter::State state = thread->Run();
switch (state) {
case WasmInterpreter::State::PAUSED:
- NotifyDebugEventListeners(thread);
- break;
+ UNREACHABLE();
case WasmInterpreter::State::FINISHED:
// Perfect, just break the switch and exit the loop.
finished = true;
@@ -230,109 +216,9 @@ class InterpreterHandle {
FinishActivation(frame_pointer, activation_id);
- // If we do stepping and it exits wasm interpreter then debugger need to
- // prepare for it.
- if (next_step_action_ != StepNone) {
- // Enter the debugger.
- DebugScope debug_scope(isolate_->debug());
-
- isolate_->debug()->PrepareStep(StepOut);
- }
- ClearStepping();
-
return true;
}
- WasmInterpreter::State ContinueExecution(WasmInterpreter::Thread* thread) {
- switch (next_step_action_) {
- case StepNone:
- return thread->Run();
- case StepIn:
- return thread->Step();
- case StepOut:
- thread->AddBreakFlags(WasmInterpreter::BreakFlag::AfterReturn);
- return thread->Run();
- case StepNext: {
- int stack_depth = thread->GetFrameCount();
- if (stack_depth == last_step_stack_depth_) return thread->Step();
- thread->AddBreakFlags(stack_depth > last_step_stack_depth_
- ? WasmInterpreter::BreakFlag::AfterReturn
- : WasmInterpreter::BreakFlag::AfterCall);
- return thread->Run();
- }
- default:
- UNREACHABLE();
- }
- }
-
- Handle<WasmInstanceObject> GetInstanceObject() {
- StackTraceFrameIterator it(isolate_);
- WasmInterpreterEntryFrame* frame =
- WasmInterpreterEntryFrame::cast(it.frame());
- Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
- // Check that this is indeed the instance which is connected to this
- // interpreter.
- DCHECK_EQ(this, Managed<InterpreterHandle>::cast(
- instance_obj->debug_info().interpreter_handle())
- .raw());
- return instance_obj;
- }
-
- void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) {
- // Enter the debugger.
- DebugScope debug_scope(isolate_->debug());
-
- // Check whether we hit a breakpoint.
- if (isolate_->debug()->break_points_active()) {
- Handle<WasmModuleObject> module_object(
- GetInstanceObject()->module_object(), isolate_);
- Handle<Script> script(module_object->script(), isolate_);
- int position = GetTopPosition(module_object);
- Handle<FixedArray> breakpoints;
- if (WasmScript::CheckBreakPoints(isolate_, script, position)
- .ToHandle(&breakpoints)) {
- // We hit one or several breakpoints. Clear stepping, notify the
- // listeners and return.
- ClearStepping();
- isolate_->debug()->OnDebugBreak(breakpoints);
- return;
- }
- }
-
- // We did not hit a breakpoint, so maybe this pause is related to stepping.
- bool hit_step = false;
- switch (next_step_action_) {
- case StepNone:
- break;
- case StepIn:
- hit_step = true;
- break;
- case StepOut:
- hit_step = thread->GetFrameCount() < last_step_stack_depth_;
- break;
- case StepNext: {
- hit_step = thread->GetFrameCount() == last_step_stack_depth_;
- break;
- }
- default:
- UNREACHABLE();
- }
- if (!hit_step) return;
- ClearStepping();
- isolate_->debug()->OnDebugBreak(isolate_->factory()->empty_fixed_array());
- }
-
- int GetTopPosition(Handle<WasmModuleObject> module_object) {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
- DCHECK_LT(0, thread->GetFrameCount());
-
- auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
- return GetWasmFunctionOffset(module_object->module(),
- frame->function()->func_index) +
- frame->pc();
- }
-
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
Address frame_pointer) {
DCHECK_EQ(1, interpreter()->GetThreadCount());
@@ -362,116 +248,10 @@ class InterpreterHandle {
return frame_range.second - frame_range.first;
}
- WasmInterpreter::FramePtr GetInterpretedFrame(Address frame_pointer,
- int idx) {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
-
- std::pair<uint32_t, uint32_t> frame_range =
- GetActivationFrameRange(thread, frame_pointer);
- DCHECK_LE(0, idx);
- DCHECK_GT(frame_range.second - frame_range.first, idx);
-
- return thread->GetFrame(frame_range.first + idx);
- }
-
- uint64_t NumInterpretedCalls() {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- return interpreter()->GetThread(0)->NumInterpretedCalls();
- }
-
- Handle<JSObject> GetLocalScopeObject(InterpretedFrame* frame,
- Handle<WasmDebugInfo> debug_info) {
- Isolate* isolate = isolate_;
-
- Handle<JSObject> local_scope_object =
- isolate_->factory()->NewJSObjectWithNullProto();
- // Fill parameters and locals.
- int num_locals = frame->GetLocalCount();
- DCHECK_LE(frame->GetParameterCount(), num_locals);
- if (num_locals > 0) {
- Handle<JSObject> locals_obj =
- isolate_->factory()->NewJSObjectWithNullProto();
- Handle<String> locals_name =
- isolate_->factory()->InternalizeString(StaticCharVector("locals"));
- JSObject::AddProperty(isolate, local_scope_object, locals_name,
- locals_obj, NONE);
- NativeModule* native_module =
- debug_info->wasm_instance().module_object().native_module();
- for (int i = 0; i < num_locals; ++i) {
- Handle<Name> name;
- if (!GetLocalNameString(isolate, native_module,
- frame->function()->func_index, i)
- .ToHandle(&name)) {
- name = PrintFToOneByteString<true>(isolate_, "var%d", i);
- }
- WasmValue value = frame->GetLocalValue(i);
- Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
- // {name} can be a string representation of an element index.
- LookupIterator::Key lookup_key{isolate, name};
- LookupIterator it(isolate, locals_obj, lookup_key, locals_obj,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound()) continue;
- Object::AddDataProperty(&it, value_obj, NONE,
- Just(ShouldThrow::kThrowOnError),
- StoreOrigin::kNamed)
- .Check();
- }
- }
-
- // Fill stack values.
- int stack_count = frame->GetStackHeight();
- // Use an object without prototype instead of an Array, for nicer displaying
- // in DevTools. For Arrays, the length field and prototype is displayed,
- // which does not make too much sense here.
- Handle<JSObject> stack_obj =
- isolate_->factory()->NewJSObjectWithNullProto();
- Handle<String> stack_name =
- isolate_->factory()->InternalizeString(StaticCharVector("stack"));
- JSObject::AddProperty(isolate, local_scope_object, stack_name, stack_obj,
- NONE);
- for (int i = 0; i < stack_count; ++i) {
- WasmValue value = frame->GetStackValue(i);
- Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
- JSObject::AddDataElement(stack_obj, static_cast<uint32_t>(i), value_obj,
- NONE);
- }
- return local_scope_object;
- }
-
- Handle<JSObject> GetStackScopeObject(InterpretedFrame* frame,
- Handle<WasmDebugInfo> debug_info) {
- // Fill stack values.
- int stack_count = frame->GetStackHeight();
- // Use an object without prototype instead of an Array, for nicer displaying
- // in DevTools. For Arrays, the length field and prototype is displayed,
- // which does not make too much sense here.
- Handle<JSObject> stack_scope_obj =
- isolate_->factory()->NewJSObjectWithNullProto();
- for (int i = 0; i < stack_count; ++i) {
- WasmValue value = frame->GetStackValue(i);
- Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
- JSObject::AddDataElement(stack_scope_obj, static_cast<uint32_t>(i),
- value_obj, NONE);
- }
- return stack_scope_obj;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterHandle);
};
-int FindByteOffset(int pc_offset, WasmCode* wasm_code) {
- int position = 0;
- SourcePositionTableIterator iterator(wasm_code->source_positions());
- for (SourcePositionTableIterator iterator(wasm_code->source_positions());
- !iterator.done() && iterator.code_offset() < pc_offset;
- iterator.Advance()) {
- position = iterator.source_position().ScriptOffset();
- }
- return position;
-}
-
// Generate a sorted and deduplicated list of byte offsets for this function's
// current positions on the stack.
std::vector<int> StackFramePositions(int func_index, Isolate* isolate) {
@@ -479,14 +259,11 @@ std::vector<int> StackFramePositions(int func_index, Isolate* isolate) {
WasmCodeRefScope code_ref_scope;
for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
if (!it.is_wasm()) continue;
- WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
if (static_cast<int>(frame->function_index()) != func_index) continue;
WasmCode* wasm_code = frame->wasm_code();
if (!wasm_code->is_liftoff()) continue;
- int pc_offset =
- static_cast<int>(frame->pc() - wasm_code->instruction_start());
- int byte_offset = FindByteOffset(pc_offset, wasm_code);
- byte_offsets.push_back(byte_offset);
+ byte_offsets.push_back(frame->byte_offset());
}
std::sort(byte_offsets.begin(), byte_offsets.end());
auto last = std::unique(byte_offsets.begin(), byte_offsets.end());
@@ -527,19 +304,26 @@ Address FindNewPC(WasmCode* wasm_code, int byte_offset,
} // namespace
-Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject> instance) {
+Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
Isolate* isolate = instance->GetIsolate();
- Handle<JSObject> global_scope_object =
+ Handle<JSObject> module_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
if (instance->has_memory_object()) {
- Handle<String> name =
- isolate->factory()->InternalizeString(StaticCharVector("memory"));
+ Handle<String> name;
+ // TODO(duongn): extend the logic when multiple memories are supported.
+ const uint32_t memory_index = 0;
+ if (!WasmInstanceObject::GetMemoryNameOrNull(isolate, instance,
+ memory_index)
+ .ToHandle(&name)) {
+ const char* label = "memory%d";
+ name = PrintFToOneByteString<true>(isolate, label, memory_index);
+ }
Handle<JSArrayBuffer> memory_buffer(
instance->memory_object().array_buffer(), isolate);
Handle<JSTypedArray> uint8_array = isolate->factory()->NewJSTypedArray(
kExternalUint8Array, memory_buffer, 0, memory_buffer->byte_length());
- JSObject::AddProperty(isolate, global_scope_object, name, uint8_array,
+ JSObject::AddProperty(isolate, module_scope_object, name, uint8_array,
NONE);
}
@@ -549,7 +333,7 @@ Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject> instance) {
isolate->factory()->NewJSObjectWithNullProto();
Handle<String> globals_name =
isolate->factory()->InternalizeString(StaticCharVector("globals"));
- JSObject::AddProperty(isolate, global_scope_object, globals_name,
+ JSObject::AddProperty(isolate, module_scope_object, globals_name,
globals_obj, NONE);
for (uint32_t i = 0; i < globals.size(); ++i) {
@@ -565,8 +349,7 @@ Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject> instance) {
JSObject::AddProperty(isolate, globals_obj, name, value_obj, NONE);
}
}
-
- return global_scope_object;
+ return module_scope_object;
}
class DebugInfoImpl {
@@ -574,26 +357,49 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
+ int GetNumLocals(Isolate* isolate, Address pc) {
+ FrameInspectionScope scope(this, isolate, pc);
+ if (!scope.is_inspectable()) return 0;
+ return scope.debug_side_table->num_locals();
+ }
+
+ WasmValue GetLocalValue(int local, Isolate* isolate, Address pc, Address fp,
+ Address debug_break_fp) {
+ FrameInspectionScope scope(this, isolate, pc);
+ return GetValue(scope.debug_side_table_entry, local, fp, debug_break_fp);
+ }
+
+ int GetStackDepth(Isolate* isolate, Address pc) {
+ FrameInspectionScope scope(this, isolate, pc);
+ if (!scope.is_inspectable()) return 0;
+ int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
+ int value_count = scope.debug_side_table_entry->num_values();
+ return value_count - num_locals;
+ }
+
+ WasmValue GetStackValue(int index, Isolate* isolate, Address pc, Address fp,
+ Address debug_break_fp) {
+ FrameInspectionScope scope(this, isolate, pc);
+ int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
+ int value_count = scope.debug_side_table_entry->num_values();
+ if (num_locals + index >= value_count) return {};
+ return GetValue(scope.debug_side_table_entry, num_locals + index, fp,
+ debug_break_fp);
+ }
+
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
+ FrameInspectionScope scope(this, isolate, pc);
Handle<JSObject> local_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
- wasm::WasmCodeRefScope wasm_code_ref_scope;
- wasm::WasmCode* code =
- isolate->wasm_engine()->code_manager()->LookupCode(pc);
- // Only Liftoff code can be inspected.
- if (!code->is_liftoff()) return local_scope_object;
+ if (!scope.is_inspectable()) return local_scope_object;
auto* module = native_module_->module();
- auto* function = &module->functions[code->index()];
- auto* debug_side_table = GetDebugSideTable(code, isolate->allocator());
- int pc_offset = static_cast<int>(pc - code->instruction_start());
- auto* debug_side_table_entry = debug_side_table->GetEntry(pc_offset);
- DCHECK_NOT_NULL(debug_side_table_entry);
+ auto* function = &module->functions[scope.code->index()];
// Fill parameters and locals.
- int num_locals = static_cast<int>(debug_side_table->num_locals());
+ int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
DCHECK_LE(static_cast<int>(function->sig->parameter_count()), num_locals);
if (num_locals > 0) {
Handle<JSObject> locals_obj =
@@ -610,7 +416,7 @@ class DebugInfoImpl {
name = PrintFToOneByteString<true>(isolate, "var%d", i);
}
WasmValue value =
- GetValue(debug_side_table_entry, i, fp, debug_break_fp);
+ GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
// {name} can be a string representation of an element index.
LookupIterator::Key lookup_key{isolate, name};
@@ -623,50 +429,26 @@ class DebugInfoImpl {
.Check();
}
}
-
- // Fill stack values.
- // Use an object without prototype instead of an Array, for nicer displaying
- // in DevTools. For Arrays, the length field and prototype is displayed,
- // which does not make too much sense here.
- Handle<JSObject> stack_obj = isolate->factory()->NewJSObjectWithNullProto();
- Handle<String> stack_name =
- isolate->factory()->InternalizeString(StaticCharVector("stack"));
- JSObject::AddProperty(isolate, local_scope_object, stack_name, stack_obj,
- NONE);
- int value_count = debug_side_table_entry->num_values();
- for (int i = num_locals; i < value_count; ++i) {
- WasmValue value = GetValue(debug_side_table_entry, i, fp, debug_break_fp);
- Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
- JSObject::AddDataElement(stack_obj, static_cast<uint32_t>(i - num_locals),
- value_obj, NONE);
- }
return local_scope_object;
}
Handle<JSObject> GetStackScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
+ FrameInspectionScope scope(this, isolate, pc);
Handle<JSObject> stack_scope_obj =
isolate->factory()->NewJSObjectWithNullProto();
- wasm::WasmCodeRefScope wasm_code_ref_scope;
-
- wasm::WasmCode* code =
- isolate->wasm_engine()->code_manager()->LookupCode(pc);
- // Only Liftoff code can be inspected.
- if (!code->is_liftoff()) return stack_scope_obj;
- auto* debug_side_table = GetDebugSideTable(code, isolate->allocator());
- int pc_offset = static_cast<int>(pc - code->instruction_start());
- auto* debug_side_table_entry = debug_side_table->GetEntry(pc_offset);
- DCHECK_NOT_NULL(debug_side_table_entry);
+ if (!scope.is_inspectable()) return stack_scope_obj;
// Fill stack values.
// Use an object without prototype instead of an Array, for nicer displaying
// in DevTools. For Arrays, the length field and prototype are displayed,
// which does not make too much sense here.
- int num_locals = static_cast<int>(debug_side_table->num_locals());
- int value_count = debug_side_table_entry->num_values();
+ int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
+ int value_count = scope.debug_side_table_entry->num_values();
for (int i = num_locals; i < value_count; ++i) {
- WasmValue value = GetValue(debug_side_table_entry, i, fp, debug_break_fp);
+ WasmValue value =
+ GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
JSObject::AddDataElement(stack_scope_obj,
static_cast<uint32_t>(i - num_locals), value_obj,
@@ -684,13 +466,12 @@ class DebugInfoImpl {
return local_names_->GetName(func_index, local_index);
}
- void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
- Isolate* current_isolate) {
- if (func_index == flooded_function_index_) {
- // We should not be flooding a function that is already flooded.
- DCHECK(!(offsets.size() == 1 && offsets[0] == 0));
- flooded_function_index_ = -1;
- }
+ WasmCode* RecompileLiftoffWithBreakpoints(
+ int func_index, Vector<int> offsets, Vector<int> extra_source_positions) {
+ // During compilation, we cannot hold the lock, since compilation takes the
+ // {NativeModule} lock, which could lead to deadlocks.
+ mutex_.AssertUnheld();
+
// Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
@@ -701,94 +482,110 @@ class DebugInfoImpl {
wire_bytes.begin() + function->code.end_offset()};
std::unique_ptr<DebugSideTable> debug_sidetable;
- // Generate additional source positions for current stack frame positions.
- // These source positions are used to find return addresses in the new code.
- std::vector<int> stack_frame_positions =
- StackFramePositions(func_index, current_isolate);
-
+ ForDebugging for_debugging =
+ offsets.size() == 1 && offsets[0] == 0 ? kForStepping : kForDebugging;
WasmCompilationResult result = ExecuteLiftoffCompilation(
- native_module_->engine()->allocator(), &env, body, func_index, nullptr,
- nullptr, offsets, &debug_sidetable, VectorOf(stack_frame_positions));
+ native_module_->engine()->allocator(), &env, body, func_index,
+ for_debugging, nullptr, nullptr, offsets, &debug_sidetable,
+ extra_source_positions);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed");
DCHECK_NOT_NULL(debug_sidetable);
- WasmCodeRefScope wasm_code_ref_scope;
- WasmCode* new_code = native_module_->AddCompiledCode(std::move(result));
+ WasmCode* new_code = native_module_->PublishCode(
+ native_module_->AddCompiledCode(std::move(result)));
+
+ DCHECK(new_code->is_inspectable());
bool added =
debug_side_tables_.emplace(new_code, std::move(debug_sidetable)).second;
DCHECK(added);
USE(added);
- UpdateReturnAddresses(current_isolate, new_code);
+ return new_code;
}
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
- // Hold the mutex while setting the breakpoint. This guards against multiple
- // isolates setting breakpoints at the same time. We don't really support
- // that scenario yet, but concurrently compiling and installing different
- // Liftoff variants of a function would be problematic.
- base::MutexGuard guard(&mutex_);
-
- // offset == 0 indicates flooding and should not happen here.
- DCHECK_NE(0, offset);
-
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point != breakpoints.end() && *insertion_point == offset) {
- // The breakpoint is already set.
- return;
+ std::vector<int> breakpoints_copy;
+ {
+ // Hold the mutex while modifying the set of breakpoints, but release it
+ // before compiling the new code (see comment in
+ // {RecompileLiftoffWithBreakpoints}). This needs to be revisited once we
+ // support setting different breakpoints in different isolates
+ // (https://crbug.com/v8/10351).
+ base::MutexGuard guard(&mutex_);
+
+ // offset == 0 indicates flooding and should not happen here.
+ DCHECK_NE(0, offset);
+
+ std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ // The breakpoint is already set.
+ return;
+ }
+ breakpoints.insert(insertion_point, offset);
+ breakpoints_copy = breakpoints;
}
- breakpoints.insert(insertion_point, offset);
- // No need to recompile if the function is already flooded.
- if (func_index == flooded_function_index_) return;
+ UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
+ }
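{SetBreakpoint} and {RemoveBreakpoint} keep the per-function offsets sorted and use std::lower_bound to find the insertion or removal point. A minimal standalone sketch of that bookkeeping follows; it is not V8 code, and the locking and recompilation steps are omitted.

#include <algorithm>
#include <cassert>
#include <vector>

// Per-function breakpoint offsets stay sorted; duplicates are ignored.
bool InsertBreakpoint(std::vector<int>& breakpoints, int offset) {
  auto it = std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
  if (it != breakpoints.end() && *it == offset) return false;  // already set
  breakpoints.insert(it, offset);
  return true;
}

bool RemoveBreakpoint(std::vector<int>& breakpoints, int offset) {
  auto it = std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
  if (it == breakpoints.end() || *it != offset) return false;  // not set
  breakpoints.erase(it);
  return true;
}

int main() {
  std::vector<int> bps;
  assert(InsertBreakpoint(bps, 17));
  assert(InsertBreakpoint(bps, 5));
  assert(!InsertBreakpoint(bps, 17));        // duplicate is a no-op
  assert((bps == std::vector<int>{5, 17}));  // stays sorted
  assert(RemoveBreakpoint(bps, 5));
  assert(!RemoveBreakpoint(bps, 5));
}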
+
+ void UpdateBreakpoints(int func_index, Vector<int> breakpoints,
+ Isolate* current_isolate) {
+ // Generate additional source positions for current stack frame positions.
+ // These source positions are used to find return addresses in the new code.
+ std::vector<int> stack_frame_positions =
+ StackFramePositions(func_index, current_isolate);
- RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
- current_isolate);
+ WasmCodeRefScope wasm_code_ref_scope;
+ WasmCode* new_code = RecompileLiftoffWithBreakpoints(
+ func_index, breakpoints, VectorOf(stack_frame_positions));
+ UpdateReturnAddresses(current_isolate, new_code);
}
- void FloodWithBreakpoints(int func_index, Isolate* current_isolate) {
- base::MutexGuard guard(&mutex_);
+ void FloodWithBreakpoints(WasmFrame* frame, Isolate* current_isolate,
+ ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding.
int offset = 0;
- RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1),
- current_isolate);
+ WasmCodeRefScope wasm_code_ref_scope;
+ DCHECK(frame->wasm_code()->is_liftoff());
+ // Generate an additional source position for the current byte offset.
+ int byte_offset = frame->byte_offset();
+ WasmCode* new_code = RecompileLiftoffWithBreakpoints(
+ frame->function_index(), VectorOf(&offset, 1),
+ VectorOf(&byte_offset, 1));
+ UpdateReturnAddress(frame, new_code, return_location);
}
void PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
StackTraceFrameIterator it(isolate, break_frame_id);
DCHECK(!it.done());
- DCHECK(it.frame()->is_wasm_compiled());
- WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ DCHECK(it.frame()->is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
StepAction step_action = isolate->debug()->last_step_action();
+ // If we are flooding the top frame, the return location is after a
+ // breakpoint. Otherwise, it's after a call.
+ ReturnLocation return_location = kAfterBreakpoint;
+
// If we are at a return instruction, then any stepping action is equivalent
// to StepOut, and we need to flood the parent function.
if (IsAtReturn(frame) || step_action == StepOut) {
it.Advance();
- if (it.done() || !it.frame()->is_wasm_compiled()) return;
- frame = WasmCompiledFrame::cast(it.frame());
+ if (it.done() || !it.frame()->is_wasm()) return;
+ frame = WasmFrame::cast(it.frame());
+ return_location = kAfterWasmCall;
}
- if (static_cast<int>(frame->function_index()) != flooded_function_index_) {
- if (flooded_function_index_ != -1) {
- std::vector<int>& breakpoints =
- breakpoints_per_function_[flooded_function_index_];
- RecompileLiftoffWithBreakpoints(flooded_function_index_,
- VectorOf(breakpoints), isolate);
- }
- FloodWithBreakpoints(frame->function_index(), isolate);
- flooded_function_index_ = frame->function_index();
- }
+ FloodWithBreakpoints(frame, isolate, return_location);
stepping_frame_ = frame->id();
}
void ClearStepping() { stepping_frame_ = NO_ID; }
- bool IsStepping(WasmCompiledFrame* frame) {
+ bool IsStepping(WasmFrame* frame) {
Isolate* isolate = frame->wasm_instance().GetIsolate();
StepAction last_step_action = isolate->debug()->last_step_action();
return stepping_frame_ == frame->id() || last_step_action == StepIn;
@@ -796,19 +593,23 @@ class DebugInfoImpl {
void RemoveBreakpoint(int func_index, int position,
Isolate* current_isolate) {
- base::MutexGuard guard(&mutex_);
- const auto& function = native_module_->module()->functions[func_index];
- int offset = position - function.code.offset();
-
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- DCHECK_LT(0, offset);
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ std::vector<int> breakpoints_copy;
+ {
+ base::MutexGuard guard(&mutex_);
+ const auto& function = native_module_->module()->functions[func_index];
+ int offset = position - function.code.offset();
+
+ std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
+ DCHECK_LT(0, offset);
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point == breakpoints.end()) return;
+ if (*insertion_point != offset) return;
breakpoints.erase(insertion_point);
+ breakpoints_copy = breakpoints;
}
- RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
- current_isolate);
+
+ UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
}
void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
@@ -819,11 +620,39 @@ class DebugInfoImpl {
}
private:
+ struct FrameInspectionScope {
+ FrameInspectionScope(DebugInfoImpl* debug_info, Isolate* isolate,
+ Address pc)
+ : code(isolate->wasm_engine()->code_manager()->LookupCode(pc)),
+ pc_offset(static_cast<int>(pc - code->instruction_start())),
+ debug_side_table(
+ code->is_inspectable()
+ ? debug_info->GetDebugSideTable(code, isolate->allocator())
+ : nullptr),
+ debug_side_table_entry(debug_side_table
+ ? debug_side_table->GetEntry(pc_offset)
+ : nullptr) {
+ DCHECK_IMPLIES(code->is_inspectable(), debug_side_table_entry != nullptr);
+ }
+
+ bool is_inspectable() const { return debug_side_table_entry; }
+
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ wasm::WasmCode* code;
+ int pc_offset;
+ const DebugSideTable* debug_side_table;
+ const DebugSideTable::Entry* debug_side_table_entry;
+ };
+
const DebugSideTable* GetDebugSideTable(WasmCode* code,
AccountingAllocator* allocator) {
- base::MutexGuard guard(&mutex_);
- if (auto& existing_table = debug_side_tables_[code]) {
- return existing_table.get();
+ DCHECK(code->is_inspectable());
+ {
+ // Only hold the mutex temporarily. We can't hold it while generating the
+ // debug side table, because compilation takes the {NativeModule} lock.
+ base::MutexGuard guard(&mutex_);
+ auto it = debug_side_tables_.find(code);
+ if (it != debug_side_tables_.end()) return it->second.get();
}
// Otherwise create the debug side table now.
@@ -839,7 +668,10 @@ class DebugInfoImpl {
DebugSideTable* ret = debug_side_table.get();
// Install into cache and return.
- debug_side_tables_[code] = std::move(debug_side_table);
+ {
+ base::MutexGuard guard(&mutex_);
+ debug_side_tables_[code] = std::move(debug_side_table);
+ }
return ret;
}
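{GetDebugSideTable} checks the cache under the mutex, builds the table with the mutex released (compilation takes the {NativeModule} lock, so holding both risks a deadlock), then re-takes the mutex to install the result. The following is a rough sketch of that locking pattern only, with a placeholder computation and first-install-wins semantics chosen for simplicity:

#include <map>
#include <memory>
#include <mutex>

struct Table { int data = 0; };

class Cache {
 public:
  const Table* Get(int key) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      auto it = cache_.find(key);
      if (it != cache_.end()) return it->second.get();
    }
    // Compute without holding the lock; two threads may race here, which only
    // costs a redundant computation, not correctness.
    auto table = std::make_unique<Table>();
    table->data = key * 2;  // stand-in for the real, expensive work
    const Table* result = table.get();
    {
      std::lock_guard<std::mutex> guard(mutex_);
      auto& slot = cache_[key];
      if (!slot) slot = std::move(table);  // keep the first installed table
      result = slot.get();
    }
    return result;
  }

 private:
  std::mutex mutex_;
  std::map<int, std::unique_ptr<Table>> cache_;
};

int main() { Cache cache; return cache.Get(21)->data == 42 ? 0 : 1; }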
@@ -911,7 +743,6 @@ class DebugInfoImpl {
// code. The frame layout itself should be independent of breakpoints.
// TODO(thibaudm): update other threads as well.
void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) {
- DCHECK(new_code->is_liftoff());
// The first return location is after the breakpoint, others are after wasm
// calls.
ReturnLocation return_location = kAfterBreakpoint;
@@ -920,25 +751,31 @@ class DebugInfoImpl {
// We still need the flooded function for stepping.
if (it.frame()->id() == stepping_frame_) continue;
if (!it.is_wasm()) continue;
- WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
if (frame->native_module() != new_code->native_module()) continue;
if (frame->function_index() != new_code->index()) continue;
- WasmCode* old_code = frame->wasm_code();
- if (!old_code->is_liftoff()) continue;
- int pc_offset =
- static_cast<int>(frame->pc() - old_code->instruction_start());
- int position = frame->position();
- int byte_offset = FindByteOffset(pc_offset, old_code);
- Address new_pc = FindNewPC(new_code, byte_offset, return_location);
- PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
- kSystemPointerSize);
- USE(position);
- // The frame position should still be the same after OSR.
- DCHECK_EQ(position, frame->position());
+ if (!frame->wasm_code()->is_liftoff()) continue;
+ UpdateReturnAddress(frame, new_code, return_location);
}
}
- bool IsAtReturn(WasmCompiledFrame* frame) {
+ void UpdateReturnAddress(WasmFrame* frame, WasmCode* new_code,
+ ReturnLocation return_location) {
+ DCHECK(new_code->is_liftoff());
+ DCHECK_EQ(frame->function_index(), new_code->index());
+ DCHECK_EQ(frame->native_module(), new_code->native_module());
+ DCHECK(frame->wasm_code()->is_liftoff());
+#ifdef DEBUG
+ int old_position = frame->position();
+#endif
+ Address new_pc = FindNewPC(new_code, frame->byte_offset(), return_location);
+ PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
+ kSystemPointerSize);
+ // The frame position should still be the same after OSR.
+ DCHECK_EQ(old_position, frame->position());
+ }
+
+ bool IsAtReturn(WasmFrame* frame) {
DisallowHeapAllocation no_gc;
int position = frame->position();
NativeModule* native_module =
@@ -967,10 +804,9 @@ class DebugInfoImpl {
// function).
std::unordered_map<int, std::vector<int>> breakpoints_per_function_;
- // Store the frame ID when stepping, to avoid breaking in recursive calls of
- // the same function.
+ // Store the frame ID when stepping, to avoid overwriting that frame when
+ // setting or removing a breakpoint.
StackFrameId stepping_frame_ = NO_ID;
- int flooded_function_index_ = -1;
DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
@@ -980,6 +816,24 @@ DebugInfo::DebugInfo(NativeModule* native_module)
DebugInfo::~DebugInfo() = default;
+int DebugInfo::GetNumLocals(Isolate* isolate, Address pc) {
+ return impl_->GetNumLocals(isolate, pc);
+}
+
+WasmValue DebugInfo::GetLocalValue(int local, Isolate* isolate, Address pc,
+ Address fp, Address debug_break_fp) {
+ return impl_->GetLocalValue(local, isolate, pc, fp, debug_break_fp);
+}
+
+int DebugInfo::GetStackDepth(Isolate* isolate, Address pc) {
+ return impl_->GetStackDepth(isolate, pc);
+}
+
+WasmValue DebugInfo::GetStackValue(int index, Isolate* isolate, Address pc,
+ Address fp, Address debug_break_fp) {
+ return impl_->GetStackValue(index, isolate, pc, fp, debug_break_fp);
+}
+
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
Address fp,
Address debug_break_fp) {
@@ -1007,7 +861,7 @@ void DebugInfo::PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
void DebugInfo::ClearStepping() { impl_->ClearStepping(); }
-bool DebugInfo::IsStepping(WasmCompiledFrame* frame) {
+bool DebugInfo::IsStepping(WasmFrame* frame) {
return impl_->IsStepping(frame);
}
@@ -1022,40 +876,6 @@ void DebugInfo::RemoveDebugSideTables(Vector<WasmCode* const> code) {
} // namespace wasm
-namespace {
-
-wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
- Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> handle(debug_info->interpreter_handle(), isolate);
- if (handle->IsUndefined(isolate)) {
- // Use the maximum stack size to estimate the maximum size of the
- // interpreter. The interpreter keeps its own stack internally, and the size
- // of the stack should dominate the overall size of the interpreter. We
- // multiply by '2' to account for the growing strategy for the backing store
- // of the stack.
- size_t interpreter_size = FLAG_stack_size * KB * 2;
- handle = Managed<wasm::InterpreterHandle>::Allocate(
- isolate, interpreter_size, isolate, debug_info);
- debug_info->set_interpreter_handle(*handle);
- }
-
- return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->raw();
-}
-
-wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo debug_info) {
- Object handle_obj = debug_info.interpreter_handle();
- DCHECK(!handle_obj.IsUndefined());
- return Managed<wasm::InterpreterHandle>::cast(handle_obj).raw();
-}
-
-wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
- Object handle_obj = debug_info.interpreter_handle();
- if (handle_obj.IsUndefined()) return nullptr;
- return Managed<wasm::InterpreterHandle>::cast(handle_obj).raw();
-}
-
-} // namespace
-
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
@@ -1084,128 +904,6 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
}
// static
-void WasmDebugInfo::PrepareStepIn(Handle<WasmDebugInfo> debug_info,
- int func_index) {
- Isolate* isolate = debug_info->GetIsolate();
- auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- RedirectToInterpreter(debug_info, Vector<int>(&func_index, 1));
- const wasm::WasmFunction* func = &handle->module()->functions[func_index];
- handle->interpreter()->PrepareStepIn(func);
- // Debug break would be considered as a step-in inside wasm.
- handle->PrepareStep(StepAction::StepIn);
-}
-
-// static
-void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
- int func_index, int offset) {
- Isolate* isolate = debug_info->GetIsolate();
- auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- RedirectToInterpreter(debug_info, Vector<int>(&func_index, 1));
- const wasm::WasmFunction* func = &handle->module()->functions[func_index];
- handle->interpreter()->SetBreakpoint(func, offset, true);
-}
-
-// static
-void WasmDebugInfo::ClearBreakpoint(Handle<WasmDebugInfo> debug_info,
- int func_index, int offset) {
- Isolate* isolate = debug_info->GetIsolate();
- auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- // TODO(leese): If there are no more breakpoints left it would be good to
- // undo redirecting to the interpreter.
- const wasm::WasmFunction* func = &handle->module()->functions[func_index];
- handle->interpreter()->SetBreakpoint(func, offset, false);
-}
-
-// static
-void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
- Vector<int> func_indexes) {
- Isolate* isolate = debug_info->GetIsolate();
- // Ensure that the interpreter is instantiated.
- GetOrCreateInterpreterHandle(isolate, debug_info);
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- wasm::NativeModule* native_module = instance->module_object().native_module();
- const wasm::WasmModule* module = instance->module();
-
- // We may modify the wasm jump table.
- wasm::NativeModuleModificationScope native_module_modification_scope(
- native_module);
-
- for (int func_index : func_indexes) {
- DCHECK_LE(0, func_index);
- DCHECK_GT(module->functions.size(), func_index);
- // Note that this is just a best effort check. Multiple threads can still
- // race at redirecting the same function to the interpreter, which is OK.
- if (native_module->IsRedirectedToInterpreter(func_index)) continue;
-
- wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCompilationResult result = compiler::CompileWasmInterpreterEntry(
- isolate->wasm_engine(), native_module->enabled_features(), func_index,
- module->functions[func_index].sig);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kInterpreterEntry,
- wasm::ExecutionTier::kInterpreter);
- native_module->PublishCode(std::move(wasm_code));
- DCHECK(native_module->IsRedirectedToInterpreter(func_index));
- }
-}
-
-void WasmDebugInfo::PrepareStep(StepAction step_action) {
- GetInterpreterHandle(*this)->PrepareStep(step_action);
-}
-
-// static
-bool WasmDebugInfo::RunInterpreter(Isolate* isolate,
- Handle<WasmDebugInfo> debug_info,
- Address frame_pointer, int func_index,
- Vector<wasm::WasmValue> argument_values,
- Vector<wasm::WasmValue> return_values) {
- DCHECK_LE(0, func_index);
- auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- return handle->Execute(instance, frame_pointer,
- static_cast<uint32_t>(func_index), argument_values,
- return_values);
-}
-
-std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
- Address frame_pointer) {
- return GetInterpreterHandle(*this)->GetInterpretedStack(frame_pointer);
-}
-
-int WasmDebugInfo::NumberOfActiveFrames(Address frame_pointer) {
- return GetInterpreterHandle(*this)->NumberOfActiveFrames(frame_pointer);
-}
-
-wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
- Address frame_pointer, int idx) {
- return GetInterpreterHandle(*this)->GetInterpretedFrame(frame_pointer, idx);
-}
-
-uint64_t WasmDebugInfo::NumInterpretedCalls() {
- auto* handle = GetInterpreterHandleOrNull(*this);
- return handle ? handle->NumInterpretedCalls() : 0;
-}
-
-// static
-Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
- Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- auto* interp_handle = GetInterpreterHandle(*debug_info);
- auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
- return interp_handle->GetLocalScopeObject(frame.get(), debug_info);
-}
-
-// static
-Handle<JSObject> WasmDebugInfo::GetStackScopeObject(
- Handle<WasmDebugInfo> debug_info, Address frame_pointer, int frame_index) {
- auto* interp_handle = GetInterpreterHandle(*debug_info);
- auto frame = interp_handle->GetInterpretedFrame(frame_pointer, frame_index);
- return interp_handle->GetStackScopeObject(frame.get(), debug_info);
-}
-
-// static
Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
const wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
@@ -1315,61 +1013,71 @@ bool WasmScript::SetBreakPointForFunction(Handle<Script> script, int func_index,
WasmScript::AddBreakpointToInfo(script, func.code.offset() + offset,
break_point);
- if (FLAG_debug_in_liftoff) {
- native_module->GetDebugInfo()->SetBreakpoint(func_index, offset, isolate);
- } else {
- // Iterate over all instances and tell them to set this new breakpoint.
- // We do this using the weak list of all instances from the script.
- Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
- isolate);
- for (int i = 0; i < weak_instance_list->length(); ++i) {
- MaybeObject maybe_instance = weak_instance_list->Get(i);
- if (maybe_instance->IsWeak()) {
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
- isolate);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset);
- }
+ native_module->GetDebugInfo()->SetBreakpoint(func_index, offset, isolate);
+
+ return true;
+}
+
+namespace {
+
+int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
+ if (break_point_info_or_undef.IsUndefined(isolate)) return kMaxInt;
+ return BreakPointInfo::cast(break_point_info_or_undef).source_position();
+}
+
+int FindBreakpointInfoInsertPos(Isolate* isolate,
+ Handle<FixedArray> breakpoint_infos,
+ int position) {
+ // Find insert location via binary search, taking care of undefined values on
+ // the right. Position is always greater than zero.
+ DCHECK_LT(0, position);
+
+ int left = 0; // inclusive
+ int right = breakpoint_infos->length(); // exclusive
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ Object mid_obj = breakpoint_infos->get(mid);
+ if (GetBreakpointPos(isolate, mid_obj) <= position) {
+ left = mid;
+ } else {
+ right = mid;
}
}
- return true;
+ int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
+ return left_pos < position ? left + 1 : left;
}
+} // namespace
+
// static
bool WasmScript::ClearBreakPoint(Handle<Script> script, int position,
Handle<BreakPoint> break_point) {
+ if (!script->has_wasm_breakpoint_infos()) return false;
+
Isolate* isolate = script->GetIsolate();
+ Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
- // Find the function for this breakpoint.
- const wasm::WasmModule* module = script->wasm_native_module()->module();
- int func_index = GetContainingWasmFunction(module, position);
- if (func_index < 0) return false;
- const wasm::WasmFunction& func = module->functions[func_index];
- int offset_in_func = position - func.code.offset();
+ int pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
- if (!WasmScript::RemoveBreakpointFromInfo(script, position, break_point)) {
- return false;
- }
+ // Does a BreakPointInfo object already exist for this position?
+ if (pos == breakpoint_infos->length()) return false;
- // Iterate over all instances and tell them to remove this breakpoint.
- // We do this using the weak list of all instances from the script.
- Handle<WeakArrayList> weak_instance_list(script->wasm_weak_instance_list(),
- isolate);
- for (int i = 0; i < weak_instance_list->length(); ++i) {
- MaybeObject maybe_instance = weak_instance_list->Get(i);
- if (maybe_instance->IsWeak()) {
- Handle<WasmInstanceObject> instance(
- WasmInstanceObject::cast(maybe_instance->GetHeapObjectAssumeWeak()),
- isolate);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::ClearBreakpoint(debug_info, func_index, offset_in_func);
+ Handle<BreakPointInfo> info(BreakPointInfo::cast(breakpoint_infos->get(pos)),
+ isolate);
+ BreakPointInfo::ClearBreakPoint(isolate, info, break_point);
+
+ // Check if there are no more breakpoints at this location.
+ if (info->GetBreakPointCount(isolate) == 0) {
+ // Update array by moving breakpoints up one position.
+ for (int i = pos; i < breakpoint_infos->length() - 1; i++) {
+ Object entry = breakpoint_infos->get(i + 1);
+ breakpoint_infos->set(i, entry);
+ if (entry.IsUndefined(isolate)) break;
}
+ // Make sure last array element is empty as a result.
+ breakpoint_infos->set_undefined(breakpoint_infos->length() - 1);
}
-
return true;
}
@@ -1401,38 +1109,12 @@ bool WasmScript::ClearBreakPointById(Handle<Script> script, int breakpoint_id) {
return false;
}
-namespace {
-
-int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
- if (break_point_info_or_undef.IsUndefined(isolate)) return kMaxInt;
- return BreakPointInfo::cast(break_point_info_or_undef).source_position();
-}
-
-int FindBreakpointInfoInsertPos(Isolate* isolate,
- Handle<FixedArray> breakpoint_infos,
- int position) {
- // Find insert location via binary search, taking care of undefined values on
- // the right. Position is always greater than zero.
- DCHECK_LT(0, position);
-
- int left = 0; // inclusive
- int right = breakpoint_infos->length(); // exclusive
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- Object mid_obj = breakpoint_infos->get(mid);
- if (GetBreakpointPos(isolate, mid_obj) <= position) {
- left = mid;
- } else {
- right = mid;
- }
- }
-
- int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
- return left_pos < position ? left + 1 : left;
+// static
+void WasmScript::ClearAllBreakpoints(Script script) {
+ script.set_wasm_breakpoint_infos(
+ ReadOnlyRoots(script.GetIsolate()).empty_fixed_array());
}
-} // namespace
-
// static
void WasmScript::AddBreakpointToInfo(Handle<Script> script, int position,
Handle<BreakPoint> break_point) {
@@ -1490,69 +1172,6 @@ void WasmScript::AddBreakpointToInfo(Handle<Script> script, int position,
}
// static
-bool WasmScript::RemoveBreakpointFromInfo(Handle<Script> script, int position,
- Handle<BreakPoint> break_point) {
- if (!script->has_wasm_breakpoint_infos()) return false;
-
- Isolate* isolate = script->GetIsolate();
- Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
-
- int pos = FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
-
- // Does a BreakPointInfo object already exist for this position?
- if (pos == breakpoint_infos->length()) return false;
-
- Handle<BreakPointInfo> info(BreakPointInfo::cast(breakpoint_infos->get(pos)),
- isolate);
- BreakPointInfo::ClearBreakPoint(isolate, info, break_point);
-
- // Check if there are no more breakpoints at this location.
- if (info->GetBreakPointCount(isolate) == 0) {
- // Update array by moving breakpoints up one position.
- for (int i = pos; i < breakpoint_infos->length() - 1; i++) {
- Object entry = breakpoint_infos->get(i + 1);
- breakpoint_infos->set(i, entry);
- if (entry.IsUndefined(isolate)) break;
- }
- // Make sure last array element is empty as a result.
- breakpoint_infos->set_undefined(breakpoint_infos->length() - 1);
- }
- return true;
-}
-
-void WasmScript::SetBreakpointsOnNewInstance(
- Handle<Script> script, Handle<WasmInstanceObject> instance) {
- if (!script->has_wasm_breakpoint_infos()) return;
- Isolate* isolate = script->GetIsolate();
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
-
- Handle<FixedArray> breakpoint_infos(script->wasm_breakpoint_infos(), isolate);
- // If the array exists, it should not be empty.
- DCHECK_LT(0, breakpoint_infos->length());
-
- for (int i = 0, e = breakpoint_infos->length(); i < e; ++i) {
- Handle<Object> obj(breakpoint_infos->get(i), isolate);
- if (obj->IsUndefined(isolate)) {
- for (; i < e; ++i) {
- DCHECK(breakpoint_infos->get(i).IsUndefined(isolate));
- }
- break;
- }
- Handle<BreakPointInfo> breakpoint_info = Handle<BreakPointInfo>::cast(obj);
- int position = breakpoint_info->source_position();
-
- // Find the function for this breakpoint, and set the breakpoint.
- const wasm::WasmModule* module = script->wasm_native_module()->module();
- int func_index = GetContainingWasmFunction(module, position);
- DCHECK_LE(0, func_index);
- const wasm::WasmFunction& func = module->functions[func_index];
- int offset_in_func = position - func.code.offset();
- WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
- }
-}
-
-// static
bool WasmScript::GetPossibleBreakpoints(
wasm::NativeModule* native_module, const v8::debug::Location& start,
const v8::debug::Location& end,
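
The breakpoint bookkeeping above leans on one small trick: FindBreakpointInfoInsertPos maps the undefined padding slots at the end of the FixedArray to kMaxInt, so a plain binary search keeps them to the right of every real source position. Below is a minimal standalone sketch of that technique, using a vector of ints with -1 standing in for the undefined sentinel (names and types here are illustrative, not V8's).

#include <cassert>
#include <climits>
#include <vector>

constexpr int kUndefined = -1;  // stands in for the "undefined" padding slots

// Treat padding slots as larger than any real position, exactly like
// GetBreakpointPos above returns kMaxInt for undefined entries.
int GetPos(int entry) { return entry == kUndefined ? INT_MAX : entry; }

// Returns the index at which a breakpoint with source position `position`
// either already lives or should be inserted (array must be non-empty).
int FindInsertPos(const std::vector<int>& entries, int position) {
  int left = 0;                                  // inclusive
  int right = static_cast<int>(entries.size());  // exclusive
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (GetPos(entries[mid]) <= position) {
      left = mid;
    } else {
      right = mid;
    }
  }
  return GetPos(entries[left]) < position ? left + 1 : left;
}

int main() {
  std::vector<int> positions = {3, 7, 12, kUndefined};  // sorted, with padding
  assert(FindInsertPos(positions, 7) == 1);   // existing entry found
  assert(FindInsertPos(positions, 9) == 2);   // insert before 12
  assert(FindInsertPos(positions, 20) == 3);  // insert into the padding
  return 0;
}
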
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 2611b7facc..1eacd6ff52 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -23,7 +23,7 @@ class Handle;
class JSObject;
template <typename T>
class Vector;
-class WasmCompiledFrame;
+class WasmFrame;
class WasmInstanceObject;
namespace wasm {
@@ -33,6 +33,7 @@ class LocalNames;
class NativeModule;
class WasmCode;
class WireBytesRef;
+class WasmValue;
// Side table storing information used to inspect Liftoff frames at runtime.
// This table is only created on demand for debugging, so it is not optimized
@@ -130,19 +131,27 @@ class DebugSideTable {
std::vector<Entry> entries_;
};
-// Get the global scope for a given instance. This will contain the wasm memory
+// Get the module scope for a given instance. This will contain the wasm memory
// (if the instance has a memory) and the values of all globals.
-Handle<JSObject> GetGlobalScopeObject(Handle<WasmInstanceObject>);
+Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject>);
// Debug info per NativeModule, created lazily on demand.
// Implementation in {wasm-debug.cc} using PIMPL.
-class DebugInfo {
+class V8_EXPORT_PRIVATE DebugInfo {
public:
explicit DebugInfo(NativeModule*);
~DebugInfo();
+ // For the frame inspection methods below:
// {fp} is the frame pointer of the Liftoff frame, {debug_break_fp} that of
// the {WasmDebugBreak} frame (if any).
+ int GetNumLocals(Isolate*, Address pc);
+ WasmValue GetLocalValue(int local, Isolate*, Address pc, Address fp,
+ Address debug_break_fp);
+ int GetStackDepth(Isolate*, Address pc);
+ WasmValue GetStackValue(int index, Isolate*, Address pc, Address fp,
+ Address debug_break_fp);
+
Handle<JSObject> GetLocalScopeObject(Isolate*, Address pc, Address fp,
Address debug_break_fp);
@@ -157,7 +166,7 @@ class DebugInfo {
void ClearStepping();
- bool IsStepping(WasmCompiledFrame*);
+ bool IsStepping(WasmFrame*);
void RemoveBreakpoint(int func_index, int offset, Isolate* current_isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 573812e8fa..324d1b1d49 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -120,7 +120,7 @@ class WasmGCForegroundTask : public CancelableTask {
// report an empty set of live wasm code.
#ifdef ENABLE_SLOW_DCHECKS
for (StackFrameIterator it(isolate_); !it.done(); it.Advance()) {
- DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
+ DCHECK_NE(StackFrame::WASM, it.frame()->type());
}
#endif
CheckNoArchivedThreads(isolate_);
@@ -131,6 +131,33 @@ class WasmGCForegroundTask : public CancelableTask {
Isolate* isolate_;
};
+class WeakScriptHandle {
+ public:
+ explicit WeakScriptHandle(Handle<Script> handle) {
+ auto global_handle =
+ handle->GetIsolate()->global_handles()->Create(*handle);
+ location_ = std::make_unique<Address*>(global_handle.location());
+ GlobalHandles::MakeWeak(location_.get());
+ }
+
+  // The destructor of this class is usually called after the weak callback,
+  // because the Script keeps the NativeModule alive. So we expect the handle
+  // to be destroyed and the location to be reset already.
+  // We cannot check this because of one exception: when the native module is
+  // freed during isolate shutdown, the destructor is called first and the
+  // callback is never called.
+ ~WeakScriptHandle() = default;
+
+ WeakScriptHandle(WeakScriptHandle&&) V8_NOEXCEPT = default;
+
+ Handle<Script> handle() { return Handle<Script>(*location_); }
+
+ private:
+ // Store the location in a unique_ptr so that its address stays the same even
+ // when this object is moved/copied.
+ std::unique_ptr<Address*> location_;
+};
+
} // namespace
std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
@@ -320,6 +347,9 @@ struct WasmEngine::IsolateInfo {
// grows, never shrinks).
std::set<NativeModule*> native_modules;
+ // Scripts created for each native module in this isolate.
+ std::unordered_map<NativeModule*, WeakScriptHandle> scripts;
+
// Caches whether code needs to be logged on this isolate.
bool log_codes;
@@ -362,7 +392,7 @@ WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
// Synchronize on the GDB-remote thread, if running.
- gdb_server_ = nullptr;
+ gdb_server_.reset();
#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
// Synchronize on all background compile tasks.
@@ -461,10 +491,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
}
#endif
- Handle<Script> script =
- CreateWasmScript(isolate, bytes.module_bytes(),
- VectorOf(native_module->module()->source_map_url),
- native_module->module()->name);
+ Handle<Script> script = GetOrCreateScript(isolate, native_module);
// Create the compiled module object and populate with compiled functions
// and information needed at instantiation time. This object needs to be
@@ -588,12 +615,6 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
&native_module->module()->functions[function_index], tier);
}
-void WasmEngine::RecompileAllFunctions(Isolate* isolate,
- NativeModule* native_module,
- ExecutionTier tier) {
- RecompileNativeModule(isolate, native_module, tier);
-}
-
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
std::vector<NativeModule*> native_modules;
{
@@ -602,24 +623,40 @@ void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
isolates_[isolate]->keep_tiered_down = true;
for (auto* native_module : isolates_[isolate]->native_modules) {
native_modules.push_back(native_module);
+ native_module->SetTieringState(kTieredDown);
}
}
for (auto* native_module : native_modules) {
- native_module->TierDown(isolate);
+ native_module->TriggerRecompilation();
}
}
void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
- std::vector<NativeModule*> native_modules;
+  // Only trigger recompilation after releasing the mutex; otherwise we risk
+ // deadlocks because of lock inversion.
+ std::vector<NativeModule*> native_modules_to_recompile;
{
base::MutexGuard lock(&mutex_);
isolates_[isolate]->keep_tiered_down = false;
+ auto test_keep_tiered_down = [this](NativeModule* native_module) {
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ for (auto* isolate : native_modules_[native_module]->isolates) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ if (isolates_[isolate]->keep_tiered_down) return true;
+ }
+ return false;
+ };
for (auto* native_module : isolates_[isolate]->native_modules) {
- native_modules.push_back(native_module);
+ if (!native_module->IsTieredDown()) continue;
+      // Only start tier-up if no other isolate needs this module in the
+      // tiered-down state.
+ if (test_keep_tiered_down(native_module)) continue;
+ native_module->SetTieringState(kTieredUp);
+ native_modules_to_recompile.push_back(native_module);
}
}
- for (auto* native_module : native_modules) {
- native_module->TierUp(isolate);
+ for (auto* native_module : native_modules_to_recompile) {
+ native_module->TriggerRecompilation();
}
}
@@ -628,19 +665,104 @@ std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
return module_object->shared_native_module();
}
+namespace {
+Handle<Script> CreateWasmScript(Isolate* isolate,
+ std::shared_ptr<NativeModule> native_module,
+ Vector<const char> source_url = {}) {
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ script->set_context_data(isolate->native_context()->debug_context_id());
+ script->set_type(Script::TYPE_WASM);
+
+ Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
+ kZeroHashSeed);
+
+ const int kBufferSize = 32;
+ char buffer[kBufferSize];
+
+ // Script name is "<module_name>-hash" if name is available and "hash"
+ // otherwise.
+ const WasmModule* module = native_module->module();
+ Handle<String> name_str;
+ if (module->name.is_set()) {
+ int name_chars = SNPrintF(ArrayVector(buffer), "-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ Handle<String> name_hash =
+ isolate->factory()
+ ->NewStringFromOneByte(
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld)
+ .ToHandleChecked();
+ Handle<String> module_name =
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, wire_bytes, module->name, kNoInternalize);
+ name_str = isolate->factory()
+ ->NewConsString(module_name, name_hash)
+ .ToHandleChecked();
+ } else {
+ int name_chars = SNPrintF(ArrayVector(buffer), "%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ name_str = isolate->factory()
+ ->NewStringFromOneByte(
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ AllocationType::kOld)
+ .ToHandleChecked();
+ }
+ script->set_name(*name_str);
+ MaybeHandle<String> url_str;
+ if (!source_url.empty()) {
+ url_str =
+ isolate->factory()->NewStringFromUtf8(source_url, AllocationType::kOld);
+ } else {
+ Handle<String> url_prefix =
+ isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
+ url_str = isolate->factory()->NewConsString(url_prefix, name_str);
+ }
+ script->set_source_url(*url_str.ToHandleChecked());
+
+ const WasmDebugSymbols& debug_symbols =
+ native_module->module()->debug_symbols;
+ if (debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
+ !debug_symbols.external_url.is_empty()) {
+ Vector<const char> external_url =
+ ModuleWireBytes(wire_bytes).GetNameOrNull(debug_symbols.external_url);
+ MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
+ external_url, AllocationType::kOld);
+ script->set_source_mapping_url(*src_map_str.ToHandleChecked());
+ }
+
+ // Use the given shared {NativeModule}, but increase its reference count by
+ // allocating a new {Managed<T>} that the {Script} references.
+ size_t code_size_estimate = native_module->committed_code_space();
+ size_t memory_estimate =
+ code_size_estimate +
+ wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
+ Handle<Managed<wasm::NativeModule>> managed_native_module =
+ Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
+ std::move(native_module));
+ script->set_wasm_managed_native_module(*managed_native_module);
+ script->set_wasm_breakpoint_infos(ReadOnlyRoots(isolate).empty_fixed_array());
+ script->set_wasm_weak_instance_list(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
+ return script;
+}
+} // namespace
+
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
- Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
+ Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module,
+ Vector<const char> source_url) {
+ DCHECK_EQ(this, shared_native_module->engine());
NativeModule* native_module = shared_native_module.get();
ModuleWireBytes wire_bytes(native_module->wire_bytes());
Handle<Script> script =
- CreateWasmScript(isolate, wire_bytes.module_bytes(),
- VectorOf(native_module->module()->source_map_url),
- native_module->module()->name);
+ GetOrCreateScript(isolate, shared_native_module, source_url);
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(shared_native_module), script, export_wrappers,
- native_module->committed_code_space());
+ isolate, std::move(shared_native_module), script, export_wrappers);
{
base::MutexGuard lock(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
@@ -769,9 +891,20 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
nullptr);
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ if (gdb_server_) {
+ gdb_server_->AddIsolate(isolate);
+ }
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
}
void WasmEngine::RemoveIsolate(Isolate* isolate) {
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+ if (gdb_server_) {
+ gdb_server_->RemoveIsolate(isolate);
+ }
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
base::MutexGuard guard(&mutex_);
auto it = isolates_.find(isolate);
DCHECK_NE(isolates_.end(), it);
@@ -856,7 +989,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
if (FLAG_wasm_gdb_remote && !gdb_server_) {
- gdb_server_ = std::make_unique<gdb_server::GdbServer>();
+ gdb_server_ = gdb_server::GdbServer::Create();
+ gdb_server_->AddIsolate(isolate);
}
#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
@@ -867,10 +1001,15 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
native_module.get(), std::make_unique<NativeModuleInfo>()));
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
- isolates_[isolate]->native_modules.insert(native_module.get());
+ auto& modules_per_isolate = isolates_[isolate]->native_modules;
+ modules_per_isolate.insert(native_module.get());
if (isolates_[isolate]->keep_tiered_down) {
- native_module->SetTieredDown();
+ native_module->SetTieringState(kTieredDown);
}
+ isolate->counters()->wasm_modules_per_isolate()->AddSample(
+ static_cast<int>(modules_per_isolate.size()));
+ isolate->counters()->wasm_modules_per_engine()->AddSample(
+ static_cast<int>(native_modules_.size()));
return native_module;
}
@@ -878,6 +1017,7 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
std::shared_ptr<NativeModule> native_module =
native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
+ bool recompile_module = false;
if (native_module) {
base::MutexGuard guard(&mutex_);
auto& native_module_info = native_modules_[native_module.get()];
@@ -886,13 +1026,20 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
}
native_module_info->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
+ if (isolates_[isolate]->keep_tiered_down) {
+ native_module->SetTieringState(kTieredDown);
+ recompile_module = true;
+ }
}
+ // Potentially recompile the module for tier down, after releasing the mutex.
+ if (recompile_module) native_module->TriggerRecompilation();
return native_module;
}
bool WasmEngine::UpdateNativeModuleCache(
bool error, std::shared_ptr<NativeModule>* native_module,
Isolate* isolate) {
+ DCHECK_EQ(this, native_module->get()->engine());
// Pass {native_module} by value here to keep it alive until at least after
// we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
// which would lock the mutex twice.
@@ -901,13 +1048,20 @@ bool WasmEngine::UpdateNativeModuleCache(
if (prev == native_module->get()) return true;
- base::MutexGuard guard(&mutex_);
- auto& native_module_info = native_modules_[native_module->get()];
- if (!native_module_info) {
- native_module_info = std::make_unique<NativeModuleInfo>();
+ bool recompile_module = false;
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, native_modules_.count(native_module->get()));
+ native_modules_[native_module->get()]->isolates.insert(isolate);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ isolates_[isolate]->native_modules.insert(native_module->get());
+ if (isolates_[isolate]->keep_tiered_down) {
+ native_module->get()->SetTieringState(kTieredDown);
+ recompile_module = true;
+ }
}
- native_module_info->isolates.insert(isolate);
- isolates_[isolate]->native_modules.insert((*native_module).get());
+ // Potentially recompile the module for tier down, after releasing the mutex.
+ if (recompile_module) native_module->get()->TriggerRecompilation();
return false;
}
@@ -928,6 +1082,7 @@ void WasmEngine::FreeNativeModule(NativeModule* native_module) {
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
info->native_modules.erase(native_module);
+ info->scripts.erase(native_module);
// If there are {WasmCode} objects of the deleted {NativeModule}
// outstanding to be logged in this isolate, remove them. Decrementing the
// ref count is not needed, since the {NativeModule} dies anyway.
@@ -1018,8 +1173,8 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
std::unordered_set<wasm::WasmCode*> live_wasm_code;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* const frame = it.frame();
- if (frame->type() != StackFrame::WASM_COMPILED) continue;
- live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
+ if (frame->type() != StackFrame::WASM) continue;
+ live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
}
CheckNoArchivedThreads(isolate);
@@ -1089,7 +1244,37 @@ void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
}
}
+Handle<Script> WasmEngine::GetOrCreateScript(
+ Isolate* isolate, const std::shared_ptr<NativeModule>& native_module,
+ Vector<const char> source_url) {
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ auto& scripts = isolates_[isolate]->scripts;
+ auto it = scripts.find(native_module.get());
+ if (it != scripts.end()) {
+ Handle<Script> weak_global_handle = it->second.handle();
+ if (weak_global_handle.is_null()) {
+ scripts.erase(it);
+ } else {
+ return Handle<Script>::New(*weak_global_handle, isolate);
+ }
+ }
+ }
+ // Temporarily release the mutex to let the GC collect native modules.
+ auto script = CreateWasmScript(isolate, native_module, source_url);
+ {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ auto& scripts = isolates_[isolate]->scripts;
+ DCHECK_EQ(0, scripts.count(native_module.get()));
+ scripts.emplace(native_module.get(), WeakScriptHandle(script));
+ return script;
+ }
+}
+
void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
+ DCHECK(!mutex_.TryLock());
DCHECK_NULL(current_gc_info_);
DCHECK(FLAG_wasm_code_gc);
new_potentially_dead_code_size_ = 0;
@@ -1117,6 +1302,11 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
TRACE_CODE_GC(
"Starting GC. Total number of potentially dead code objects: %zu\n",
current_gc_info_->dead_code.size());
+ // Ensure that there are outstanding isolates that will eventually finish this
+ // GC. If there are no outstanding isolates, we finish the GC immediately.
+ PotentiallyFinishCurrentGC();
+ DCHECK(current_gc_info_ == nullptr ||
+ !current_gc_info_->outstanding_isolates.empty());
}
bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
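
GetOrCreateScript above follows a common pattern for lazily created, cached objects: probe the per-isolate cache while holding the engine mutex, drop the lock for the allocating (and possibly GC-triggering) creation, then re-take the lock to publish the new entry. Here is a generic sketch of that shape with standard-library types in place of the isolate map and weak script handles; ScriptCache and its members are made-up names.

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// Generic cache following the lock / probe / unlock / create / re-lock /
// publish shape used by WasmEngine::GetOrCreateScript.
class ScriptCache {
 public:
  std::shared_ptr<std::string> GetOrCreate(int key) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      auto it = cache_.find(key);
      if (it != cache_.end()) {
        // Entries may have died (the real code stores weak handles); drop
        // stale ones and fall through to re-creation.
        if (auto alive = it->second.lock()) return alive;
        cache_.erase(it);
      }
    }
    // Create outside the lock, mirroring how the patch releases the engine
    // mutex so allocation and GC can proceed while the Script is built.
    auto created =
        std::make_shared<std::string>("script-" + std::to_string(key));
    {
      std::lock_guard<std::mutex> guard(mutex_);
      cache_.emplace(key, created);  // publish for later lookups
    }
    return created;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<int, std::weak_ptr<std::string>> cache_;
};

int main() {
  ScriptCache cache;
  auto first = cache.GetOrCreate(1);
  auto second = cache.GetOrCreate(1);  // served from the cache
  return first == second ? 0 : 1;
}
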
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 7d14fef506..8e59b3f5df 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -31,15 +31,17 @@ class JSArrayBuffer;
namespace wasm {
+#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+namespace gdb_server {
+class GdbServer;
+}
+#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
+
class AsyncCompileJob;
class ErrorThrower;
struct ModuleWireBytes;
class WasmFeatures;
-namespace gdb_server {
-class GdbServer;
-}
-
class V8_EXPORT_PRIVATE CompilationResultResolver {
public:
virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) = 0;
@@ -193,10 +195,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
void CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
- // Recompiles all functions at a specific compilation tier.
- void RecompileAllFunctions(Isolate* isolate, NativeModule* native_module,
- ExecutionTier tier);
-
void TierDownAllModulesPerIsolate(Isolate* isolate);
void TierUpAllModulesPerIsolate(Isolate* isolate);
@@ -208,7 +206,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Imports the shared part of a module from a different Context/Isolate using
// the the same engine, recreating a full module object in the given Isolate.
Handle<WasmModuleObject> ImportNativeModule(
- Isolate* isolate, std::shared_ptr<NativeModule> shared_module);
+ Isolate* isolate, std::shared_ptr<NativeModule> shared_module,
+ Vector<const char> source_url);
WasmCodeManager* code_manager() { return &code_manager_; }
@@ -335,6 +334,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
void FreeDeadCode(const DeadCodeMap&);
void FreeDeadCodeLocked(const DeadCodeMap&);
+ Handle<Script> GetOrCreateScript(Isolate*,
+ const std::shared_ptr<NativeModule>&,
+ Vector<const char> source_url = {});
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 4ae5de9109..ab8eb612a8 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -27,7 +27,13 @@
\
/* No official proposal (yet?). */ \
/* V8 side owner: clemensb */ \
- V(compilation_hints, "compilation hints section", false)
+ V(compilation_hints, "compilation hints section", false) \
+ \
+ /* GC proposal (early prototype, might change dramatically) */ \
+ /* Official proposal: https://github.com/WebAssembly/gc */ \
+ /* Prototype engineering spec: https://bit.ly/3cWcm6Q */ \
+ /* V8 side owner: jkummerow */ \
+ V(gc, "garbage collection", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
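
The new gc entry slots into the same X-macro list as the existing experimental flags: one central FOREACH macro is expanded with different V() definitions to generate enums, flag definitions and help text. A small self-contained illustration of that pattern follows; the feature list and names below are invented for the example, not V8's actual macros.

#include <cstdio>

// One central list; each entry is V(name, description, default).
#define FOREACH_DEMO_FEATURE(V)                             \
  V(compilation_hints, "compilation hints section", false)  \
  V(gc, "garbage collection", false)

// Expansion 1: an enum with one constant per feature.
enum DemoFeature {
#define DECL_FEATURE(name, desc, def) kFeature_##name,
  FOREACH_DEMO_FEATURE(DECL_FEATURE)
#undef DECL_FEATURE
  kFeatureCount
};

int main() {
  // Expansion 2: a human-readable table generated from the same list.
#define PRINT_FEATURE(name, desc, def) \
  std::printf("%-20s %-30s default=%d\n", #name, desc, def);
  FOREACH_DEMO_FEATURE(PRINT_FEATURE)
#undef PRINT_FEATURE
  return kFeatureCount == 2 ? 0 : 1;
}
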
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index 2e0909a8e3..42ae237ed2 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -27,6 +27,9 @@ WasmFeatures WasmFeatures::FromIsolate(Isolate* isolate) {
if (isolate->AreWasmThreadsEnabled(handle(isolate->context(), isolate))) {
features.Add(kFeature_threads);
}
+ if (isolate->IsWasmSimdEnabled(handle(isolate->context(), isolate))) {
+ features.Add(kFeature_simd);
+ }
return features;
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index b1dc8f22ef..96255ef818 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -819,7 +819,7 @@ class SideTable : public ZoneObject {
BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
i.pc());
if (imm.type == kWasmBottom) {
- imm.sig = module->signatures[imm.sig_index];
+ imm.sig = module->signature(imm.sig_index);
}
TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
@@ -835,7 +835,7 @@ class SideTable : public ZoneObject {
BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
i.pc());
if (imm.type == kWasmBottom) {
- imm.sig = module->signatures[imm.sig_index];
+ imm.sig = module->signature(imm.sig_index);
}
TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
@@ -870,7 +870,7 @@ class SideTable : public ZoneObject {
BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
i.pc());
if (imm.type == kWasmBottom) {
- imm.sig = module->signatures[imm.sig_index];
+ imm.sig = module->signature(imm.sig_index);
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
@@ -1468,7 +1468,10 @@ class ThreadImpl {
case ValueType::kAnyRef:
case ValueType::kFuncRef:
case ValueType::kNullRef:
- case ValueType::kExnRef: {
+ case ValueType::kExnRef:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
@@ -1748,7 +1751,7 @@ class ThreadImpl {
DoTrap(kTrapUnalignedAccess, pc);
return false;
}
- *len = 2 + imm.length;
+ *len += imm.length;
return true;
}
@@ -1777,7 +1780,7 @@ class ThreadImpl {
DoTrap(kTrapUnalignedAccess, pc);
return false;
}
- *len = 2 + imm.length;
+ *len += imm.length;
return true;
}
@@ -2167,7 +2170,7 @@ class ThreadImpl {
#undef ATOMIC_STORE_CASE
case kExprAtomicFence:
std::atomic_thread_fence(std::memory_order_seq_cst);
- *len += 2;
+ *len += 1;
break;
case kExprI32AtomicWait: {
int32_t val;
@@ -2223,7 +2226,7 @@ class ThreadImpl {
}
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int* const len) {
+ pc_t pc, int* const len, uint32_t opcode_length) {
switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \
@@ -2241,50 +2244,54 @@ class ThreadImpl {
SPLAT_CASE(I16x8, int8, int32_t, 8)
SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
-#define EXTRACT_LANE_CASE(format, name) \
- case kExpr##format##ExtractLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
- return true; \
+#define EXTRACT_LANE_CASE(format, name) \
+ case kExpr##format##ExtractLane: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
+ opcode_length); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
+ return true; \
}
EXTRACT_LANE_CASE(F64x2, f64x2)
EXTRACT_LANE_CASE(F32x4, f32x4)
EXTRACT_LANE_CASE(I64x2, i64x2)
EXTRACT_LANE_CASE(I32x4, i32x4)
#undef EXTRACT_LANE_CASE
-#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
- case kExpr##format##ExtractLane##sign: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
- return true; \
+#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
+ case kExpr##format##ExtractLane##sign: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
+ opcode_length); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
+ return true; \
}
EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
#undef EXTRACT_LANE_EXTEND_CASE
-#define BINOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[LANE(i, s1)]; \
- auto b = s2.val[LANE(i, s1)]; \
- res.val[LANE(i, s1)] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define BINOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[LANE(i, s1)]; \
+ auto b = s2.val[LANE(i, s1)]; \
+ auto result = expr; \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ res.val[LANE(i, s1)] = expr; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
@@ -2292,12 +2299,16 @@ class ThreadImpl {
BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
+ BINOP_CASE(F64x2Pmin, f64x2, float2, 2, std::min(a, b))
+ BINOP_CASE(F64x2Pmax, f64x2, float2, 2, std::max(a, b))
BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
+ BINOP_CASE(F32x4Pmin, f32x4, float4, 4, std::min(a, b))
+ BINOP_CASE(F32x4Pmax, f32x4, float4, 4, std::max(a, b))
BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
@@ -2353,17 +2364,19 @@ class ThreadImpl {
BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
base::RoundingAverageUnsigned<uint8_t>(a, b))
#undef BINOP_CASE
-#define UNOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- res.val[i] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define UNOP_CASE(op, name, stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v = Pop(); \
+ stype s = v.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s.val[i]; \
+ auto result = expr; \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ res.val[i] = result; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
@@ -2402,20 +2415,22 @@ class ThreadImpl {
BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
#undef BITMASK_CASE
-#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- out_stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[i]; \
- auto b = s2.val[i]; \
- res.val[i] = expr ? -1 : 0; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ out_stype res; \
+ for (size_t i = 0; i < count; ++i) { \
+ auto a = s1.val[i]; \
+ auto b = s2.val[i]; \
+ auto result = expr; \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ res.val[i] = result ? -1 : 0; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
@@ -2486,16 +2501,17 @@ class ThreadImpl {
CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
#undef CMPOP_CASE
-#define REPLACE_LANE_CASE(format, name, stype, ctype) \
- case kExpr##format##ReplaceLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- *len += 1; \
- WasmValue new_val = Pop(); \
- WasmValue simd_val = Pop(); \
- stype s = simd_val.to_s128().to_##name(); \
- s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
- Push(WasmValue(Simd128(s))); \
- return true; \
+#define REPLACE_LANE_CASE(format, name, stype, ctype) \
+ case kExpr##format##ReplaceLane: { \
+ SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
+ opcode_length); \
+ *len += 1; \
+ WasmValue new_val = Pop(); \
+ WasmValue simd_val = Pop(); \
+ stype s = simd_val.to_s128().to_##name(); \
+ s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
+ Push(WasmValue(Simd128(s))); \
+ return true; \
}
REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
@@ -2507,11 +2523,11 @@ class ThreadImpl {
case kExprS128LoadMem:
return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
MachineRepresentation::kSimd128,
- /*prefix_len=*/1);
+ /*prefix_len=*/opcode_length);
case kExprS128StoreMem:
return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
MachineRepresentation::kSimd128,
- /*prefix_len=*/1);
+ /*prefix_len=*/opcode_length);
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
uint32_t shift = Pop().to<uint32_t>(); \
@@ -2554,6 +2570,8 @@ class ThreadImpl {
dst_type res; \
for (size_t i = 0; i < count; ++i) { \
ctype a = s.val[LANE(start_index + i, s)]; \
+ auto result = expr; \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
res.val[LANE(i, res)] = expr; \
} \
Push(WasmValue(Simd128(res))); \
@@ -2622,21 +2640,23 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- res.val[LANE(i, s1)] = \
- s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
- res.val[LANE(i + count / 2, s1)] = \
- s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define ADD_HORIZ_CASE(op, name, stype, count) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count / 2; ++i) { \
+ auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
+ possible_nondeterminism_ |= has_nondeterminism(result1); \
+ res.val[LANE(i, s1)] = result1; \
+ auto result2 = s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
+ possible_nondeterminism_ |= has_nondeterminism(result2); \
+ res.val[LANE(i + count / 2, s1)] = result2; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
@@ -2655,8 +2675,8 @@ class ThreadImpl {
return true;
}
case kExprS8x16Shuffle: {
- Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc));
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(
+ decoder, code->at(pc), opcode_length);
*len += 16;
int16 v2 = Pop().to_s128().to_i8x16();
int16 v1 = Pop().to_s128().to_i8x16();
@@ -2759,8 +2779,11 @@ class ThreadImpl {
template <typename s_type, typename result_type, typename load_type>
bool DoSimdLoadSplat(Decoder* decoder, InterpreterCode* code, pc_t pc,
int* const len, MachineRepresentation rep) {
+    // len is the number of bytes that make up this op, including the prefix
+    // byte, so the prefix_len for ExecuteLoad is len minus the prefix byte
+    // itself. Think of prefix_len as the number of extra bytes that make up
+    // this op.
if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
- /*prefix_len=*/1)) {
+ /*prefix_len=*/*len - 1)) {
return false;
}
result_type v = Pop().to<result_type>();
@@ -2776,7 +2799,7 @@ class ThreadImpl {
static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
"size mismatch for wide and narrow types");
if (!ExecuteLoad<uint64_t, uint64_t>(decoder, code, pc, len, rep,
- /*prefix_len=*/1)) {
+ /*prefix_len=*/*len - 1)) {
return false;
}
constexpr int lanes = kSimd128Size / sizeof(wide_type);
@@ -2890,6 +2913,11 @@ class ThreadImpl {
encoded_values->set(encoded_index++, *anyref);
break;
}
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
case ValueType::kStmt:
case ValueType::kBottom:
UNREACHABLE();
@@ -2995,6 +3023,11 @@ class ThreadImpl {
value = WasmValue(anyref);
break;
}
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
case ValueType::kStmt:
case ValueType::kBottom:
UNREACHABLE();
@@ -3035,16 +3068,21 @@ class ThreadImpl {
// Do first check for a breakpoint, in order to set hit_break correctly.
const char* skip = " ";
int len = 1;
+ // We need to store this, because SIMD opcodes are LEB encoded, and later
+ // on when executing, we need to know where to read immediates from.
+ uint32_t simd_opcode_length = 0;
byte orig = code->start[pc];
WasmOpcode opcode = static_cast<WasmOpcode>(orig);
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
+ opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
+ &code->start[pc], &simd_opcode_length);
+ len += simd_opcode_length;
}
if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
orig = code->orig_start[pc];
if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
- opcode =
- static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
+ opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
+ &code->start[pc]);
}
if (SkipBreakpoint(code, pc)) {
// skip breakpoint by switching on original code.
@@ -3149,7 +3187,8 @@ class ThreadImpl {
break;
}
case kExprSelectWithType: {
- SelectTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ SelectTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
+ &decoder, code->at(pc));
len = 1 + imm.length;
V8_FALLTHROUGH;
}
@@ -3453,7 +3492,11 @@ class ThreadImpl {
case ValueType::kAnyRef:
case ValueType::kFuncRef:
case ValueType::kNullRef:
- case ValueType::kExnRef: {
+ case ValueType::kExnRef:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef: {
+ // TODO(7748): Type checks or DCHECKs for ref types?
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index; // The index into the buffer.
@@ -3647,7 +3690,6 @@ class ThreadImpl {
break;
}
case kNumericPrefix: {
- ++len;
if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
break;
}
@@ -3656,8 +3698,9 @@ class ThreadImpl {
break;
}
case kSimdPrefix: {
- ++len;
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
+ if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len,
+ simd_opcode_length))
+ return;
break;
}
@@ -3867,7 +3910,10 @@ class ThreadImpl {
case ValueType::kFuncRef:
case ValueType::kExnRef:
case ValueType::kNullRef:
- PrintF("(func|null|exn)ref:unimplemented");
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef:
+ PrintF("(func|null|exn|opt|eq|)ref:unimplemented");
break;
case ValueType::kBottom:
UNREACHABLE();
@@ -4025,7 +4071,7 @@ class ThreadImpl {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
- module()->signature_map.Find(*module()->signatures[sig_index]));
+ module()->signature_map.Find(*module()->signature(sig_index)));
// Bounds check against table size.
if (entry_index >=
static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
@@ -4040,7 +4086,7 @@ class ThreadImpl {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- const FunctionSig* signature = module()->signatures[sig_index];
+ const FunctionSig* signature = module()->signature(sig_index);
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code = GetTargetCode(isolate_, entry.target());
@@ -4053,8 +4099,7 @@ class ThreadImpl {
return CallExternalWasmFunction(isolate_, object_ref, code, signature);
}
- DCHECK(code->kind() == WasmCode::kInterpreterEntry ||
- code->kind() == WasmCode::kFunction);
+ DCHECK_EQ(WasmCode::kFunction, code->kind());
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
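
Several of the SIMD macro rewrites above add the same bookkeeping step: compute the lane result once, feed it through has_nondeterminism, and OR the outcome into possible_nondeterminism_ before storing the lane. The standalone sketch below models that shape under the simplifying assumption that a NaN floating-point result is what counts as possibly nondeterministic (NaN payload bits are not fully specified across platforms); the helper names are illustrative.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Per-type check: floating-point NaNs are flagged, integer results never are.
bool HasNondeterminism(float value) { return std::isnan(value); }
bool HasNondeterminism(double value) { return std::isnan(value); }
bool HasNondeterminism(int32_t) { return false; }

int main() {
  bool possible_nondeterminism = false;
  const float a[4] = {1.0f, 0.0f, 3.0f, -8.0f};
  const float b[4] = {2.0f, 0.0f, 4.0f, 2.0f};
  float res[4];
  for (int i = 0; i < 4; ++i) {
    // Mirrors the macro change: evaluate once, record, then store the lane.
    float lane = a[i] / b[i];  // 0.0f / 0.0f produces NaN in lane 1
    possible_nondeterminism |= HasNondeterminism(lane);
    res[i] = lane;
  }
  std::printf("lane1=%f possible_nondeterminism=%d\n",
              static_cast<double>(res[1]), possible_nondeterminism);
  return 0;
}
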
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index fdc02771b4..64719fb59a 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -73,8 +73,12 @@ class WasmStreaming::WasmStreamingImpl {
void SetClient(std::shared_ptr<Client> client) {
streaming_decoder_->SetModuleCompiledCallback(
- [client](const std::shared_ptr<i::wasm::NativeModule>& native_module) {
- client->OnModuleCompiled(Utils::Convert(native_module));
+ [client, streaming_decoder = streaming_decoder_](
+ const std::shared_ptr<i::wasm::NativeModule>& native_module) {
+ i::Vector<const char> url = streaming_decoder->url();
+ auto compiled_wasm_module =
+ CompiledWasmModule(native_module, url.begin(), url.size());
+ client->OnModuleCompiled(compiled_wasm_module);
});
}
@@ -331,14 +335,10 @@ class InstantiateBytesResultResolver
isolate_->factory()->NewJSObject(isolate_->object_function());
i::Handle<i::String> instance_name =
- isolate_->factory()
- ->NewStringFromOneByte(i::StaticCharVector("instance"))
- .ToHandleChecked();
+ isolate_->factory()->NewStringFromStaticChars("instance");
i::Handle<i::String> module_name =
- isolate_->factory()
- ->NewStringFromOneByte(i::StaticCharVector("module"))
- .ToHandleChecked();
+ isolate_->factory()->NewStringFromStaticChars("module");
i::JSObject::AddProperty(isolate_, result, instance_name, instance,
i::NONE);
@@ -1364,6 +1364,11 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
break;
}
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ case i::wasm::ValueType::kEqRef:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
case i::wasm::ValueType::kStmt:
case i::wasm::ValueType::kS128:
case i::wasm::ValueType::kBottom:
@@ -1812,6 +1817,11 @@ void WebAssemblyGlobalGetValueCommon(
receiver->GetRef()->IsNull());
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ case i::wasm::ValueType::kEqRef:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
case i::wasm::ValueType::kS128:
@@ -1897,6 +1907,11 @@ void WebAssemblyGlobalSetValue(
}
break;
}
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ case i::wasm::ValueType::kEqRef:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
case i::wasm::ValueType::kS128:
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index c11a69ad46..bcfc49dcba 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/signature.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/base/memory.h"
+#include "src/codegen/signature.h"
#include "src/handles/handles.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
-#include "src/zone/zone-containers.h"
-
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-constants.h"
-#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-
-#include "src/base/memory.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -55,6 +53,8 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
direct_calls_(builder->zone()),
asm_offsets_(builder->zone(), 8) {}
+void WasmFunctionBuilder::EmitByte(byte val) { body_.write_u8(val); }
+
void WasmFunctionBuilder::EmitI32V(int32_t val) { body_.write_i32v(val); }
void WasmFunctionBuilder::EmitU32V(uint32_t val) { body_.write_u32v(val); }
@@ -91,7 +91,12 @@ void WasmFunctionBuilder::Emit(WasmOpcode opcode) { body_.write_u8(opcode); }
void WasmFunctionBuilder::EmitWithPrefix(WasmOpcode opcode) {
DCHECK_NE(0, opcode & 0xff00);
body_.write_u8(opcode >> 8);
- body_.write_u8(opcode);
+ if ((opcode >> 8) == WasmOpcode::kSimdPrefix) {
+ // SIMD opcodes are LEB encoded
+ body_.write_u32v(opcode & 0xff);
+ } else {
+ body_.write_u8(opcode);
+ }
}
void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
@@ -234,7 +239,7 @@ void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const {
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
- signatures_(zone),
+ types_(zone),
function_imports_(zone),
global_imports_(zone),
exports_(zone),
@@ -269,9 +274,21 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
auto sig_entry = signature_map_.find(*sig);
if (sig_entry != signature_map_.end()) return sig_entry->second;
- uint32_t index = static_cast<uint32_t>(signatures_.size());
+ uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- signatures_.push_back(sig);
+ types_.push_back(Type(sig));
+ return index;
+}
+
+uint32_t WasmModuleBuilder::AddStructType(StructType* type) {
+ uint32_t index = static_cast<uint32_t>(types_.size());
+ types_.push_back(Type(type));
+ return index;
+}
+
+uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
+ uint32_t index = static_cast<uint32_t>(types_.size());
+ types_.push_back(Type(type));
return index;
}
@@ -334,16 +351,17 @@ uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
return static_cast<uint32_t>(tables_.size() - 1);
}
-uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
- FunctionSig* sig) {
+uint32_t WasmModuleBuilder::AddImport(Vector<const char> name, FunctionSig* sig,
+ Vector<const char> module) {
DCHECK(adding_imports_allowed_);
- function_imports_.push_back({name, AddSignature(sig)});
+ function_imports_.push_back({module, name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
- ValueType type, bool mutability) {
- global_imports_.push_back({name, type.value_type_code(), mutability});
+ ValueType type, bool mutability,
+ Vector<const char> module) {
+ global_imports_.push_back({module, name, type.value_type_code(), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -393,25 +411,56 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
+namespace {
+void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
+ buffer->write_u8(type.value_type_code());
+ if (type.has_immediate()) {
+ buffer->write_u32v(type.ref_index());
+ }
+}
+
+} // namespace
+
void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// == Emit magic =============================================================
buffer->write_u32(kWasmMagic);
buffer->write_u32(kWasmVersion);
- // == Emit signatures ========================================================
- if (signatures_.size() > 0) {
+ // == Emit types =============================================================
+ if (types_.size() > 0) {
size_t start = EmitSection(kTypeSectionCode, buffer);
- buffer->write_size(signatures_.size());
-
- for (FunctionSig* sig : signatures_) {
- buffer->write_u8(kWasmFunctionTypeCode);
- buffer->write_size(sig->parameter_count());
- for (auto param : sig->parameters()) {
- buffer->write_u8(param.value_type_code());
- }
- buffer->write_size(sig->return_count());
- for (auto ret : sig->returns()) {
- buffer->write_u8(ret.value_type_code());
+ buffer->write_size(types_.size());
+
+ for (const Type& type : types_) {
+ switch (type.kind) {
+ case Type::kFunctionSig: {
+ FunctionSig* sig = type.sig;
+ buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_size(sig->parameter_count());
+ for (auto param : sig->parameters()) {
+ WriteValueType(buffer, param);
+ }
+ buffer->write_size(sig->return_count());
+ for (auto ret : sig->returns()) {
+ WriteValueType(buffer, ret);
+ }
+ break;
+ }
+ case Type::kStructType: {
+ StructType* struct_type = type.struct_type;
+ buffer->write_u8(kWasmStructTypeCode);
+ buffer->write_size(struct_type->field_count());
+ for (auto field : struct_type->fields()) {
+ WriteValueType(buffer, field);
+ }
+ break;
+ }
+ case Type::kArrayType: {
+ ArrayType* array_type = type.array_type;
+ buffer->write_u8(kWasmArrayTypeCode);
+ WriteValueType(buffer, array_type->element_type());
+ break;
+ }
}
}
FixupSection(buffer, start);
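
Each function-type entry keeps the standard binary layout: the function type code, a LEB128 parameter count, the parameter value types, a LEB128 return count, and the return value types. The sketch below spells out those bytes for a hypothetical (i32, i32) -> i32 signature, using the well-known codes 0x60 (func type) and 0x7f (i32); the struct/array type codes added by the GC prototype are not reproduced here.

// Sketch: one function-type entry of the type section, as raw bytes.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> entry;
  entry.push_back(0x60);   // function type code
  entry.push_back(2);      // parameter_count (LEB128, one byte here)
  entry.push_back(0x7f);   // i32
  entry.push_back(0x7f);   // i32
  entry.push_back(1);      // return_count
  entry.push_back(0x7f);   // i32
  for (uint8_t b : entry) std::printf("%02x ", b);  // prints: 60 02 7f 7f 01 7f
  std::printf("\n");
}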
@@ -422,15 +471,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kImportSectionCode, buffer);
buffer->write_size(global_imports_.size() + function_imports_.size());
for (auto import : global_imports_) {
- buffer->write_u32v(0); // module name (length)
- buffer->write_string(import.name); // field name
+ buffer->write_string(import.module); // module name
+ buffer->write_string(import.name); // field name
buffer->write_u8(kExternalGlobal);
buffer->write_u8(import.type_code);
buffer->write_u8(import.mutability ? 1 : 0);
}
for (auto import : function_imports_) {
- buffer->write_u32v(0); // module name (length)
- buffer->write_string(import.name); // field name
+ buffer->write_string(import.module); // module name
+ buffer->write_string(import.name); // field name
buffer->write_u8(kExternalFunction);
buffer->write_u32v(import.sig_index);
}
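
With this change each import entry carries a real module-name string instead of a hard-coded zero-length one. A sketch of one function-import entry as it ends up on the wire, assuming the standard kind byte 0x00 for functions and names short enough that their LEB128 length fits in one byte:

// Sketch of a single function-import entry; plain std::vector, not ZoneBuffer.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

static void write_string(std::vector<uint8_t>* out, const std::string& s) {
  out->push_back(static_cast<uint8_t>(s.size()));  // LEB128 length, one byte
  out->insert(out->end(), s.begin(), s.end());
}

int main() {
  std::vector<uint8_t> entry;
  write_string(&entry, "env");   // module name (previously always empty)
  write_string(&entry, "foo");   // field name
  entry.push_back(0x00);         // kExternalFunction
  entry.push_back(0);            // sig_index (LEB128)
  for (uint8_t b : entry) std::printf("%02x ", b);
  std::printf("\n");             // prints: 03 65 6e 76 03 66 6f 6f 00 00
}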
@@ -486,7 +535,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_size(globals_.size());
for (const WasmGlobal& global : globals_) {
- buffer->write_u8(global.type.value_type_code());
+ WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
@@ -540,6 +589,12 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_u8(kExprF64Const);
buffer->write_f64(0.);
break;
+ case ValueType::kOptRef:
+ case ValueType::kFuncRef:
+ case ValueType::kExnRef:
+ case ValueType::kEqRef:
+ buffer->write_u8(kExprRefNull);
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 93ee913bad..896fc3bf9c 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -5,16 +5,15 @@
#ifndef V8_WASM_WASM_MODULE_BUILDER_H_
#define V8_WASM_WASM_MODULE_BUILDER_H_
-#include "src/codegen/signature.h"
-#include "src/zone/zone-containers.h"
-
#include "src/base/memory.h"
+#include "src/codegen/signature.h"
#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -83,6 +82,7 @@ class ZoneBuffer : public ZoneObject {
void write_f64(double val) { write_u64(bit_cast<uint64_t>(val)); }
void write(const byte* data, size_t size) {
+ if (size == 0) return;
EnsureSpace(size);
memcpy(pos_, data, size);
pos_ += size;
@@ -159,6 +159,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
// Building methods.
void SetSignature(FunctionSig* sig);
uint32_t AddLocal(ValueType type);
+ void EmitByte(byte b);
void EmitI32V(int32_t val);
void EmitU32V(uint32_t val);
void EmitCode(const byte* code, uint32_t code_size);
@@ -231,14 +232,17 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
explicit WasmModuleBuilder(Zone* zone);
// Building methods.
- uint32_t AddImport(Vector<const char> name, FunctionSig* sig);
+ uint32_t AddImport(Vector<const char> name, FunctionSig* sig,
+ Vector<const char> module = {});
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
uint32_t AddGlobal(ValueType type, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
uint32_t AddGlobalImport(Vector<const char> name, ValueType type,
- bool mutability);
+ bool mutability, Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
+ uint32_t AddStructType(StructType* type);
+ uint32_t AddArrayType(ArrayType* type);
// In the current implementation, the indirect function table may have
// uninitialized slots at its beginning and/or end, as long as the filled
// slots form a contiguous block in the middle.
@@ -266,15 +270,36 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
- FunctionSig* GetSignature(uint32_t index) { return signatures_[index]; }
+ FunctionSig* GetSignature(uint32_t index) {
+ DCHECK(types_[index].kind == Type::kFunctionSig);
+ return types_[index].sig;
+ }
private:
+ struct Type {
+ enum Kind { kFunctionSig, kStructType, kArrayType };
+ explicit Type(FunctionSig* signature)
+ : kind(kFunctionSig), sig(signature) {}
+ explicit Type(StructType* struct_type)
+ : kind(kStructType), struct_type(struct_type) {}
+ explicit Type(ArrayType* array_type)
+ : kind(kArrayType), array_type(array_type) {}
+ Kind kind;
+ union {
+ FunctionSig* sig;
+ StructType* struct_type;
+ ArrayType* array_type;
+ };
+ };
+
struct WasmFunctionImport {
+ Vector<const char> module;
Vector<const char> name;
uint32_t sig_index;
};
struct WasmGlobalImport {
+ Vector<const char> module;
Vector<const char> name;
ValueTypeCode type_code;
bool mutability;
@@ -306,7 +331,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
friend class WasmFunctionBuilder;
Zone* zone_;
- ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<Type> types_;
ZoneVector<WasmFunctionImport> function_imports_;
ZoneVector<WasmGlobalImport> global_imports_;
ZoneVector<WasmExport> exports_;
@@ -331,7 +356,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
};
inline FunctionSig* WasmFunctionBuilder::signature() {
- return builder_->signatures_[signature_index_];
+ return builder_->types_[signature_index_].sig;
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index f5905615e2..5111a78372 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -30,7 +30,7 @@ namespace wasm {
// static
const uint32_t WasmElemSegment::kNullIndex;
-WireBytesRef DecodedFunctionNames::Lookup(
+WireBytesRef LazilyGeneratedNames::LookupFunctionName(
const ModuleWireBytes& wire_bytes, uint32_t function_index,
Vector<const WasmExport> export_table) const {
base::MutexGuard lock(&mutex_);
@@ -44,18 +44,23 @@ WireBytesRef DecodedFunctionNames::Lookup(
return it->second;
}
-std::pair<WireBytesRef, WireBytesRef> DecodedGlobalNames::Lookup(
- uint32_t global_index, Vector<const WasmImport> import_table,
+std::pair<WireBytesRef, WireBytesRef>
+LazilyGeneratedNames::LookupNameFromImportsAndExports(
+ ImportExportKindCode kind, uint32_t index,
+ Vector<const WasmImport> import_table,
Vector<const WasmExport> export_table) const {
base::MutexGuard lock(&mutex_);
- if (!global_names_) {
- global_names_.reset(
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
+ auto& names = kind == kExternalGlobal ? global_names_ : memory_names_;
+ if (!names) {
+ names.reset(
new std::unordered_map<uint32_t,
std::pair<WireBytesRef, WireBytesRef>>());
- DecodeGlobalNames(import_table, export_table, global_names_.get());
+ GenerateNamesFromImportsAndExports(kind, import_table, export_table,
+ names.get());
}
- auto it = global_names_->find(global_index);
- if (it == global_names_->end()) return {};
+ auto it = names->find(index);
+ if (it == names->end()) return {};
return it->second;
}
@@ -118,7 +123,7 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
return func_index;
}
-void DecodedFunctionNames::AddForTesting(int function_index,
+void LazilyGeneratedNames::AddForTesting(int function_index,
WireBytesRef name) {
base::MutexGuard lock(&mutex_);
if (!function_names_) {
@@ -129,7 +134,7 @@ void DecodedFunctionNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -192,7 +197,7 @@ WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
// Get a string stored in the module bytes representing a function name.
WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
const WasmModule* module) const {
- return GetNameOrNull(module->function_names.Lookup(
+ return GetNameOrNull(module->lazily_generated_names.LookupFunctionName(
*this, function->func_index, VectorOf(module->export_table)));
}
@@ -652,11 +657,11 @@ size_t EstimateStoredSize(const WasmModule* module) {
return sizeof(WasmModule) + VectorSize(module->globals) +
(module->signature_zone ? module->signature_zone->allocation_size()
: 0) +
- VectorSize(module->signatures) + VectorSize(module->signature_ids) +
- VectorSize(module->functions) + VectorSize(module->data_segments) +
- VectorSize(module->tables) + VectorSize(module->import_table) +
- VectorSize(module->export_table) + VectorSize(module->exceptions) +
- VectorSize(module->elem_segments);
+ VectorSize(module->types) + VectorSize(module->type_kinds) +
+ VectorSize(module->signature_ids) + VectorSize(module->functions) +
+ VectorSize(module->data_segments) + VectorSize(module->tables) +
+ VectorSize(module->import_table) + VectorSize(module->export_table) +
+ VectorSize(module->exceptions) + VectorSize(module->elem_segments);
}
size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig) {
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 6c782607bb..a189964ad7 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -12,6 +12,7 @@
#include "src/handles/handles.h"
#include "src/utils/vector.h"
#include "src/wasm/signature-map.h"
+#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
@@ -167,9 +168,8 @@ enum class WasmCompilationHintStrategy : uint8_t {
enum class WasmCompilationHintTier : uint8_t {
kDefault = 0,
- kInterpreter = 1,
- kBaseline = 2,
- kOptimized = 3,
+ kBaseline = 1,
+ kOptimized = 2,
};
// Static representation of a wasm compilation hint
@@ -191,34 +191,33 @@ enum ModuleOrigin : uint8_t {
struct ModuleWireBytes;
-class V8_EXPORT_PRIVATE DecodedFunctionNames {
+class V8_EXPORT_PRIVATE LazilyGeneratedNames {
public:
- WireBytesRef Lookup(const ModuleWireBytes& wire_bytes,
- uint32_t function_index,
- Vector<const WasmExport> export_table) const;
+ WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
+ uint32_t function_index,
+ Vector<const WasmExport> export_table) const;
+
+ // For memory and global.
+ std::pair<WireBytesRef, WireBytesRef> LookupNameFromImportsAndExports(
+ ImportExportKindCode kind, uint32_t index,
+ const Vector<const WasmImport> import_table,
+ const Vector<const WasmExport> export_table) const;
+
void AddForTesting(int function_index, WireBytesRef name);
private:
- // {function_names_} is populated lazily after decoding, and therefore needs a
- // mutex to protect concurrent modifications from multiple {WasmModuleObject}.
+ // {function_names_}, {global_names_} and {memory_names_} are
+ // populated lazily after decoding, and therefore need a mutex to protect
+ // concurrent modifications from multiple {WasmModuleObject}.
mutable base::Mutex mutex_;
mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>>
function_names_;
-};
-
-class V8_EXPORT_PRIVATE DecodedGlobalNames {
- public:
- std::pair<WireBytesRef, WireBytesRef> Lookup(
- uint32_t global_index, Vector<const WasmImport> import_table,
- Vector<const WasmExport> export_table) const;
-
- private:
- // {global_names_} is populated lazily after decoding, and therefore needs a
- // mutex to protect concurrent modifications from multiple {WasmModuleObject}.
- mutable base::Mutex mutex_;
mutable std::unique_ptr<
std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
global_names_;
+ mutable std::unique_ptr<
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
+ memory_names_;
};
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
@@ -249,6 +248,23 @@ class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
std::unique_ptr<AsmJsOffsets> decoded_offsets_;
};
+struct TypeDefinition {
+ explicit TypeDefinition(const FunctionSig* sig) : function_sig(sig) {}
+ explicit TypeDefinition(const StructType* type) : struct_type(type) {}
+ explicit TypeDefinition(const ArrayType* type) : array_type(type) {}
+ union {
+ const FunctionSig* function_sig;
+ const StructType* struct_type;
+ const ArrayType* array_type;
+ };
+};
+
+struct V8_EXPORT_PRIVATE WasmDebugSymbols {
+ enum class Type { None, SourceMap, EmbeddedDWARF, ExternalDWARF };
+ Type type = Type::None;
+ WireBytesRef external_url;
+};
+
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<Zone> signature_zone;
@@ -273,8 +289,44 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<const FunctionSig*> signatures; // by signature index
- std::vector<uint32_t> signature_ids; // by signature index
+ std::vector<TypeDefinition> types; // by type index
+ std::vector<uint8_t> type_kinds; // by type index
+ std::vector<uint32_t> signature_ids; // by signature index
+ void add_signature(const FunctionSig* sig) {
+ types.push_back(TypeDefinition(sig));
+ type_kinds.push_back(kWasmFunctionTypeCode);
+ uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
+ signature_ids.push_back(canonical_id);
+ }
+ const FunctionSig* signature(uint32_t index) const {
+ DCHECK(type_kinds[index] == kWasmFunctionTypeCode);
+ return types[index].function_sig;
+ }
+ bool has_signature(uint32_t index) const {
+ return index < types.size() && type_kinds[index] == kWasmFunctionTypeCode;
+ }
+ void add_struct_type(const StructType* type) {
+ types.push_back(TypeDefinition(type));
+ type_kinds.push_back(kWasmStructTypeCode);
+ }
+ const StructType* struct_type(uint32_t index) const {
+ DCHECK(type_kinds[index] == kWasmStructTypeCode);
+ return types[index].struct_type;
+ }
+ bool has_struct(uint32_t index) const {
+ return index < types.size() && type_kinds[index] == kWasmStructTypeCode;
+ }
+ void add_array_type(const ArrayType* type) {
+ types.push_back(TypeDefinition(type));
+ type_kinds.push_back(kWasmArrayTypeCode);
+ }
+ const ArrayType* array_type(uint32_t index) const {
+ DCHECK(type_kinds[index] == kWasmArrayTypeCode);
+ return types[index].array_type;
+ }
+ bool has_array(uint32_t index) const {
+ return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
+ }
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -286,9 +338,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
- DecodedFunctionNames function_names;
- DecodedGlobalNames global_names;
- std::string source_map_url;
+ LazilyGeneratedNames lazily_generated_names;
+ WasmDebugSymbols debug_symbols;
// Asm.js source position information. Only available for modules compiled
// from asm.js.
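
WasmModule now stores every type-section entry in a TypeDefinition union with a parallel type_kinds byte vector recording what each entry is, and the accessors check the kind before touching the union. A standalone sketch of that layout, with stand-in classes and a placeholder struct-type code:

// Sketch of the parallel types/type_kinds layout; only the pattern mirrors
// the header above, the classes and the struct code are placeholders.
#include <cassert>
#include <cstdint>
#include <vector>

struct FunctionSig {};
struct StructType {};

constexpr uint8_t kFuncTypeCode = 0x60;
constexpr uint8_t kStructTypeCode = 0x5f;  // placeholder value for the sketch

struct Module {
  union TypeDefinition {
    const FunctionSig* function_sig;
    const StructType* struct_type;
  };
  std::vector<TypeDefinition> types;   // by type index
  std::vector<uint8_t> type_kinds;     // parallel to types, one byte per entry

  void add_signature(const FunctionSig* sig) {
    TypeDefinition def;
    def.function_sig = sig;
    types.push_back(def);
    type_kinds.push_back(kFuncTypeCode);
  }
  void add_struct_type(const StructType* type) {
    TypeDefinition def;
    def.struct_type = type;
    types.push_back(def);
    type_kinds.push_back(kStructTypeCode);
  }
  bool has_signature(uint32_t index) const {
    return index < types.size() && type_kinds[index] == kFuncTypeCode;
  }
  const FunctionSig* signature(uint32_t index) const {
    assert(type_kinds[index] == kFuncTypeCode);  // mirrors the DCHECK above
    return types[index].function_sig;
  }
};

int main() {
  Module module;
  FunctionSig sig;
  module.add_signature(&sig);
  assert(module.has_signature(0) && module.signature(0) == &sig);
}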
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 2e75981ff1..9323449344 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -37,6 +37,8 @@ OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
@@ -49,6 +51,8 @@ CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
+CAST_ACCESSOR(WasmStruct)
+CAST_ACCESSOR(WasmArray)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
DEF_GETTER(holder, has_##name, bool) { \
@@ -264,6 +268,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
kExceptionsTableOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_external_functions, FixedArray,
kWasmExternalFunctionsOffset)
+ACCESSORS(WasmInstanceObject, managed_object_maps, FixedArray,
+ kManagedObjectMapsOffset)
void WasmInstanceObject::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
@@ -412,6 +418,51 @@ ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
ACCESSORS(AsmWasmData, export_wrappers, FixedArray, kExportWrappersOffset)
ACCESSORS(AsmWasmData, uses_bitset, HeapNumber, kUsesBitsetOffset)
+wasm::StructType* WasmStruct::type(Map map) {
+ Foreign foreign = map.wasm_type_info();
+ return reinterpret_cast<wasm::StructType*>(foreign.foreign_address());
+}
+
+wasm::StructType* WasmStruct::GcSafeType(Map map) {
+ DCHECK_EQ(WASM_STRUCT_TYPE, map.instance_type());
+ HeapObject raw = HeapObject::cast(map.constructor_or_backpointer());
+ MapWord map_word = raw.map_word();
+ HeapObject forwarded =
+ map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
+ Foreign foreign = Foreign::cast(forwarded);
+ return reinterpret_cast<wasm::StructType*>(foreign.foreign_address());
+}
+
+wasm::StructType* WasmStruct::type() const { return type(map()); }
+
+ObjectSlot WasmStruct::RawField(int raw_offset) {
+ int offset = WasmStruct::kHeaderSize + raw_offset;
+ return ObjectSlot(FIELD_ADDR(*this, offset));
+}
+
+wasm::ArrayType* WasmArray::type(Map map) {
+ DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
+ Foreign foreign = map.wasm_type_info();
+ return reinterpret_cast<wasm::ArrayType*>(foreign.foreign_address());
+}
+
+wasm::ArrayType* WasmArray::GcSafeType(Map map) {
+ DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
+ HeapObject raw = HeapObject::cast(map.constructor_or_backpointer());
+ MapWord map_word = raw.map_word();
+ HeapObject forwarded =
+ map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
+ Foreign foreign = Foreign::cast(forwarded);
+ return reinterpret_cast<wasm::ArrayType*>(foreign.foreign_address());
+}
+
+wasm::ArrayType* WasmArray::type() const { return type(map()); }
+
+int WasmArray::SizeFor(Map map, int length) {
+ int element_size = type(map)->element_type().element_size_bytes();
+ return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
+}
+
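
The array size is the fixed header plus the payload rounded up to a whole number of tagged slots. A small worked example, assuming an 8-byte kTaggedSize and a placeholder header size (the real kHeaderSize comes from the Torque-generated class):

// Worked example of the WasmArray size computation; constants are assumptions.
#include <cstdio>

constexpr int RoundUp(int value, int alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  const int kTaggedSize = 8;     // 64-bit, no pointer compression (assumed)
  const int kHeaderSize = 16;    // placeholder for the generated header size
  int element_size = 4;          // e.g. an i32 element
  int length = 3;
  int size = kHeaderSize + RoundUp(element_size * length, kTaggedSize);
  std::printf("%d\n", size);     // 16 + RoundUp(12, 8) = 16 + 16 = 32
}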
#include "src/objects/object-macros-undef.h"
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 41b9d50312..2883467889 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -165,43 +165,24 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<Script> script, Handle<FixedArray> export_wrappers) {
- const WasmModule* module = native_module->module();
- const bool uses_liftoff =
- FLAG_liftoff && native_module->module()->origin == wasm::kWasmOrigin;
- size_t code_size_estimate =
- wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module, uses_liftoff);
- return New(isolate, std::move(native_module), script, export_wrappers,
- code_size_estimate);
-}
-
-// static
-Handle<WasmModuleObject> WasmModuleObject::New(
- Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
- Handle<Script> script, Handle<FixedArray> export_wrappers,
- size_t code_size_estimate) {
- const WasmModule* module = native_module->module();
-
- // Use the given shared {NativeModule}, but increase its reference count by
- // allocating a new {Managed<T>} that the {WasmModuleObject} references.
- size_t memory_estimate =
- code_size_estimate +
- wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
- Handle<Managed<wasm::NativeModule>> managed_native_module =
- Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
- std::move(native_module));
-
+ Handle<Managed<wasm::NativeModule>> managed_native_module;
+ if (script->type() == Script::TYPE_WASM) {
+ managed_native_module = handle(
+ Managed<wasm::NativeModule>::cast(script->wasm_managed_native_module()),
+ isolate);
+ } else {
+ const WasmModule* module = native_module->module();
+ size_t memory_estimate =
+ native_module->committed_code_space() +
+ wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
+ managed_native_module = Managed<wasm::NativeModule>::FromSharedPtr(
+ isolate, memory_estimate, std::move(native_module));
+ }
Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
module_object->set_export_wrappers(*export_wrappers);
- if (script->type() == Script::TYPE_WASM) {
- script->set_wasm_breakpoint_infos(
- ReadOnlyRoots(isolate).empty_fixed_array());
- script->set_wasm_managed_native_module(*managed_native_module);
- script->set_wasm_weak_instance_list(
- ReadOnlyRoots(isolate).empty_weak_array_list());
- }
- module_object->set_script(*script);
module_object->set_managed_native_module(*managed_native_module);
+ module_object->set_script(*script);
return module_object;
}
@@ -241,9 +222,10 @@ MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
Isolate* isolate, Handle<WasmModuleObject> module_object,
uint32_t func_index) {
DCHECK_LT(func_index, module_object->module()->functions.size());
- wasm::WireBytesRef name = module_object->module()->function_names.Lookup(
- wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
- func_index, VectorOf(module_object->module()->export_table));
+ wasm::WireBytesRef name =
+ module_object->module()->lazily_generated_names.LookupFunctionName(
+ wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
+ func_index, VectorOf(module_object->module()->export_table));
if (!name.is_set()) return {};
return ExtractUtf8StringFromModuleBytes(isolate, module_object, name,
kNoInternalize);
@@ -267,8 +249,9 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
uint32_t func_index) {
DCHECK_GT(module()->functions.size(), func_index);
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
- wasm::WireBytesRef name_ref = module()->function_names.Lookup(
- wire_bytes, func_index, VectorOf(module()->export_table));
+ wasm::WireBytesRef name_ref =
+ module()->lazily_generated_names.LookupFunctionName(
+ wire_bytes, func_index, VectorOf(module()->export_table));
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
return Vector<const uint8_t>::cast(name);
}
@@ -894,18 +877,17 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
if (!backing_store) return -1;
- // Compute new size.
- size_t new_pages = old_pages + pages;
-
// Try to handle shared memory first.
if (old_buffer->is_shared()) {
if (FLAG_wasm_grow_shared_memory) {
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
// Shared memories can only be grown in place; no copying.
- if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
- BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store,
- new_pages);
+ if (result.has_value()) {
+ BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store);
// Broadcasting the update should update this memory object too.
CHECK_NE(*old_buffer, memory_object->array_buffer());
+ size_t new_pages = result.value() + pages;
// If the allocation succeeded, then this can't possibly overflow:
size_t new_byte_length = new_pages * wasm::kWasmPageSize;
// This is a less than check, as it is not guaranteed that the SAB
@@ -914,21 +896,29 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// It is also possible that a call to Grow was in progress when
// handling this call.
CHECK_LE(new_byte_length, memory_object->array_buffer().byte_length());
- return static_cast<int32_t>(old_pages); // success
+ // As {old_pages} was read racefully, we return here the synchronized
+ // value provided by {GrowWasmMemoryInPlace}, to provide the atomic
+ // read-modify-write behavior required by the spec.
+ return static_cast<int32_t>(result.value()); // success
}
}
return -1;
}
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
// Try to grow non-shared memory in-place.
- if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
+ if (result.has_value()) {
// Detach old and create a new one with the grown backing store.
old_buffer->Detach(true);
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
- return static_cast<int32_t>(old_pages); // success
+ DCHECK_EQ(result.value(), old_pages);
+ return static_cast<int32_t>(result.value()); // success
}
+
+ size_t new_pages = old_pages + pages;
// Try allocating a new backing store and copying.
std::unique_ptr<BackingStore> new_backing_store =
backing_store->CopyWasmMemory(isolate, new_pages);
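
The point of returning base::Optional<size_t> here is that memory.grow has to behave like an atomic read-modify-write: the "pages before growing" value must come from the same operation that grew the memory, not from an earlier racy read of old_pages. A sketch of that contract as this hunk suggests it (success returns the previous page count, failure returns nothing), modeled with std::atomic rather than V8's BackingStore:

// Sketch of an atomic grow-in-place returning the synchronized old size.
#include <atomic>
#include <cstdio>
#include <optional>

std::optional<size_t> GrowInPlace(std::atomic<size_t>* pages, size_t delta,
                                  size_t maximum) {
  size_t old_pages = pages->load();
  do {
    if (old_pages + delta > maximum) return std::nullopt;  // grow fails
  } while (!pages->compare_exchange_weak(old_pages, old_pages + delta));
  return old_pages;  // the page count immediately before this grow
}

int main() {
  std::atomic<size_t> pages{1};
  if (auto old_pages = GrowInPlace(&pages, 2, 10)) {
    std::printf("grow returned %zu, now %zu pages\n", *old_pages, pages.load());
  }
}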
@@ -1240,6 +1230,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
module_object->native_module()->jump_table_start());
instance->set_hook_on_function_call_address(
isolate->debug()->hook_on_function_call_address());
+ instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
// Insert the new instance into the scripts weak list of instances. This list
// is used for breakpoints affecting all instances belonging to the script.
@@ -1472,7 +1463,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
- wasm::ExecutionTier::kNone);
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
wasm::WasmCode* published_code =
native_module->PublishCode(std::move(wasm_code));
isolate->counters()->wasm_generated_code_size()->Increment(
@@ -1523,27 +1514,48 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
MaybeHandle<String> WasmInstanceObject::GetGlobalNameOrNull(
Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t global_index) {
+ return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalGlobal,
+ global_index);
+}
+
+// static
+MaybeHandle<String> WasmInstanceObject::GetMemoryNameOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t global_index) {
+ return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalMemory,
+ global_index);
+}
+
+// static
+MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ wasm::ImportExportKindCode kind, uint32_t index) {
+ DCHECK(kind == wasm::ImportExportKindCode::kExternalGlobal ||
+ kind == wasm::ImportExportKindCode::kExternalMemory);
wasm::ModuleWireBytes wire_bytes(
instance->module_object().native_module()->wire_bytes());
  // This is a pair of <module_name, field_name>.
// If field_name is not set then we don't generate a name. Else if module_name
- // is set then it is imported global. Otherwise it is exported global.
+ // is set then it is an imported one. Otherwise it is an exported one.
std::pair<wasm::WireBytesRef, wasm::WireBytesRef> name_ref =
- instance->module()->global_names.Lookup(
- global_index, VectorOf(instance->module()->import_table),
- VectorOf(instance->module()->export_table));
+ instance->module()
+ ->lazily_generated_names.LookupNameFromImportsAndExports(
+ kind, index, VectorOf(instance->module()->import_table),
+ VectorOf(instance->module()->export_table));
if (!name_ref.second.is_set()) return {};
Vector<const char> field_name = wire_bytes.GetNameOrNull(name_ref.second);
if (!name_ref.first.is_set()) {
return isolate->factory()->NewStringFromUtf8(VectorOf(field_name));
}
Vector<const char> module_name = wire_bytes.GetNameOrNull(name_ref.first);
- std::string global_name;
- global_name.append(module_name.begin(), module_name.end());
- global_name.append(".");
- global_name.append(field_name.begin(), field_name.end());
- return isolate->factory()->NewStringFromUtf8(VectorOf(global_name));
+ std::string full_name;
+ full_name.append(module_name.begin(), module_name.end());
+ full_name.append(".");
+ full_name.append(field_name.begin(), field_name.end());
+ return isolate->factory()->NewStringFromUtf8(VectorOf(full_name));
}
// static
@@ -1719,6 +1731,9 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
case wasm::ValueType::kFuncRef:
case wasm::ValueType::kNullRef:
case wasm::ValueType::kExnRef:
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kEqRef:
encoded_size += 1;
break;
case wasm::ValueType::kStmt:
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 972e8d31cd..217bd50d15 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -13,7 +13,9 @@
#include "src/debug/debug.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
+#include "src/wasm/struct-types.h"
#include "src/wasm/value-type.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -150,10 +152,6 @@ class WasmModuleObject : public JSObject {
V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<Script> script, Handle<FixedArray> export_wrappers);
- V8_EXPORT_PRIVATE static Handle<WasmModuleObject> New(
- Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
- Handle<Script> script, Handle<FixedArray> export_wrappers,
- size_t code_size_estimate);
// Check whether this module was generated from asm.js source.
inline bool is_asm_js();
@@ -389,6 +387,7 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
DECL_OPTIONAL_ACCESSORS(wasm_external_functions, FixedArray)
+ DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
@@ -448,6 +447,7 @@ class WasmInstanceObject : public JSObject {
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kExceptionsTableOffset, kTaggedSize) \
V(kWasmExternalFunctionsOffset, kTaggedSize) \
+ V(kManagedObjectMapsOffset, kTaggedSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
V(kDataSegmentStartsOffset, kSystemPointerSize) \
V(kDataSegmentSizesOffset, kSystemPointerSize) \
@@ -485,7 +485,8 @@ class WasmInstanceObject : public JSObject {
kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kExceptionsTableOffset,
- kWasmExternalFunctionsOffset};
+ kWasmExternalFunctionsOffset,
+ kManagedObjectMapsOffset};
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
@@ -572,9 +573,19 @@ class WasmInstanceObject : public JSObject {
Handle<WasmInstanceObject>,
uint32_t global_index);
+ // Get the name of a memory in the given instance by index.
+ static MaybeHandle<String> GetMemoryNameOrNull(Isolate*,
+ Handle<WasmInstanceObject>,
+ uint32_t memory_index);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
+ // Get the name in the given instance by index and kind.
+ static MaybeHandle<String> GetNameFromImportsAndExportsOrNull(
+ Isolate*, Handle<WasmInstanceObject>, wasm::ImportExportKindCode kind,
+ uint32_t index);
+
static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
Handle<WasmModuleObject>);
static void InitElemSegmentArrays(Handle<WasmInstanceObject>,
@@ -831,65 +842,6 @@ class WasmDebugInfo : public Struct {
V8_EXPORT_PRIVATE static wasm::WasmInterpreter* SetupForTesting(
Handle<WasmInstanceObject>);
- // Prepare WasmDebugInfo for stepping in the given function.
- V8_EXPORT_PRIVATE static void PrepareStepIn(Handle<WasmDebugInfo>,
- int func_index);
-
- // Set a breakpoint in the given function at the given byte offset within that
- // function. This will redirect all future calls to this function to the
- // interpreter and will always pause at the given offset.
- V8_EXPORT_PRIVATE static void SetBreakpoint(Handle<WasmDebugInfo>,
- int func_index, int offset);
-
- // Clear a previously set breakpoint in the given function at the given byte
- // offset within that function.
- V8_EXPORT_PRIVATE static void ClearBreakpoint(Handle<WasmDebugInfo>,
- int func_index, int offset);
-
- // Make a set of functions always execute in the interpreter without setting
- // breakpoints.
- V8_EXPORT_PRIVATE static void RedirectToInterpreter(Handle<WasmDebugInfo>,
- Vector<int> func_indexes);
-
- void PrepareStep(StepAction);
-
- // Execute the specified function in the interpreter. Read arguments from the
- // {argument_values} vector and write to {return_values} on regular exit.
- // The frame_pointer will be used to identify the new activation of the
- // interpreter for unwinding and frame inspection.
- // Returns true if exited regularly, false if a trap occurred. In the latter
- // case, a pending exception will have been set on the isolate.
- static bool RunInterpreter(Isolate* isolate, Handle<WasmDebugInfo>,
- Address frame_pointer, int func_index,
- Vector<wasm::WasmValue> argument_values,
- Vector<wasm::WasmValue> return_values);
-
- // Get the stack of the wasm interpreter as pairs of <function index, byte
- // offset>. The list is ordered bottom-to-top, i.e. caller before callee.
- std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
- Address frame_pointer);
-
- int NumberOfActiveFrames(Address frame_pointer);
-
- V8_EXPORT_PRIVATE
- std::unique_ptr<wasm::InterpretedFrame, wasm::InterpretedFrameDeleter>
- GetInterpretedFrame(Address frame_pointer, int frame_index);
-
- // Returns the number of calls / function frames executed in the interpreter.
- V8_EXPORT_PRIVATE uint64_t NumInterpretedCalls();
-
- // Get local scope details for a specific interpreted frame. It contains
- // information about parameters, locals, and stack values.
- static Handle<JSObject> GetLocalScopeObject(Handle<WasmDebugInfo>,
- Address frame_pointer,
- int frame_index);
-
- // Get stack scope details for a specific interpreted frame. It contains
- // information about stack values.
- static Handle<JSObject> GetStackScopeObject(Handle<WasmDebugInfo>,
- Address frame_pointer,
- int frame_index);
-
V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
const wasm::FunctionSig*);
@@ -930,8 +882,8 @@ class WasmScript : public AllStatic {
V8_EXPORT_PRIVATE static bool ClearBreakPointById(Handle<Script>,
int breakpoint_id);
- static void SetBreakpointsOnNewInstance(Handle<Script>,
- Handle<WasmInstanceObject>);
+ // Remove all set breakpoints.
+ static void ClearAllBreakpoints(Script);
// Get a list of all possible breakpoints within a given range of this module.
V8_EXPORT_PRIVATE static bool GetPossibleBreakpoints(
@@ -947,9 +899,6 @@ class WasmScript : public AllStatic {
// Helper functions that update the breakpoint info list.
static void AddBreakpointToInfo(Handle<Script>, int position,
Handle<BreakPoint> break_point);
-
- static bool RemoveBreakpointFromInfo(Handle<Script>, int position,
- Handle<BreakPoint> break_point);
};
// Tags provide an object identity for each exception defined in a wasm module
@@ -989,6 +938,38 @@ class AsmWasmData : public Struct {
OBJECT_CONSTRUCTORS(AsmWasmData, Struct);
};
+class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
+ public:
+ static inline wasm::StructType* type(Map map);
+ inline wasm::StructType* type() const;
+ static inline wasm::StructType* GcSafeType(Map map);
+
+ inline ObjectSlot RawField(int raw_offset);
+
+ DECL_CAST(WasmStruct)
+ DECL_PRINTER(WasmStruct)
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(WasmStruct)
+};
+
+class WasmArray : public TorqueGeneratedWasmArray<WasmArray, HeapObject> {
+ public:
+ static inline wasm::ArrayType* type(Map map);
+ inline wasm::ArrayType* type() const;
+ static inline wasm::ArrayType* GcSafeType(Map map);
+
+ static inline int SizeFor(Map map, int length);
+
+ DECL_CAST(WasmArray)
+ DECL_PRINTER(WasmArray)
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(WasmArray)
+};
+
#undef DECL_OPTIONAL_ACCESSORS
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 8eda9aba2e..e611ced16e 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -102,3 +102,15 @@ extern class AsmWasmData extends Struct {
export_wrappers: FixedArray;
uses_bitset: HeapNumber;
}
+
+@generateCppClass
+extern class WasmStruct extends HeapObject {
+}
+
+@generateCppClass
+extern class WasmArray extends HeapObject {
+ length: uint32;
+
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+}
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 769eba35a1..53869e86a5 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -116,6 +116,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_REF_OP(Null, "null")
CASE_REF_OP(IsNull, "is_null")
CASE_REF_OP(Func, "func")
+ CASE_REF_OP(AsNonNull, "as_non_null")
CASE_I32_OP(ConvertI64, "wrap_i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
@@ -148,6 +149,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(CallIndirect, "call_indirect")
CASE_OP(ReturnCall, "return_call")
CASE_OP(ReturnCallIndirect, "return_call_indirect")
+ CASE_OP(BrOnNull, "br_on_null")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
@@ -321,6 +323,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I16x8_OP(BitMask, "bitmask")
CASE_I32x4_OP(BitMask, "bitmask")
+ CASE_F32x4_OP(Pmin, "pmin")
+ CASE_F32x4_OP(Pmax, "pmax")
+ CASE_F64x2_OP(Pmin, "pmin")
+ CASE_F64x2_OP(Pmax, "pmax")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
@@ -335,9 +342,44 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
- default : return "unknown";
+ // GC operations.
+ CASE_OP(StructNew, "struct.new")
+ CASE_OP(StructNewSub, "struct.new_sub")
+ CASE_OP(StructNewDefault, "struct.new_default")
+ CASE_OP(StructGet, "struct.get")
+ CASE_OP(StructGetS, "struct.get_s")
+ CASE_OP(StructGetU, "struct.get_u")
+ CASE_OP(StructSet, "struct.set")
+ CASE_OP(ArrayNew, "array.new")
+ CASE_OP(ArrayNewSub, "array.new_sub")
+ CASE_OP(ArrayNewDefault, "array.new_default")
+ CASE_OP(ArrayGet, "array.get")
+ CASE_OP(ArrayGetS, "array.get_s")
+ CASE_OP(ArrayGetU, "array.get_u")
+ CASE_OP(ArrayLen, "array.len")
+ CASE_OP(ArraySet, "array.set")
+ CASE_OP(I31New, "i31.new")
+ CASE_OP(I31GetS, "i31.get_s")
+ CASE_OP(I31GetU, "i31.get_u")
+ CASE_OP(RttGet, "rtt.get")
+ CASE_OP(RttSub, "rtt.sub")
+ CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefCast, "ref.cast")
+ CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(RefEq, "ref.eq")
+
+
+ case kNumericPrefix:
+ case kSimdPrefix:
+ case kAtomicPrefix:
+ case kGCPrefix:
+ return "unknown";
// clang-format on
}
+ // Even though the switch above handles all well-defined enum values,
+ // random modules (e.g. fuzzer generated) can call this function with
+ // random (invalid) opcodes. Handle those here:
+ return "invalid opcode";
}
#undef CASE_OP
@@ -427,6 +469,7 @@ bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
case kExprRefNull:
case kExprRefIsNull:
case kExprRefFunc:
+ case kExprRefAsNonNull:
return true;
default:
return false;
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index d5c1644824..8a17b9984e 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -37,7 +37,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(Br, 0x0c, _) \
V(BrIf, 0x0d, _) \
V(BrTable, 0x0e, _) \
- V(Return, 0x0f, _)
+ V(Return, 0x0f, _) \
+ V(BrOnNull, 0xd4, _) /* gc prototype */
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
@@ -60,7 +61,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32Const, 0x43, _) \
V(F64Const, 0x44, _) \
V(RefNull, 0xd0, _) \
- V(RefFunc, 0xd2, _)
+ V(RefFunc, 0xd2, _) \
+ V(RefAsNonNull, 0xd3, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -227,9 +229,13 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
-#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefIsNull, 0xd1, i_r)
+#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
+ V(RefIsNull, 0xd1, i_r) \
+ V(RefEq, 0xd5, i_rr) // made-up opcode, guessing future spec (GC)
// For compatibility with Asm.js.
+// These opcodes are not spec'ed (or visible) externally; the idea is
+// to use unused ranges for internal purposes.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
V(F64Acos, 0xc5, d_d) \
V(F64Asin, 0xc6, d_d) \
@@ -242,9 +248,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F64Atan2, 0xcd, d_dd) \
V(F64Pow, 0xce, d_dd) \
V(F64Mod, 0xcf, d_dd) \
- V(I32AsmjsDivS, 0xd3, i_ii) \
- V(I32AsmjsDivU, 0xd4, i_ii) \
- V(I32AsmjsRemS, 0xd5, i_ii) \
+ V(I32AsmjsDivS, 0xe7, i_ii) \
+ V(I32AsmjsDivU, 0xe8, i_ii) \
+ V(I32AsmjsRemS, 0xe9, i_ii) \
V(I32AsmjsRemU, 0xd6, i_ii) \
V(I32AsmjsLoadMem8S, 0xd7, i_i) \
V(I32AsmjsLoadMem8U, 0xd8, i_i) \
@@ -265,217 +271,221 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i) \
- V(S128StoreMem, 0xfd01, v_is) \
- V(S8x16LoadSplat, 0xfdc2, s_i) \
- V(S16x8LoadSplat, 0xfdc3, s_i) \
- V(S32x4LoadSplat, 0xfdc4, s_i) \
- V(S64x2LoadSplat, 0xfdc5, s_i) \
- V(I16x8Load8x8S, 0xfdd2, s_i) \
- V(I16x8Load8x8U, 0xfdd3, s_i) \
- V(I32x4Load16x4S, 0xfdd4, s_i) \
- V(I32x4Load16x4U, 0xfdd5, s_i) \
- V(I64x2Load32x2S, 0xfdd6, s_i) \
- V(I64x2Load32x2U, 0xfdd7, s_i)
-
-#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd03, s_ss)
+ V(I16x8Load8x8S, 0xfd01, s_i) \
+ V(I16x8Load8x8U, 0xfd02, s_i) \
+ V(I32x4Load16x4S, 0xfd03, s_i) \
+ V(I32x4Load16x4U, 0xfd04, s_i) \
+ V(I64x2Load32x2S, 0xfd05, s_i) \
+ V(I64x2Load32x2U, 0xfd06, s_i) \
+ V(S8x16LoadSplat, 0xfd07, s_i) \
+ V(S16x8LoadSplat, 0xfd08, s_i) \
+ V(S32x4LoadSplat, 0xfd09, s_i) \
+ V(S64x2LoadSplat, 0xfd0a, s_i) \
+ V(S128StoreMem, 0xfd0b, v_is)
+
+#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd0d, s_ss)
#define FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
- V(I8x16Splat, 0xfd04, s_i) \
- V(I16x8Splat, 0xfd08, s_i) \
- V(I32x4Splat, 0xfd0c, s_i) \
- V(I64x2Splat, 0xfd0f, s_l) \
- V(F32x4Splat, 0xfd12, s_f) \
- V(F64x2Splat, 0xfd15, s_d) \
- V(I8x16Eq, 0xfd18, s_ss) \
- V(I8x16Ne, 0xfd19, s_ss) \
- V(I8x16LtS, 0xfd1a, s_ss) \
- V(I8x16LtU, 0xfd1b, s_ss) \
- V(I8x16GtS, 0xfd1c, s_ss) \
- V(I8x16GtU, 0xfd1d, s_ss) \
- V(I8x16LeS, 0xfd1e, s_ss) \
- V(I8x16LeU, 0xfd1f, s_ss) \
- V(I8x16GeS, 0xfd20, s_ss) \
- V(I8x16GeU, 0xfd21, s_ss) \
- V(I16x8Eq, 0xfd22, s_ss) \
- V(I16x8Ne, 0xfd23, s_ss) \
- V(I16x8LtS, 0xfd24, s_ss) \
- V(I16x8LtU, 0xfd25, s_ss) \
- V(I16x8GtS, 0xfd26, s_ss) \
- V(I16x8GtU, 0xfd27, s_ss) \
- V(I16x8LeS, 0xfd28, s_ss) \
- V(I16x8LeU, 0xfd29, s_ss) \
- V(I16x8GeS, 0xfd2a, s_ss) \
- V(I16x8GeU, 0xfd2b, s_ss) \
- V(I32x4Eq, 0xfd2c, s_ss) \
- V(I32x4Ne, 0xfd2d, s_ss) \
- V(I32x4LtS, 0xfd2e, s_ss) \
- V(I32x4LtU, 0xfd2f, s_ss) \
- V(I32x4GtS, 0xfd30, s_ss) \
- V(I32x4GtU, 0xfd31, s_ss) \
- V(I32x4LeS, 0xfd32, s_ss) \
- V(I32x4LeU, 0xfd33, s_ss) \
- V(I32x4GeS, 0xfd34, s_ss) \
- V(I32x4GeU, 0xfd35, s_ss) \
- V(F32x4Eq, 0xfd40, s_ss) \
- V(F32x4Ne, 0xfd41, s_ss) \
- V(F32x4Lt, 0xfd42, s_ss) \
- V(F32x4Gt, 0xfd43, s_ss) \
- V(F32x4Le, 0xfd44, s_ss) \
- V(F32x4Ge, 0xfd45, s_ss) \
- V(F64x2Eq, 0xfd46, s_ss) \
- V(F64x2Ne, 0xfd47, s_ss) \
- V(F64x2Lt, 0xfd48, s_ss) \
- V(F64x2Gt, 0xfd49, s_ss) \
- V(F64x2Le, 0xfd4a, s_ss) \
- V(F64x2Ge, 0xfd4b, s_ss) \
- V(S128Not, 0xfd4c, s_s) \
- V(S128And, 0xfd4d, s_ss) \
- V(S128Or, 0xfd4e, s_ss) \
- V(S128Xor, 0xfd4f, s_ss) \
- V(S128Select, 0xfd50, s_sss) \
- V(I8x16Neg, 0xfd51, s_s) \
- V(S1x16AnyTrue, 0xfd52, i_s) \
- V(S1x16AllTrue, 0xfd53, i_s) \
- V(I8x16Shl, 0xfd54, s_si) \
- V(I8x16ShrS, 0xfd55, s_si) \
- V(I8x16ShrU, 0xfd56, s_si) \
- V(I8x16Add, 0xfd57, s_ss) \
- V(I8x16AddSaturateS, 0xfd58, s_ss) \
- V(I8x16AddSaturateU, 0xfd59, s_ss) \
- V(I8x16Sub, 0xfd5a, s_ss) \
- V(I8x16SubSaturateS, 0xfd5b, s_ss) \
- V(I8x16SubSaturateU, 0xfd5c, s_ss) \
- V(I8x16Mul, 0xfd5d, s_ss) \
- V(I8x16MinS, 0xfd5e, s_ss) \
- V(I8x16MinU, 0xfd5f, s_ss) \
- V(I8x16MaxS, 0xfd60, s_ss) \
- V(I8x16MaxU, 0xfd61, s_ss) \
- V(I16x8Neg, 0xfd62, s_s) \
- V(S1x8AnyTrue, 0xfd63, i_s) \
- V(S1x8AllTrue, 0xfd64, i_s) \
- V(I16x8Shl, 0xfd65, s_si) \
- V(I16x8ShrS, 0xfd66, s_si) \
- V(I16x8ShrU, 0xfd67, s_si) \
- V(I16x8Add, 0xfd68, s_ss) \
- V(I16x8AddSaturateS, 0xfd69, s_ss) \
- V(I16x8AddSaturateU, 0xfd6a, s_ss) \
- V(I16x8Sub, 0xfd6b, s_ss) \
- V(I16x8SubSaturateS, 0xfd6c, s_ss) \
- V(I16x8SubSaturateU, 0xfd6d, s_ss) \
- V(I16x8Mul, 0xfd6e, s_ss) \
- V(I16x8MinS, 0xfd6f, s_ss) \
- V(I16x8MinU, 0xfd70, s_ss) \
- V(I16x8MaxS, 0xfd71, s_ss) \
- V(I16x8MaxU, 0xfd72, s_ss) \
- V(I32x4Neg, 0xfd73, s_s) \
- V(S1x4AnyTrue, 0xfd74, i_s) \
- V(S1x4AllTrue, 0xfd75, i_s) \
- V(I32x4Shl, 0xfd76, s_si) \
- V(I32x4ShrS, 0xfd77, s_si) \
- V(I32x4ShrU, 0xfd78, s_si) \
- V(I32x4Add, 0xfd79, s_ss) \
- V(I32x4Sub, 0xfd7c, s_ss) \
- V(I32x4Mul, 0xfd7f, s_ss) \
- V(I32x4MinS, 0xfd80, s_ss) \
- V(I32x4MinU, 0xfd81, s_ss) \
- V(I32x4MaxS, 0xfd82, s_ss) \
- V(I32x4MaxU, 0xfd83, s_ss) \
- V(I64x2Neg, 0xfd84, s_s) \
- V(I64x2Shl, 0xfd87, s_si) \
- V(I64x2ShrS, 0xfd88, s_si) \
- V(I64x2ShrU, 0xfd89, s_si) \
- V(I64x2Add, 0xfd8a, s_ss) \
- V(I64x2Sub, 0xfd8d, s_ss) \
- V(I64x2Mul, 0xfd8c, s_ss) \
- V(F32x4Abs, 0xfd95, s_s) \
- V(F32x4Neg, 0xfd96, s_s) \
- V(F32x4Sqrt, 0xfd97, s_s) \
- V(F32x4Add, 0xfd9a, s_ss) \
- V(F32x4Sub, 0xfd9b, s_ss) \
- V(F32x4Mul, 0xfd9c, s_ss) \
- V(F32x4Div, 0xfd9d, s_ss) \
- V(F32x4Min, 0xfd9e, s_ss) \
- V(F32x4Max, 0xfd9f, s_ss) \
- V(F64x2Abs, 0xfda0, s_s) \
- V(F64x2Neg, 0xfda1, s_s) \
- V(F64x2Sqrt, 0xfda2, s_s) \
- V(F64x2Add, 0xfda5, s_ss) \
- V(F64x2Sub, 0xfda6, s_ss) \
- V(F64x2Mul, 0xfda7, s_ss) \
- V(F64x2Div, 0xfda8, s_ss) \
- V(F64x2Min, 0xfda9, s_ss) \
- V(F64x2Max, 0xfdaa, s_ss) \
- V(I32x4SConvertF32x4, 0xfdab, s_s) \
- V(I32x4UConvertF32x4, 0xfdac, s_s) \
- V(F32x4SConvertI32x4, 0xfdaf, s_s) \
- V(F32x4UConvertI32x4, 0xfdb0, s_s) \
- V(S8x16Swizzle, 0xfdc0, s_ss) \
- V(I8x16SConvertI16x8, 0xfdc6, s_ss) \
- V(I8x16UConvertI16x8, 0xfdc7, s_ss) \
- V(I16x8SConvertI32x4, 0xfdc8, s_ss) \
- V(I16x8UConvertI32x4, 0xfdc9, s_ss) \
- V(I16x8SConvertI8x16Low, 0xfdca, s_s) \
- V(I16x8SConvertI8x16High, 0xfdcb, s_s) \
- V(I16x8UConvertI8x16Low, 0xfdcc, s_s) \
- V(I16x8UConvertI8x16High, 0xfdcd, s_s) \
- V(I32x4SConvertI16x8Low, 0xfdce, s_s) \
- V(I32x4SConvertI16x8High, 0xfdcf, s_s) \
- V(I32x4UConvertI16x8Low, 0xfdd0, s_s) \
- V(I32x4UConvertI16x8High, 0xfdd1, s_s) \
- V(S128AndNot, 0xfdd8, s_ss) \
- V(I8x16RoundingAverageU, 0xfdd9, s_ss) \
- V(I16x8RoundingAverageU, 0xfdda, s_ss) \
- V(I8x16Abs, 0xfde1, s_s) \
- V(I16x8Abs, 0xfde2, s_s) \
- V(I32x4Abs, 0xfde3, s_s)
+ V(S8x16Swizzle, 0xfd0e, s_ss) \
+ V(I8x16Splat, 0xfd0f, s_i) \
+ V(I16x8Splat, 0xfd10, s_i) \
+ V(I32x4Splat, 0xfd11, s_i) \
+ V(I64x2Splat, 0xfd12, s_l) \
+ V(F32x4Splat, 0xfd13, s_f) \
+ V(F64x2Splat, 0xfd14, s_d) \
+ V(I8x16Eq, 0xfd23, s_ss) \
+ V(I8x16Ne, 0xfd24, s_ss) \
+ V(I8x16LtS, 0xfd25, s_ss) \
+ V(I8x16LtU, 0xfd26, s_ss) \
+ V(I8x16GtS, 0xfd27, s_ss) \
+ V(I8x16GtU, 0xfd28, s_ss) \
+ V(I8x16LeS, 0xfd29, s_ss) \
+ V(I8x16LeU, 0xfd2a, s_ss) \
+ V(I8x16GeS, 0xfd2b, s_ss) \
+ V(I8x16GeU, 0xfd2c, s_ss) \
+ V(I16x8Eq, 0xfd2d, s_ss) \
+ V(I16x8Ne, 0xfd2e, s_ss) \
+ V(I16x8LtS, 0xfd2f, s_ss) \
+ V(I16x8LtU, 0xfd30, s_ss) \
+ V(I16x8GtS, 0xfd31, s_ss) \
+ V(I16x8GtU, 0xfd32, s_ss) \
+ V(I16x8LeS, 0xfd33, s_ss) \
+ V(I16x8LeU, 0xfd34, s_ss) \
+ V(I16x8GeS, 0xfd35, s_ss) \
+ V(I16x8GeU, 0xfd36, s_ss) \
+ V(I32x4Eq, 0xfd37, s_ss) \
+ V(I32x4Ne, 0xfd38, s_ss) \
+ V(I32x4LtS, 0xfd39, s_ss) \
+ V(I32x4LtU, 0xfd3a, s_ss) \
+ V(I32x4GtS, 0xfd3b, s_ss) \
+ V(I32x4GtU, 0xfd3c, s_ss) \
+ V(I32x4LeS, 0xfd3d, s_ss) \
+ V(I32x4LeU, 0xfd3e, s_ss) \
+ V(I32x4GeS, 0xfd3f, s_ss) \
+ V(I32x4GeU, 0xfd40, s_ss) \
+ V(F32x4Eq, 0xfd41, s_ss) \
+ V(F32x4Ne, 0xfd42, s_ss) \
+ V(F32x4Lt, 0xfd43, s_ss) \
+ V(F32x4Gt, 0xfd44, s_ss) \
+ V(F32x4Le, 0xfd45, s_ss) \
+ V(F32x4Ge, 0xfd46, s_ss) \
+ V(F64x2Eq, 0xfd47, s_ss) \
+ V(F64x2Ne, 0xfd48, s_ss) \
+ V(F64x2Lt, 0xfd49, s_ss) \
+ V(F64x2Gt, 0xfd4a, s_ss) \
+ V(F64x2Le, 0xfd4b, s_ss) \
+ V(F64x2Ge, 0xfd4c, s_ss) \
+ V(S128Not, 0xfd4d, s_s) \
+ V(S128And, 0xfd4e, s_ss) \
+ V(S128AndNot, 0xfd4f, s_ss) \
+ V(S128Or, 0xfd50, s_ss) \
+ V(S128Xor, 0xfd51, s_ss) \
+ V(S128Select, 0xfd52, s_sss) \
+ V(I8x16Abs, 0xfd60, s_s) \
+ V(I8x16Neg, 0xfd61, s_s) \
+ V(S1x16AnyTrue, 0xfd62, i_s) \
+ V(S1x16AllTrue, 0xfd63, i_s) \
+ V(I8x16SConvertI16x8, 0xfd65, s_ss) \
+ V(I8x16UConvertI16x8, 0xfd66, s_ss) \
+ V(I8x16Shl, 0xfd6b, s_si) \
+ V(I8x16ShrS, 0xfd6c, s_si) \
+ V(I8x16ShrU, 0xfd6d, s_si) \
+ V(I8x16Add, 0xfd6e, s_ss) \
+ V(I8x16AddSaturateS, 0xfd6f, s_ss) \
+ V(I8x16AddSaturateU, 0xfd70, s_ss) \
+ V(I8x16Sub, 0xfd71, s_ss) \
+ V(I8x16SubSaturateS, 0xfd72, s_ss) \
+ V(I8x16SubSaturateU, 0xfd73, s_ss) \
+ V(I8x16MinS, 0xfd76, s_ss) \
+ V(I8x16MinU, 0xfd77, s_ss) \
+ V(I8x16MaxS, 0xfd78, s_ss) \
+ V(I8x16MaxU, 0xfd79, s_ss) \
+ V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
+ V(I16x8Abs, 0xfd80, s_s) \
+ V(I16x8Neg, 0xfd81, s_s) \
+ V(S1x8AnyTrue, 0xfd82, i_s) \
+ V(S1x8AllTrue, 0xfd83, i_s) \
+ V(I16x8SConvertI32x4, 0xfd85, s_ss) \
+ V(I16x8UConvertI32x4, 0xfd86, s_ss) \
+ V(I16x8SConvertI8x16Low, 0xfd87, s_s) \
+ V(I16x8SConvertI8x16High, 0xfd88, s_s) \
+ V(I16x8UConvertI8x16Low, 0xfd89, s_s) \
+ V(I16x8UConvertI8x16High, 0xfd8a, s_s) \
+ V(I16x8Shl, 0xfd8b, s_si) \
+ V(I16x8ShrS, 0xfd8c, s_si) \
+ V(I16x8ShrU, 0xfd8d, s_si) \
+ V(I16x8Add, 0xfd8e, s_ss) \
+ V(I16x8AddSaturateS, 0xfd8f, s_ss) \
+ V(I16x8AddSaturateU, 0xfd90, s_ss) \
+ V(I16x8Sub, 0xfd91, s_ss) \
+ V(I16x8SubSaturateS, 0xfd92, s_ss) \
+ V(I16x8SubSaturateU, 0xfd93, s_ss) \
+ V(I16x8Mul, 0xfd95, s_ss) \
+ V(I16x8MinS, 0xfd96, s_ss) \
+ V(I16x8MinU, 0xfd97, s_ss) \
+ V(I16x8MaxS, 0xfd98, s_ss) \
+ V(I16x8MaxU, 0xfd99, s_ss) \
+ V(I16x8RoundingAverageU, 0xfd9b, s_ss) \
+ V(I32x4Abs, 0xfda0, s_s) \
+ V(I32x4Neg, 0xfda1, s_s) \
+ V(S1x4AnyTrue, 0xfda2, i_s) \
+ V(S1x4AllTrue, 0xfda3, i_s) \
+ V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
+ V(I32x4SConvertI16x8High, 0xfda8, s_s) \
+ V(I32x4UConvertI16x8Low, 0xfda9, s_s) \
+ V(I32x4UConvertI16x8High, 0xfdaa, s_s) \
+ V(I32x4Shl, 0xfdab, s_si) \
+ V(I32x4ShrS, 0xfdac, s_si) \
+ V(I32x4ShrU, 0xfdad, s_si) \
+ V(I32x4Add, 0xfdae, s_ss) \
+ V(I32x4Sub, 0xfdb1, s_ss) \
+ V(I32x4Mul, 0xfdb5, s_ss) \
+ V(I32x4MinS, 0xfdb6, s_ss) \
+ V(I32x4MinU, 0xfdb7, s_ss) \
+ V(I32x4MaxS, 0xfdb8, s_ss) \
+ V(I32x4MaxU, 0xfdb9, s_ss) \
+ V(I64x2Neg, 0xfdc1, s_s) \
+ V(I64x2Shl, 0xfdcb, s_si) \
+ V(I64x2ShrS, 0xfdcc, s_si) \
+ V(I64x2ShrU, 0xfdcd, s_si) \
+ V(I64x2Add, 0xfdce, s_ss) \
+ V(I64x2Sub, 0xfdd1, s_ss) \
+ V(I64x2Mul, 0xfdd5, s_ss) \
+ V(F32x4Abs, 0xfde0, s_s) \
+ V(F32x4Neg, 0xfde1, s_s) \
+ V(F32x4Sqrt, 0xfde3, s_s) \
+ V(F32x4Add, 0xfde4, s_ss) \
+ V(F32x4Sub, 0xfde5, s_ss) \
+ V(F32x4Mul, 0xfde6, s_ss) \
+ V(F32x4Div, 0xfde7, s_ss) \
+ V(F32x4Min, 0xfde8, s_ss) \
+ V(F32x4Max, 0xfde9, s_ss) \
+ V(F64x2Abs, 0xfdec, s_s) \
+ V(F64x2Neg, 0xfded, s_s) \
+ V(F64x2Sqrt, 0xfdef, s_s) \
+ V(F64x2Add, 0xfdf0, s_ss) \
+ V(F64x2Sub, 0xfdf1, s_ss) \
+ V(F64x2Mul, 0xfdf2, s_ss) \
+ V(F64x2Div, 0xfdf3, s_ss) \
+ V(F64x2Min, 0xfdf4, s_ss) \
+ V(F64x2Max, 0xfdf5, s_ss) \
+ V(I32x4SConvertF32x4, 0xfdf8, s_s) \
+ V(I32x4UConvertF32x4, 0xfdf9, s_s) \
+ V(F32x4SConvertI32x4, 0xfdfa, s_s) \
+ V(F32x4UConvertI32x4, 0xfdfb, s_s)
#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
- V(I64x2Eq, 0xfd36, s_ss) \
- V(I64x2Ne, 0xfd37, s_ss) \
- V(I64x2LtS, 0xfd38, s_ss) \
- V(I64x2LtU, 0xfd39, s_ss) \
- V(I64x2GtS, 0xfd3a, s_ss) \
- V(I64x2GtU, 0xfd3b, s_ss) \
- V(I64x2LeS, 0xfd3c, s_ss) \
- V(I64x2LeU, 0xfd3d, s_ss) \
- V(I64x2GeS, 0xfd3e, s_ss) \
- V(I64x2GeU, 0xfd3f, s_ss) \
- V(S1x2AnyTrue, 0xfd85, i_s) \
- V(S1x2AllTrue, 0xfd86, i_s) \
- V(I64x2MinS, 0xfd8e, s_ss) \
- V(I64x2MinU, 0xfd8f, s_ss) \
- V(I64x2MaxS, 0xfd90, s_ss) \
- V(I64x2MaxU, 0xfd91, s_ss) \
- V(F32x4Qfma, 0xfd98, s_sss) \
- V(F32x4Qfms, 0xfd99, s_sss) \
- V(F64x2Qfma, 0xfda3, s_sss) \
- V(F64x2Qfms, 0xfda4, s_sss) \
- V(I16x8AddHoriz, 0xfdbd, s_ss) \
- V(I32x4AddHoriz, 0xfdbe, s_ss) \
- V(F32x4AddHoriz, 0xfdbf, s_ss) \
- V(I8x16BitMask, 0xfde4, i_s) \
- V(I16x8BitMask, 0xfde5, i_s) \
- V(I32x4BitMask, 0xfde6, i_s) \
- V(F32x4RecipApprox, 0xfdee, s_s) \
- V(F32x4RecipSqrtApprox, 0xfdef, s_s)
+ V(I8x16Mul, 0xfd75, s_ss) \
+ V(I8x16BitMask, 0xfd64, i_s) \
+ V(I16x8BitMask, 0xfd84, i_s) \
+ V(I32x4BitMask, 0xfda4, i_s) \
+ V(S1x2AnyTrue, 0xfdc2, i_s) \
+ V(S1x2AllTrue, 0xfdc3, i_s) \
+ V(I64x2Eq, 0xfdc0, s_ss) \
+ V(I64x2Ne, 0xfdc4, s_ss) \
+ V(I64x2LtS, 0xfdc5, s_ss) \
+ V(I64x2LtU, 0xfdc6, s_ss) \
+ V(I64x2GtS, 0xfdc7, s_ss) \
+ V(I64x2GtU, 0xfdc8, s_ss) \
+ V(I64x2LeS, 0xfdc9, s_ss) \
+ V(I64x2LeU, 0xfdca, s_ss) \
+ V(I64x2GeS, 0xfdcf, s_ss) \
+ V(I64x2GeU, 0xfdd0, s_ss) \
+ V(I64x2MinS, 0xfdd6, s_ss) \
+ V(I64x2MinU, 0xfdd7, s_ss) \
+ V(I64x2MaxS, 0xfdd8, s_ss) \
+ V(I64x2MaxU, 0xfdd9, s_ss) \
+ V(F32x4Qfma, 0xfdfc, s_sss) \
+ V(F32x4Qfms, 0xfdfd, s_sss) \
+ V(F64x2Qfma, 0xfdfe, s_sss) \
+ V(F64x2Qfms, 0xfdff, s_sss) \
+ V(I16x8AddHoriz, 0xfdaf, s_ss) \
+ V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(F32x4AddHoriz, 0xfdb2, s_ss) \
+ V(F32x4RecipApprox, 0xfdb3, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfdba, s_s) \
+ V(F32x4Pmin, 0xfdda, s_ss) \
+ V(F32x4Pmax, 0xfddb, s_ss) \
+ V(F64x2Pmin, 0xfddc, s_ss) \
+ V(F64x2Pmax, 0xfddd, s_ss)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
- V(I8x16ExtractLaneS, 0xfd05, _) \
- V(I8x16ExtractLaneU, 0xfd06, _) \
- V(I16x8ExtractLaneS, 0xfd09, _) \
- V(I16x8ExtractLaneU, 0xfd0a, _) \
- V(I32x4ExtractLane, 0xfd0d, _) \
- V(I64x2ExtractLane, 0xfd10, _) \
- V(F32x4ExtractLane, 0xfd13, _) \
- V(F64x2ExtractLane, 0xfd16, _)
+ V(I8x16ExtractLaneS, 0xfd15, _) \
+ V(I8x16ExtractLaneU, 0xfd16, _) \
+ V(I16x8ExtractLaneS, 0xfd18, _) \
+ V(I16x8ExtractLaneU, 0xfd19, _) \
+ V(I32x4ExtractLane, 0xfd1b, _) \
+ V(I64x2ExtractLane, 0xfd1d, _) \
+ V(F32x4ExtractLane, 0xfd1f, _) \
+ V(F64x2ExtractLane, 0xfd21, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
- V(I8x16ReplaceLane, 0xfd07, _) \
- V(I16x8ReplaceLane, 0xfd0b, _) \
- V(I32x4ReplaceLane, 0xfd0e, _) \
- V(I64x2ReplaceLane, 0xfd11, _) \
- V(F32x4ReplaceLane, 0xfd14, _) \
- V(F64x2ReplaceLane, 0xfd17, _)
+ V(I8x16ReplaceLane, 0xfd17, _) \
+ V(I16x8ReplaceLane, 0xfd1a, _) \
+ V(I32x4ReplaceLane, 0xfd1c, _) \
+ V(I64x2ReplaceLane, 0xfd1e, _) \
+ V(F32x4ReplaceLane, 0xfd20, _) \
+ V(F64x2ReplaceLane, 0xfd22, _)
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_MVP_0_OPERAND_OPCODE(V) \
@@ -574,6 +584,33 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
+// Opcode values are guesswork for now, see:
+// https://docs.google.com/document/d/1DklC3qVuOdLHSXB5UXghM_syCh-4cMinQ50ICiXnK3Q/edit
+#define FOREACH_GC_OPCODE(V) \
+ V(StructNew, 0xfb00, _) \
+ V(StructNewSub, 0xfb01, _) \
+ V(StructNewDefault, 0xfb02, _) \
+ V(StructGet, 0xfb03, _) \
+ V(StructGetS, 0xfb04, _) \
+ V(StructGetU, 0xfb05, _) \
+ V(StructSet, 0xfb06, _) \
+ V(ArrayNew, 0xfb10, _) \
+ V(ArrayNewSub, 0xfb11, _) \
+ V(ArrayNewDefault, 0xfb12, _) \
+ V(ArrayGet, 0xfb13, _) \
+ V(ArrayGetS, 0xfb14, _) \
+ V(ArrayGetU, 0xfb15, _) \
+ V(ArraySet, 0xfb16, _) \
+ V(ArrayLen, 0xfb17, _) \
+ V(I31New, 0xfb20, _) \
+ V(I31GetS, 0xfb21, _) \
+ V(I31GetU, 0xfb22, _) \
+ V(RttGet, 0xfb30, _) \
+ V(RttSub, 0xfb31, _) \
+ V(RefTest, 0xfb40, _) \
+ V(RefCast, 0xfb41, _) \
+ V(BrOnCast, 0xfb42, _)
+
#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
/* AtomicFence does not target a particular linear memory. */ \
V(AtomicFence, 0xfe03, v_v)
@@ -594,7 +631,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
- FOREACH_NUMERIC_OPCODE(V)
+ FOREACH_NUMERIC_OPCODE(V) \
+ FOREACH_GC_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
@@ -637,7 +675,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32)
+ V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32) \
+ V(i_rr, kWasmI32, kWasmEqRef, kWasmEqRef)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -654,7 +693,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
#define FOREACH_PREFIX(V) \
V(Numeric, 0xfc) \
V(Simd, 0xfd) \
- V(Atomic, 0xfe)
+ V(Atomic, 0xfe) \
+ V(GC, 0xfb)
enum WasmOpcode {
// Declare expression opcodes.
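The new GC prefix (0xfb) and the FOREACH_GC_OPCODE table above follow the X-macro pattern used throughout this header: each entry is replayed through a caller-supplied macro so that enums, decoders and name tables are all generated from one list. A minimal standalone sketch of that expansion, using a trimmed subset of the entries above (the GcOpcode enum and GcOpcodeName helper are illustrative names, not part of V8):

#include <cstdint>
#include <cstdio>

#define FOREACH_GC_OPCODE(V) \
  V(StructNew, 0xfb00)       \
  V(ArrayNew, 0xfb10)        \
  V(RefTest, 0xfb40)

// Expand the table into an enum...
enum class GcOpcode : uint32_t {
#define DECLARE_OPCODE(name, opcode) k##name = opcode,
  FOREACH_GC_OPCODE(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// ...and into a name lookup, generated from the same single source of truth.
const char* GcOpcodeName(GcOpcode op) {
  switch (op) {
#define OPCODE_CASE(name, opcode) \
  case GcOpcode::k##name:         \
    return #name;
    FOREACH_GC_OPCODE(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "<unknown>";
}

int main() { std::printf("0xfb40 decodes as %s\n", GcOpcodeName(GcOpcode::kRefTest)); }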
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 773a709721..8df5d4c88e 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -10,7 +10,6 @@
#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
-#include "src/snapshot/serializer-common.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/utils/version.h"
@@ -301,8 +300,7 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(bool);
- DCHECK(code->kind() == WasmCode::kFunction ||
- code->kind() == WasmCode::kInterpreterEntry);
+ DCHECK_EQ(WasmCode::kFunction, code->kind());
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
code->protected_instructions_data().size();
@@ -330,8 +328,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
return;
}
writer->Write(true);
- DCHECK(code->kind() == WasmCode::kFunction ||
- code->kind() == WasmCode::kInterpreterEntry);
+ DCHECK_EQ(WasmCode::kFunction, code->kind());
// Write the size of the entire code section, followed by the code header.
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
@@ -611,9 +608,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (decode_result.failed()) return {};
std::shared_ptr<WasmModule> module = std::move(decode_result.value());
CHECK_NOT_NULL(module);
- Handle<Script> script = CreateWasmScript(isolate, wire_bytes_vec,
- VectorOf(module->source_map_url),
- module->name, source_url);
auto shared_native_module = wasm_engine->MaybeGetNativeModule(
module->origin, wire_bytes_vec, isolate);
@@ -641,6 +635,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
CompileJsToWasmWrappers(isolate, shared_native_module->module(),
&export_wrappers);
+ Handle<Script> script =
+ wasm_engine->GetOrCreateScript(isolate, shared_native_module, source_url);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
isolate, std::move(shared_native_module), script, export_wrappers);
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index 8434a2e14b..24a143d069 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -32,6 +32,12 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
}
}
+// {kForDebugging} is used for default tiered-down code (potentially with
+// breakpoints), {kForStepping} is code that is flooded with breakpoints.
+enum ForDebugging : int8_t { kNoDebugging = 0, kForDebugging, kForStepping };
+
+enum TieringState : int8_t { kTieredUp, kTieredDown };
+
} // namespace wasm
} // namespace internal
} // namespace v8
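The comment above distinguishes the two debugging tiers: kForDebugging marks code that was tiered down by default (and may carry breakpoints), while kForStepping marks code flooded with breakpoints for single-stepping. A hypothetical helper, not taken from the patch, showing how such a tag might be chosen:

enum ForDebugging : int8_t { kNoDebugging = 0, kForDebugging, kForStepping };

// Hypothetical: pick the tag for a function about to be (re)compiled.
ForDebugging TagForCompilation(bool stepping, bool has_breakpoints) {
  if (stepping) return kForStepping;          // flood with breakpoints
  if (has_breakpoints) return kForDebugging;  // default tiered-down code
  return kNoDebugging;
}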
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 1ad433bb23..127375f26f 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -82,4 +82,9 @@
'octane/typescript': [SKIP],
}], # 'predictable'
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 89fe36f65b..2c9363130a 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -35,6 +35,14 @@ v8_executable("cctest") {
ldflags = []
+ if (v8_use_perfetto) {
+ deps += [
+ # TODO(skyostil): Switch the test to use protozero.
+ "//third_party/perfetto/protos/perfetto/trace/interned_data:lite",
+ "//third_party/perfetto/protos/perfetto/trace/track_event:lite",
+ ]
+ }
+
# TODO(machenbach): Translate from gyp.
#["OS=="aix"", {
# "ldflags": [ "-Wl,-bbigtoc" ],
@@ -125,6 +133,7 @@ v8_source_set("cctest_sources") {
"heap/test-alloc.cc",
"heap/test-array-buffer-tracker.cc",
"heap/test-compaction.cc",
+ "heap/test-concurrent-allocation.cc",
"heap/test-concurrent-marking.cc",
"heap/test-embedder-tracing.cc",
"heap/test-external-string-tracker.cc",
@@ -169,6 +178,7 @@ v8_source_set("cctest_sources") {
"test-allocation.cc",
"test-api-accessors.cc",
"test-api-array-buffer.cc",
+ "test-api-icu.cc",
"test-api-interceptors.cc",
"test-api-stack-traces.cc",
"test-api-typed-array.cc",
@@ -228,6 +238,7 @@ v8_source_set("cctest_sources") {
"test-object.cc",
"test-orderedhashtable.cc",
"test-parsing.cc",
+ "test-persistent-handles.cc",
"test-platform.cc",
"test-profile-generator.cc",
"test-random-number-generator.cc",
@@ -264,6 +275,7 @@ v8_source_set("cctest_sources") {
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
"wasm/test-compilation-cache.cc",
+ "wasm/test-gc.cc",
"wasm/test-grow-memory.cc",
"wasm/test-jump-table-assembler.cc",
"wasm/test-liftoff-inspection.cc",
@@ -278,6 +290,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd-liftoff.cc",
+ "wasm/test-run-wasm-simd-scalar-lowering.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm.cc",
"wasm/test-streaming-compilation.cc",
@@ -286,7 +299,6 @@ v8_source_set("cctest_sources") {
"wasm/test-wasm-debug-evaluate.cc",
"wasm/test-wasm-debug-evaluate.h",
"wasm/test-wasm-import-wrapper-cache.cc",
- "wasm/test-wasm-interpreter-entry.cc",
"wasm/test-wasm-serialization.cc",
"wasm/test-wasm-shared-engine.cc",
"wasm/test-wasm-stack.cc",
@@ -375,6 +387,11 @@ v8_source_set("cctest_sources") {
]
}
+ if (v8_use_perfetto) {
+ # Perfetto doesn't use TraceObject.
+ sources -= [ "test-trace-event.cc" ]
+ }
+
configs = [
"../..:external_config",
"../..:internal_config_base",
@@ -432,10 +449,8 @@ v8_source_set("cctest_sources") {
if (v8_use_perfetto) {
deps += [
- "//third_party/perfetto/include/perfetto/tracing",
- "//third_party/perfetto/protos/perfetto/trace/chrome:lite",
- "//third_party/perfetto/protos/perfetto/trace/chrome:zero",
- "//third_party/perfetto/src/tracing:in_process_backend",
+ # TODO(skyostil): Migrate to protozero.
+ "//third_party/perfetto/protos/perfetto/trace:lite",
]
}
}
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 6adf2041cf..09e390a693 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -40,7 +40,7 @@
#include "test/cctest/trace-extension.h"
#ifdef V8_USE_PERFETTO
-#include "perfetto/tracing.h"
+#include "src/tracing/trace-event.h"
#endif // V8_USE_PERFETTO
#if V8_OS_WIN
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index e503b51914..15b0f6adf1 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -704,6 +704,12 @@ class TestPlatform : public v8::Platform {
old_platform_->CallDelayedOnWorkerThread(std::move(task), delay_in_seconds);
}
+ std::unique_ptr<v8::JobHandle> PostJob(
+ v8::TaskPriority priority,
+ std::unique_ptr<v8::JobTask> job_task) override {
+ return old_platform_->PostJob(priority, std::move(job_task));
+ }
+
double MonotonicallyIncreasingTime() override {
return old_platform_->MonotonicallyIncreasingTime();
}
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 5862e34108..73fdad69fe 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -236,7 +236,7 @@
'test-cpu-profiler/FunctionCallSample': [SKIP],
# BUG(5920): Flaky crash.
- 'test-serialize/PartialSerializerContext': [PASS, ['arch == x64 and mode == debug', SKIP]],
+ 'test-serialize/ContextSerializerContext': [PASS, ['arch == x64 and mode == debug', SKIP]],
# BUG(10107): Failing flakily
'test-cpu-profiler/Inlining2': ['arch == ia32 and mode == debug', SKIP],
@@ -246,8 +246,8 @@
##############################################################################
['system == windows and arch == x64 and mode == debug', {
# BUG(v8:6328).
- 'test-serialize/PartialSerializerCustomContext': [SKIP],
- 'test-serialize/PartialSerializerObject': [SKIP],
+ 'test-serialize/ContextSerializerCustomContext': [SKIP],
+ 'test-serialize/ContextSerializerObject': [SKIP],
'test-serialize/StartupSerializerOnce': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
@@ -370,7 +370,6 @@
'test-run-wasm-module/Run_WasmModule_CallAdd' : [SKIP],
'test-run-wasm-module/Run_WasmModule_CallMain_recursive' : [SKIP],
# TODO(ppc): Implement load/store reverse byte instructions
- 'test-run-wasm-simd/RunWasmCompiled_SimdLoadStoreLoad': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_turbofan': [SKIP],
@@ -393,6 +392,8 @@
# Liftoff is not currently supported on ppc and s390
'test-liftoff-*': [SKIP],
+ 'test-wasm-breakpoints/*' : [SKIP],
+ 'test-wasm-debug-evaluate/*': [SKIP],
# SIMD not fully implemented yet
'test-run-wasm-simd-liftoff/*': [SKIP],
@@ -410,9 +411,12 @@
}], # variant == stress_incremental_marking
##############################################################################
-# The test relies on deterministic compilation.
['variant == stress_js_bg_compile_wasm_code_gc', {
+ # The test relies on deterministic compilation.
'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
+
+ # The test relies on deterministic allocation during compilation.
+ 'test-compiler/DeepEagerCompilationPeakMemory': [SKIP],
}], # variant == stress_js_bg_compile_wasm_code_gc
##############################################################################
@@ -465,6 +469,7 @@
'test-c-wasm-entry/*': [SKIP],
'test-compilation-cache/*': [SKIP],
'test-jump-table-assembler/*': [SKIP],
+ 'test-gc/*': [SKIP],
'test-grow-memory/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
'test-run-wasm-asmjs/*': [SKIP],
@@ -478,13 +483,13 @@
'test-run-wasm-module/*': [SKIP],
'test-run-wasm-sign-extension/*': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-run-wasm-simd-scalar-lowering/*': [SKIP],
'test-run-wasm-simd/*': [SKIP],
'test-streaming-compilation/*': [SKIP],
'test-wasm-breakpoints/*': [SKIP],
'test-wasm-codegen/*': [SKIP],
'test-wasm-debug-evaluate/*': [SKIP],
'test-wasm-import-wrapper-cache/*': [SKIP],
- 'test-wasm-interpreter-entry/*': [SKIP],
'test-wasm-serialization/*': [SKIP],
'test-wasm-shared-engine/*': [SKIP],
'test-wasm-stack/*': [SKIP],
@@ -615,6 +620,21 @@
'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [SKIP],
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
+ # Turboprop doesn't use call feedback and hence doesn't inline even if
+ # the inlining flag is explicitly set.
+ 'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
+ 'serializer-tester/BoundFunctionArguments': [SKIP],
+ 'serializer-tester/BoundFunctionTarget': [SKIP],
}], # variant == turboprop
+##############################################################################
+['no_i18n == True', {
+ 'test-regexp/UnicodePropertyEscapeCodeSize': [SKIP],
+}], # no_i18n == True
+
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index f46c6c5793..36ab68533b 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -179,6 +179,9 @@ TEST(ReduceJSLoadContext0) {
CHECK(match.HasValue());
CHECK_EQ(*expected, *match.Value());
}
+
+ // Clean up so that verifiers don't complain.
+ native->set(slot, Smi::zero());
}
TEST(ReduceJSLoadContext1) {
@@ -477,6 +480,9 @@ TEST(ReduceJSStoreContext0) {
CHECK_EQ(0, static_cast<int>(access.depth()));
CHECK_EQ(false, access.immutable());
}
+
+ // Clean up so that verifiers don't complain.
+ native->set(slot, Smi::zero());
}
TEST(ReduceJSStoreContext1) {
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 75899aeaac..f08ff023f2 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -104,6 +104,7 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
+ // TODO(bbudge) Add tests for FP registers.
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
@@ -116,9 +117,36 @@ TEST(TestLinkageStubCall) {
CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
CHECK_EQ(false, call_descriptor->IsJSFunctionCall());
+
+ CHECK_EQ(call_descriptor->GetParameterType(0), MachineType::AnyTagged());
+ CHECK_EQ(call_descriptor->GetReturnType(0), MachineType::AnyTagged());
// TODO(titzer): test linkage creation for outgoing stub calls.
}
+TEST(TestFPLinkageStubCall) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Callable callable =
+ Builtins::CallableFor(isolate, Builtins::kWasmFloat64ToNumber);
+ OptimizedCompilationInfo info(ArrayVector("test"), &zone, Code::STUB);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ &zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
+ CHECK(call_descriptor);
+ CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(1, static_cast<int>(call_descriptor->ParameterCount()));
+ CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
+ CHECK_EQ(false, call_descriptor->IsJSFunctionCall());
+
+ CHECK_EQ(call_descriptor->GetInputType(1), MachineType::Float64());
+ CHECK(call_descriptor->GetInputLocation(1).IsRegister());
+ CHECK_EQ(call_descriptor->GetReturnType(0), MachineType::AnyTagged());
+ CHECK(call_descriptor->GetReturnLocation(0).IsRegister());
+ CHECK_EQ(call_descriptor->GetReturnLocation(0).GetLocation(),
+ kReturnRegister0.code());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index a612a6432d..cda606ec16 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -248,7 +248,7 @@ class ValueHelper {
return ArrayVector(uint32_array);
}
- static constexpr Vector<const int32_t> int32_vector() {
+ static Vector<const int32_t> int32_vector() {
return Vector<const int32_t>::cast(uint32_vector());
}
@@ -282,7 +282,7 @@ class ValueHelper {
return ArrayVector(uint64_array);
}
- static constexpr Vector<const int64_t> int64_vector() {
+ static Vector<const int64_t> int64_vector() {
return Vector<const int64_t>::cast(uint64_vector());
}
@@ -293,7 +293,7 @@ class ValueHelper {
return ArrayVector(int16_array);
}
- static constexpr Vector<const uint16_t> uint16_vector() {
+ static Vector<const uint16_t> uint16_vector() {
return Vector<const uint16_t>::cast(int16_vector());
}
@@ -304,7 +304,7 @@ class ValueHelper {
return ArrayVector(int8_array);
}
- static constexpr Vector<const uint8_t> uint8_vector() {
+ static Vector<const uint8_t> uint8_vector() {
return Vector<const uint8_t>::cast(ArrayVector(int8_array));
}
@@ -317,41 +317,41 @@ class ValueHelper {
}
template <typename T>
- static constexpr Vector<const T> GetVector();
+ static inline Vector<const T> GetVector();
};
template <>
-constexpr Vector<const int8_t> ValueHelper::GetVector() {
+inline Vector<const int8_t> ValueHelper::GetVector() {
return int8_vector();
}
template <>
-constexpr Vector<const uint8_t> ValueHelper::GetVector() {
+inline Vector<const uint8_t> ValueHelper::GetVector() {
return uint8_vector();
}
template <>
-constexpr Vector<const int16_t> ValueHelper::GetVector() {
+inline Vector<const int16_t> ValueHelper::GetVector() {
return int16_vector();
}
template <>
-constexpr Vector<const uint16_t> ValueHelper::GetVector() {
+inline Vector<const uint16_t> ValueHelper::GetVector() {
return uint16_vector();
}
template <>
-constexpr Vector<const int32_t> ValueHelper::GetVector() {
+inline Vector<const int32_t> ValueHelper::GetVector() {
return int32_vector();
}
template <>
-constexpr Vector<const uint32_t> ValueHelper::GetVector() {
+inline Vector<const uint32_t> ValueHelper::GetVector() {
return uint32_vector();
}
template <>
-constexpr Vector<const int64_t> ValueHelper::GetVector() {
+inline Vector<const int64_t> ValueHelper::GetVector() {
return int64_vector();
}
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 15f9c2d89f..0abc075063 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index d5a5df9cf2..e836f37db5 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -6,7 +6,8 @@
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
new file mode 100644
index 0000000000..6d77460bd8
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -0,0 +1,100 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "src/api/api.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/common/globals.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/local-handles-inl.h"
+#include "src/handles/persistent-handles.h"
+#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/safepoint.h"
+#include "src/objects/heap-number.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+const int kNumIterations = 2000;
+const int kObjectSize = 10 * kTaggedSize;
+const int kLargeObjectSize = 8 * KB;
+
+class ConcurrentAllocationThread final : public v8::base::Thread {
+ public:
+ explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ pending_(pending) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_);
+ ConcurrentAllocator* allocator = local_heap.old_space_allocator();
+
+ for (int i = 0; i < kNumIterations; i++) {
+ Address address = allocator->AllocateOrFail(
+ kObjectSize, AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap_->CreateFillerObjectAt(address, kObjectSize,
+ ClearRecordedSlots::kNo);
+ address = allocator->AllocateOrFail(kLargeObjectSize,
+ AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap_->CreateFillerObjectAt(address, kLargeObjectSize,
+ ClearRecordedSlots::kNo);
+ if (i % 10 == 0) {
+ local_heap.Safepoint();
+ }
+ }
+
+ pending_->fetch_sub(1);
+ }
+
+ Heap* heap_;
+ std::atomic<int>* pending_;
+};
+
+UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
+ FLAG_max_old_space_size = 32;
+ FLAG_concurrent_allocation = true;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ FLAG_local_heaps = true;
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+
+ const int kThreads = 4;
+
+ std::atomic<int> pending(kThreads);
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread = std::make_unique<ConcurrentAllocationThread>(
+ i_isolate->heap(), &pending);
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ while (pending > 0) {
+ v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(), isolate);
+ }
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+
+ isolate->Dispose();
+}
+
+} // namespace internal
+} // namespace v8
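The new test coordinates its worker threads with a shared countdown: each ConcurrentAllocationThread decrements the pending counter when it finishes, and the main thread keeps pumping the platform's message loop until the counter reaches zero before joining the threads and disposing the isolate. A standalone sketch of that coordination pattern, using plain std::thread instead of v8::base::Thread purely for illustration:

#include <atomic>
#include <thread>
#include <vector>

int main() {
  constexpr int kThreads = 4;
  std::atomic<int> pending{kThreads};
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; i++) {
    workers.emplace_back([&pending] {
      // ... allocate from a thread-local heap in the real test ...
      pending.fetch_sub(1);
    });
  }
  // The real test pumps the isolate's message loop here instead of yielding.
  while (pending.load() > 0) std::this_thread::yield();
  for (auto& t : workers) t.join();
}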
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 37b31c2c33..3933c6ec46 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -281,7 +281,8 @@ TEST(GarbageCollectionForTesting) {
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
int saved_gc_counter = i_isolate->heap()->gc_count();
- tracer.GarbageCollectionForTesting(EmbedderHeapTracer::kUnknown);
+ tracer.GarbageCollectionForTesting(
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
CHECK_GT(i_isolate->heap()->gc_count(), saved_gc_counter);
}
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index d181f764f8..b9a4b2101c 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+
#include <utility>
#include "src/api/api-inl.h"
@@ -42,8 +43,9 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/ic/ic.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/elements.h"
@@ -5051,7 +5053,8 @@ TEST(Regress3877) {
CHECK(weak_prototype_holder->Get(0)->IsCleared());
}
-Handle<WeakFixedArray> AddRetainedMap(Isolate* isolate, Heap* heap) {
+Handle<WeakFixedArray> AddRetainedMap(Isolate* isolate,
+ Handle<NativeContext> context) {
HandleScope inner_scope(isolate);
Handle<Map> map = Map::Create(isolate, 1);
v8::Local<v8::Value> result =
@@ -5059,18 +5062,24 @@ Handle<WeakFixedArray> AddRetainedMap(Isolate* isolate, Heap* heap) {
Handle<JSReceiver> proto =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Map::SetPrototype(isolate, map, proto);
- heap->AddRetainedMap(map);
+ isolate->heap()->AddRetainedMap(context, map);
Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(1);
array->Set(0, HeapObjectReference::Weak(*map));
return inner_scope.CloseAndEscape(array);
}
-
void CheckMapRetainingFor(int n) {
FLAG_retain_maps_for_n_gc = n;
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- Handle<WeakFixedArray> array_with_map = AddRetainedMap(isolate, heap);
+ v8::Local<v8::Context> ctx = v8::Context::New(CcTest::isolate());
+ Handle<Context> context = Utils::OpenHandle(*ctx);
+ CHECK(context->IsNativeContext());
+ Handle<NativeContext> native_context = Handle<NativeContext>::cast(context);
+
+ ctx->Enter();
+ Handle<WeakFixedArray> array_with_map =
+ AddRetainedMap(isolate, native_context);
CHECK(array_with_map->Get(0)->IsWeak());
for (int i = 0; i < n; i++) {
heap::SimulateIncrementalMarking(heap);
@@ -5080,6 +5089,8 @@ void CheckMapRetainingFor(int n) {
heap::SimulateIncrementalMarking(heap);
CcTest::CollectGarbage(OLD_SPACE);
CHECK(array_with_map->Get(0)->IsCleared());
+
+ ctx->Exit();
}
@@ -5094,6 +5105,30 @@ TEST(MapRetaining) {
CheckMapRetainingFor(7);
}
+TEST(RetainedMapsCleanup) {
+ if (!FLAG_incremental_marking) return;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ v8::Local<v8::Context> ctx = v8::Context::New(CcTest::isolate());
+ Handle<Context> context = Utils::OpenHandle(*ctx);
+ CHECK(context->IsNativeContext());
+ Handle<NativeContext> native_context = Handle<NativeContext>::cast(context);
+
+ ctx->Enter();
+ Handle<WeakFixedArray> array_with_map =
+ AddRetainedMap(isolate, native_context);
+ CHECK(array_with_map->Get(0)->IsWeak());
+ heap->NotifyContextDisposed(true);
+ CcTest::CollectAllGarbage();
+ ctx->Exit();
+
+ CHECK_EQ(ReadOnlyRoots(heap).empty_weak_array_list(),
+ native_context->retained_maps());
+}
+
TEST(PreprocessStackTrace) {
// Do not automatically trigger early GC.
FLAG_gc_interval = -1;
@@ -6446,7 +6481,6 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
v8::Context::Scope context_scope(context);
}
isolate->Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
}
}
@@ -6933,6 +6967,41 @@ TEST(NoCodeRangeInJitlessMode) {
CcTest::i_isolate()->heap()->memory_allocator()->code_range().is_empty());
}
+TEST(Regress978156) {
+ if (!FLAG_incremental_marking) return;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+
+ HandleScope handle_scope(CcTest::i_isolate());
+ Heap* heap = CcTest::i_isolate()->heap();
+
+ // 1. Ensure that the new space is empty.
+ CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectGarbage(NEW_SPACE);
+ // 2. Fill the first page of the new space with FixedArrays.
+ std::vector<Handle<FixedArray>> arrays;
+ i::heap::FillCurrentPage(heap->new_space(), &arrays);
+ // 3. Trim the last array by one word thus creating a one-word filler.
+ Handle<FixedArray> last = arrays.back();
+ CHECK_GT(last->length(), 0);
+ heap->RightTrimFixedArray(*last, 1);
+ // 4. Get the last filler on the page.
+ HeapObject filler = HeapObject::FromAddress(
+ MemoryChunk::FromHeapObject(*last)->area_end() - kTaggedSize);
+ HeapObject::FromAddress(last->address() + last->Size());
+ CHECK(filler.IsFiller());
+ // 5. Start incremental marking.
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (marking->IsStopped()) {
+ marking->Start(i::GarbageCollectionReason::kTesting);
+ }
+ IncrementalMarking::MarkingState* marking_state = marking->marking_state();
+ // 6. Mark the filler black to access its two markbits. This triggers
+ // an out-of-bounds access of the marking bitmap in a bad case.
+ marking_state->WhiteToGrey(filler);
+ marking_state->GreyToBlack(filler);
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 67e5c0d48e..c597fe839d 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -4,12 +4,12 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
+#include "src/heap/memory-chunk.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index eb91a5e671..6b1fe1dbdc 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -30,6 +30,8 @@
#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
+#include "src/heap/large-spaces.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/free-space.h"
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 8701e50592..a055e87822 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -208,9 +208,9 @@ snippet: "
async function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 19
+frame size: 18
parameter count: 1
-bytecode array length: 361
+bytecode array length: 341
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -230,7 +230,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(8), R(5),
- B(JumpConstant), U8(15),
+ B(Jump), U8(247),
/* 36 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star), R(10),
B(GetIterator), R(10), U8(1), U8(3),
@@ -285,34 +285,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(13),
B(Ldar), R(10),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(15),
B(LdaNamedProperty), R(9), U8(10), U8(13),
- B(Star), R(15),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(16),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(17),
- B(LdaConstant), U8(11),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
- B(Throw),
- B(CallProperty0), R(15), R(9), U8(15),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(16),
+ B(CallProperty0), R(16), R(9), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(17),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(17), U8(1),
B(Jump), U8(12),
- B(Star), R(16),
+ B(Star), R(15),
B(LdaZero),
B(TestReferenceEqual), R(11),
B(JumpIfTrue), U8(5),
- B(Ldar), R(16),
+ B(Ldar), R(15),
B(ReThrow),
B(Ldar), R(13),
B(SetPendingMessage),
B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
B(Jump), U8(14),
B(Ldar), R(12),
B(ReThrow),
@@ -326,7 +318,7 @@ bytecodes: [
B(Star), R(4),
B(Jump), U8(41),
B(Star), R(8),
- B(CreateCatchContext), R(8), U8(14),
+ B(CreateCatchContext), R(8), U8(13),
B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
@@ -351,7 +343,7 @@ bytecodes: [
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(16), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(5),
B(ReThrow),
@@ -378,20 +370,18 @@ constant pool: [
Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
- Smi [267],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [19, 315, 315],
- [22, 281, 281],
+ [19, 295, 295],
+ [22, 261, 261],
[86, 172, 180],
- [204, 237, 239],
+ [196, 217, 219],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 4bb89c6179..e8d53c3ff3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -10,9 +10,9 @@ snippet: "
var x, a = [0,1,2,3];
[x] = a;
"
-frame size: 14
+frame size: 13
parameter count: 1
-bytecode array length: 166
+bytecode array length: 146
bytecodes: [
/* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(1),
@@ -55,29 +55,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(10),
B(LdaNamedProperty), R(4), U8(4), U8(13),
- B(Star), R(10),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(11),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(12),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
- B(Throw),
- B(CallProperty0), R(10), R(4), U8(15),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(4), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(Jump), U8(12),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(ReThrow),
B(Ldar), R(8),
B(SetPendingMessage),
@@ -95,11 +87,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[34, 76, 84],
- [108, 141, 143],
+ [100, 121, 123],
]
---
@@ -107,9 +98,9 @@ snippet: "
var x, y, a = [0,1,2,3];
[,x,...y] = a;
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 252
+bytecode array length: 232
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(2),
@@ -186,29 +177,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(4), U8(23),
- B(Star), R(11),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(12),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(13),
- B(LdaConstant), U8(5),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(CallProperty0), R(11), R(5), U8(25),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(5), U8(25),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
B(Jump), U8(12),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
@@ -226,11 +209,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[34, 162, 170],
- [194, 227, 229],
+ [186, 207, 209],
]
---
@@ -238,9 +220,9 @@ snippet: "
var x={}, y, a = [0];
[x.foo,y=4] = a;
"
-frame size: 16
+frame size: 15
parameter count: 1
-bytecode array length: 217
+bytecode array length: 197
bytecodes: [
/* 40 S> */ B(CreateEmptyObjectLiteral),
B(Star), R(0),
@@ -304,29 +286,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(12),
B(LdaNamedProperty), R(5), U8(5), U8(17),
- B(Star), R(12),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(13),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(14),
- B(LdaConstant), U8(6),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
- B(Throw),
- B(CallProperty0), R(12), R(5), U8(19),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(5), U8(19),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(Jump), U8(12),
- B(Star), R(13),
+ B(Star), R(12),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
@@ -345,11 +319,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[37, 127, 135],
- [159, 192, 194],
+ [151, 172, 174],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 866694aa03..3a4c3f50cf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -16,7 +16,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 319
+bytecode array length: 299
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -84,20 +84,12 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(94),
+ B(JumpIfToBooleanTrue), U8(74),
+ B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(17),
- B(Star), R(14),
- B(JumpIfUndefinedOrNull), U8(86),
- B(Mov), R(context), R(15),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(16),
- B(LdaConstant), U8(9),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
- B(Throw),
- B(CallProperty0), R(14), R(6), U8(19),
+ B(JumpIfUndefinedOrNull), U8(65),
+ B(Star), R(15),
+ B(CallProperty0), R(15), R(6), U8(19),
B(Star), R(17),
B(Mov), R(0), R(16),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
@@ -116,11 +108,11 @@ bytecodes: [
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
B(Jump), U8(12),
- B(Star), R(15),
+ B(Star), R(14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(ReThrow),
B(Ldar), R(10),
B(SetPendingMessage),
@@ -137,7 +129,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 57 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(10),
+ B(CreateCatchContext), R(5), U8(9),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -153,7 +145,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [223],
+ Smi [203],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -161,13 +153,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 291, 291],
+ [19, 271, 271],
[74, 153, 161],
- [185, 254, 256],
+ [177, 234, 236],
]
---
@@ -179,7 +170,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 340
+bytecode array length: 320
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -249,20 +240,12 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(94),
+ B(JumpIfToBooleanTrue), U8(74),
+ B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(17),
- B(Star), R(14),
- B(JumpIfUndefinedOrNull), U8(86),
- B(Mov), R(context), R(15),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(16),
- B(LdaConstant), U8(9),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
- B(Throw),
- B(CallProperty0), R(14), R(6), U8(19),
+ B(JumpIfUndefinedOrNull), U8(65),
+ B(Star), R(15),
+ B(CallProperty0), R(15), R(6), U8(19),
B(Star), R(17),
B(Mov), R(0), R(16),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
@@ -281,16 +264,16 @@ bytecodes: [
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
B(Jump), U8(12),
- B(Star), R(15),
+ B(Star), R(14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(ReThrow),
B(Ldar), R(10),
B(SetPendingMessage),
B(Ldar), R(8),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
B(Jump), U8(19),
B(Ldar), R(9),
B(ReThrow),
@@ -308,7 +291,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 68 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(12),
+ B(CreateCatchContext), R(5), U8(11),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -324,7 +307,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [227],
+ Smi [207],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -332,15 +315,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 312, 312],
+ [19, 292, 292],
[74, 157, 165],
- [189, 258, 260],
+ [181, 238, 240],
]
---
@@ -355,7 +337,7 @@ snippet: "
"
frame size: 19
parameter count: 1
-bytecode array length: 335
+bytecode array length: 315
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -430,20 +412,12 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(94),
+ B(JumpIfToBooleanTrue), U8(74),
+ B(Mov), R(context), R(14),
B(LdaNamedProperty), R(6), U8(8), U8(19),
- B(Star), R(14),
- B(JumpIfUndefinedOrNull), U8(86),
- B(Mov), R(context), R(15),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(16),
- B(LdaConstant), U8(9),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
- B(Throw),
- B(CallProperty0), R(14), R(6), U8(21),
+ B(JumpIfUndefinedOrNull), U8(65),
+ B(Star), R(15),
+ B(CallProperty0), R(15), R(6), U8(21),
B(Star), R(17),
B(Mov), R(0), R(16),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(16), U8(2),
@@ -462,11 +436,11 @@ bytecodes: [
B(Star), R(18),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(18), U8(1),
B(Jump), U8(12),
- B(Star), R(15),
+ B(Star), R(14),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(ReThrow),
B(Ldar), R(10),
B(SetPendingMessage),
@@ -483,7 +457,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 114 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(10),
+ B(CreateCatchContext), R(5), U8(9),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -499,7 +473,7 @@ bytecodes: [
]
constant pool: [
Smi [95],
- Smi [239],
+ Smi [219],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
@@ -507,13 +481,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 307, 307],
+ [19, 287, 287],
[74, 169, 177],
- [201, 270, 272],
+ [193, 250, 252],
]
---
@@ -524,9 +497,9 @@ snippet: "
}
f();
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 251
+bytecode array length: 231
bytecodes: [
B(Mov), R(closure), R(2),
B(Mov), R(this), R(3),
@@ -577,34 +550,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(11),
B(LdaNamedProperty), R(4), U8(6), U8(18),
- B(Star), R(11),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(12),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(13),
- B(LdaConstant), U8(7),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(CallProperty0), R(11), R(4), U8(20),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(4), U8(20),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
B(Jump), U8(12),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(ReThrow),
B(Ldar), R(8),
B(SetPendingMessage),
B(Ldar), R(6),
- B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
B(Jump), U8(19),
B(Ldar), R(7),
B(ReThrow),
@@ -622,7 +587,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(3), U8(3),
/* 96 S> */ B(Return),
B(Star), R(3),
- B(CreateCatchContext), R(3), U8(10),
+ B(CreateCatchContext), R(3), U8(9),
B(Star), R(2),
B(LdaTheHole),
B(SetPendingMessage),
@@ -644,14 +609,13 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
]
handlers: [
- [15, 223, 223],
+ [15, 203, 203],
[52, 104, 112],
- [136, 169, 171],
+ [128, 149, 151],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 681b4bc9f5..a3b9a1a86c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,9 +9,9 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 13
+frame size: 12
parameter count: 1
-bytecode array length: 163
+bytecode array length: 143
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
@@ -51,29 +51,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(9),
B(LdaNamedProperty), R(3), U8(4), U8(13),
- B(Star), R(9),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(10),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(11),
- B(LdaConstant), U8(5),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
- B(Throw),
- B(CallProperty0), R(9), R(3), U8(15),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(10),
+ B(CallProperty0), R(10), R(3), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Jump), U8(12),
- B(Star), R(10),
+ B(Star), R(9),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(9),
B(ReThrow),
B(Ldar), R(7),
B(SetPendingMessage),
@@ -91,11 +83,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[31, 73, 81],
- [105, 138, 140],
+ [97, 118, 120],
]
---
@@ -103,9 +94,9 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 14
+frame size: 13
parameter count: 1
-bytecode array length: 171
+bytecode array length: 151
bytecodes: [
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
@@ -147,34 +138,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(8),
B(Ldar), R(5),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(10),
B(LdaNamedProperty), R(4), U8(4), U8(12),
- B(Star), R(10),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(11),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(12),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
- B(Throw),
- B(CallProperty0), R(10), R(4), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(4), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(Jump), U8(12),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaZero),
B(TestReferenceEqual), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(ReThrow),
B(Ldar), R(8),
B(SetPendingMessage),
B(Ldar), R(6),
- B(SwitchOnSmiNoFeedback), U8(6), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
B(Jump), U8(8),
B(Ldar), R(7),
B(ReThrow),
@@ -189,13 +172,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
[29, 75, 83],
- [107, 140, 142],
+ [99, 120, 122],
]
---
@@ -205,9 +187,9 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 13
+frame size: 12
parameter count: 1
-bytecode array length: 179
+bytecode array length: 159
bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
B(Star), R(4),
@@ -254,29 +236,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(9),
B(LdaNamedProperty), R(3), U8(4), U8(15),
- B(Star), R(9),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(10),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(11),
- B(LdaConstant), U8(5),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
- B(Throw),
- B(CallProperty0), R(9), R(3), U8(17),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(10),
+ B(CallProperty0), R(10), R(3), U8(17),
B(JumpIfJSReceiver), U8(21),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Jump), U8(12),
- B(Star), R(10),
+ B(Star), R(9),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(9),
B(ReThrow),
B(Ldar), R(7),
B(SetPendingMessage),
@@ -294,11 +268,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[31, 89, 97],
- [121, 154, 156],
+ [113, 134, 136],
]
---
@@ -306,9 +279,9 @@ snippet: "
var x = { 'a': 1, 'b': 2 };
for (x['a'] of [1,2,3]) { return x['a']; }
"
-frame size: 13
+frame size: 12
parameter count: 1
-bytecode array length: 185
+bytecode array length: 165
bytecodes: [
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41),
B(Star), R(0),
@@ -354,34 +327,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(6),
B(Ldar), R(3),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(9),
B(LdaNamedProperty), R(2), U8(6), U8(18),
- B(Star), R(9),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(10),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(11),
- B(LdaConstant), U8(7),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
- B(Throw),
- B(CallProperty0), R(9), R(2), U8(20),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(10),
+ B(CallProperty0), R(10), R(2), U8(20),
B(JumpIfJSReceiver), U8(21),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
B(Jump), U8(12),
- B(Star), R(10),
+ B(Star), R(9),
B(LdaZero),
B(TestReferenceEqual), R(4),
B(JumpIfTrue), U8(5),
- B(Ldar), R(10),
+ B(Ldar), R(9),
B(ReThrow),
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
B(Jump), U8(8),
B(Ldar), R(5),
B(ReThrow),
@@ -398,12 +363,11 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
[37, 89, 97],
- [121, 154, 156],
+ [113, 134, 136],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index a38d3d78dd..42f4b336ca 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -13,9 +13,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 15
+frame size: 14
parameter count: 2
-bytecode array length: 160
+bytecode array length: 140
bytecodes: [
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
@@ -54,29 +54,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(3), U8(12),
- B(Star), R(11),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(12),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(13),
- B(LdaConstant), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(CallProperty0), R(11), R(5), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(5), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
B(Jump), U8(12),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
@@ -93,11 +85,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[25, 70, 78],
- [102, 135, 137],
+ [94, 115, 117],
]
---
@@ -109,7 +100,7 @@ snippet: "
"
frame size: 20
parameter count: 2
-bytecode array length: 244
+bytecode array length: 224
bytecodes: [
/* 10 E> */ B(CreateFunctionContext), U8(0), U8(5),
B(PushContext), R(2),
@@ -185,29 +176,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(12),
B(LdaNamedProperty), R(5), U8(8), U8(16),
- B(Star), R(12),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(13),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(14),
- B(LdaConstant), U8(9),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
- B(Throw),
- B(CallProperty0), R(12), R(5), U8(18),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(5), U8(18),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(Jump), U8(12),
- B(Star), R(13),
+ B(Star), R(12),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
@@ -230,11 +213,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[58, 152, 160],
- [184, 217, 219],
+ [176, 197, 199],
]
---
@@ -244,9 +226,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 14
+frame size: 13
parameter count: 2
-bytecode array length: 177
+bytecode array length: 157
bytecodes: [
/* 34 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
@@ -293,29 +275,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(7),
B(Ldar), R(4),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(10),
B(LdaNamedProperty), R(3), U8(5), U8(14),
- B(Star), R(10),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(11),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(12),
- B(LdaConstant), U8(6),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
- B(Throw),
- B(CallProperty0), R(10), R(3), U8(16),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(3), U8(16),
B(JumpIfJSReceiver), U8(21),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
B(Jump), U8(12),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaZero),
B(TestReferenceEqual), R(5),
B(JumpIfTrue), U8(5),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(ReThrow),
B(Ldar), R(7),
B(SetPendingMessage),
@@ -334,11 +308,10 @@ constant pool: [
SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[25, 87, 95],
- [119, 152, 154],
+ [111, 132, 134],
]
---
@@ -348,9 +321,9 @@ snippet: "
}
f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
"
-frame size: 17
+frame size: 16
parameter count: 2
-bytecode array length: 171
+bytecode array length: 151
bytecodes: [
/* 41 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
@@ -393,29 +366,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(11),
B(Ldar), R(8),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(13),
B(LdaNamedProperty), R(7), U8(5), U8(17),
- B(Star), R(13),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(14),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(15),
- B(LdaConstant), U8(6),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
- B(Throw),
- B(CallProperty0), R(13), R(7), U8(19),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(7), U8(19),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
B(Jump), U8(12),
- B(Star), R(14),
+ B(Star), R(13),
B(LdaZero),
B(TestReferenceEqual), R(9),
B(JumpIfTrue), U8(5),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(Ldar), R(11),
B(SetPendingMessage),
@@ -434,11 +399,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[25, 81, 89],
- [113, 146, 148],
+ [105, 126, 128],
]
---
@@ -448,9 +412,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 16
+frame size: 15
parameter count: 2
-bytecode array length: 201
+bytecode array length: 181
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(5),
@@ -503,29 +467,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(12),
B(LdaNamedProperty), R(6), U8(6), U8(12),
- B(Star), R(12),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(13),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(14),
- B(LdaConstant), U8(7),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
- B(Throw),
- B(CallProperty0), R(12), R(6), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(6), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(Jump), U8(12),
- B(Star), R(13),
+ B(Star), R(12),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(10),
B(SetPendingMessage),
@@ -545,11 +501,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
[66, 111, 119],
- [143, 176, 178],
+ [135, 156, 158],
]
---
@@ -559,9 +514,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 15
+frame size: 14
parameter count: 2
-bytecode array length: 245
+bytecode array length: 225
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -628,34 +583,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(9), U8(12),
- B(Star), R(11),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(12),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(13),
- B(LdaConstant), U8(10),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(CallProperty0), R(11), R(5), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(5), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
B(Jump), U8(12),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
B(Ldar), R(7),
- B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Jump), U8(8),
B(Ldar), R(8),
B(ReThrow),
@@ -675,13 +622,12 @@ constant pool: [
Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
[66, 149, 157],
- [181, 214, 216],
+ [173, 194, 196],
]
---
@@ -691,9 +637,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 17
+frame size: 16
parameter count: 2
-bytecode array length: 215
+bytecode array length: 195
bytecodes: [
B(Mov), R(closure), R(5),
B(Mov), R(this), R(6),
@@ -737,29 +683,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(11),
B(Ldar), R(8),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(13),
B(LdaNamedProperty), R(7), U8(3), U8(12),
- B(Star), R(13),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(14),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(15),
- B(LdaConstant), U8(4),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
- B(Throw),
- B(CallProperty0), R(13), R(7), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(7), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
B(Jump), U8(12),
- B(Star), R(14),
+ B(Star), R(13),
B(LdaZero),
B(TestReferenceEqual), R(9),
B(JumpIfTrue), U8(5),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(Ldar), R(11),
B(SetPendingMessage),
@@ -776,7 +714,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(6), U8(3),
/* 60 S> */ B(Return),
B(Star), R(6),
- B(CreateCatchContext), R(6), U8(5),
+ B(CreateCatchContext), R(6), U8(4),
B(Star), R(5),
B(LdaTheHole),
B(SetPendingMessage),
@@ -795,13 +733,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
]
handlers: [
- [15, 187, 187],
+ [15, 167, 167],
[40, 85, 93],
- [117, 150, 152],
+ [109, 130, 132],
]
---
@@ -811,9 +748,9 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 16
+frame size: 15
parameter count: 2
-bytecode array length: 251
+bytecode array length: 231
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(4),
@@ -870,29 +807,21 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(10),
B(Ldar), R(7),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(12),
B(LdaNamedProperty), R(6), U8(4), U8(12),
- B(Star), R(12),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(13),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(14),
- B(LdaConstant), U8(5),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
- B(Throw),
- B(CallProperty0), R(12), R(6), U8(14),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(6), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
B(Jump), U8(12),
- B(Star), R(13),
+ B(Star), R(12),
B(LdaZero),
B(TestReferenceEqual), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(Ldar), R(10),
B(SetPendingMessage),
@@ -909,7 +838,7 @@ bytecodes: [
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(5), U8(3),
/* 54 S> */ B(Return),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(6),
+ B(CreateCatchContext), R(5), U8(5),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -929,12 +858,11 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
SCOPE_INFO_TYPE,
]
handlers: [
- [19, 223, 223],
+ [19, 203, 203],
[44, 121, 129],
- [153, 186, 188],
+ [145, 166, 168],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index c3f8b980cf..acd0a0e7f8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -96,9 +96,9 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 251
+bytecode array length: 231
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
@@ -167,34 +167,26 @@ bytecodes: [
B(SetPendingMessage),
B(Star), R(9),
B(Ldar), R(6),
- B(JumpIfToBooleanTrue), U8(58),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(Mov), R(context), R(11),
B(LdaNamedProperty), R(5), U8(10), U8(13),
- B(Star), R(11),
- B(JumpIfUndefinedOrNull), U8(50),
- B(Mov), R(context), R(12),
- B(TestTypeOf), U8(6),
- B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(160),
- B(Star), R(13),
- B(LdaConstant), U8(11),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(CallProperty0), R(11), R(5), U8(15),
+ B(JumpIfUndefinedOrNull), U8(29),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(5), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
B(Jump), U8(12),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
B(TestReferenceEqual), R(7),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(ReThrow),
B(Ldar), R(9),
B(SetPendingMessage),
B(Ldar), R(7),
- B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
B(Jump), U8(8),
B(Ldar), R(8),
B(ReThrow),
@@ -215,13 +207,12 @@ constant pool: [
Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
[72, 155, 163],
- [187, 220, 222],
+ [179, 200, 202],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 7ac886945c..36f54e68a8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -26,14 +26,14 @@ frame size: 7
parameter count: 1
bytecode array length: 97
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 67 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 76 S> */ B(LdaCurrentContextSlot), U8(2),
+ /* 76 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(4),
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
/* 81 E> */ B(LdaKeyedProperty), R(this), U8(0),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(4), U8(1),
B(Star), R(5),
@@ -45,16 +45,16 @@ bytecodes: [
B(CallProperty1), R(6), R(this), R(5), U8(5),
/* 91 S> */ B(LdaSmi), I8(1),
B(Star), R(3),
- B(LdaCurrentContextSlot), U8(2),
+ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(5),
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
/* 96 E> */ B(LdaKeyedProperty), R(this), U8(7),
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(5), U8(1),
B(Star), R(6),
B(CallProperty1), R(6), R(this), R(3), U8(9),
- /* 108 S> */ B(LdaCurrentContextSlot), U8(2),
+ /* 108 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(4),
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
/* 120 E> */ B(LdaKeyedProperty), R(this), U8(11),
B(CallRuntime), U16(Runtime::kLoadPrivateGetter), R(4), U8(1),
B(Star), R(5),
@@ -79,12 +79,12 @@ frame size: 5
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -110,12 +110,12 @@ frame size: 5
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -141,12 +141,12 @@ frame size: 5
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -172,12 +172,12 @@ frame size: 6
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(4),
B(LdaConstant), U8(0),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
new file mode 100644
index 0000000000..0c3eefef90
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFieldAccess.golden
@@ -0,0 +1,84 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: test
+private methods: yes
+
+---
+snippet: "
+ class A {
+ #a;
+ #b;
+ constructor() {
+ this.#a = this.#b;
+ }
+ }
+
+ var test = A;
+ new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 30
+bytecodes: [
+ /* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
+ B(JumpIfUndefined), U8(11),
+ B(Star), R(1),
+ B(CallProperty0), R(1), R(this), U8(2),
+ B(Mov), R(this), R(0),
+ /* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
+ /* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
+ B(LdaUndefined),
+ /* 65 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ class B {
+ #a;
+ #b;
+ constructor() {
+ this.#a = this.#b;
+ }
+ force(str) {
+ eval(str);
+ }
+ }
+
+ var test = B;
+ new test;
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 30
+bytecodes: [
+ /* 35 E> */ B(LdaNamedProperty), R(closure), U8(0), U8(0),
+ B(JumpIfUndefined), U8(11),
+ B(Star), R(1),
+ B(CallProperty0), R(1), R(this), U8(2),
+ B(Mov), R(this), R(0),
+ /* 44 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
+ /* 59 E> */ B(LdaKeyedProperty), R(this), U8(4),
+ /* 52 E> */ B(StaKeyedProperty), R(this), R(3), U8(6),
+ B(LdaUndefined),
+ /* 65 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index cd5dd6f5a6..3b095a4434 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -21,14 +21,14 @@ frame size: 4
parameter count: 1
bytecode array length: 30
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(LdaCurrentContextSlot), U8(3),
+ /* 49 S> */ B(LdaImmutableCurrentContextSlot), U8(3),
/* 61 E> */ B(LdaKeyedProperty), R(this), U8(0),
- B(LdaCurrentContextSlot), U8(2),
+ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(3),
/* 63 E> */ B(CallAnyReceiver), R(3), R(this), U8(1), U8(2),
/* 66 S> */ B(Return),
@@ -52,12 +52,12 @@ frame size: 5
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(263),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -84,12 +84,12 @@ frame size: 5
parameter count: 1
bytecode array length: 31
bytecodes: [
- B(LdaCurrentContextSlot), U8(3),
+ B(LdaImmutableCurrentContextSlot), U8(3),
B(Star), R(1),
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(263),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -120,7 +120,7 @@ bytecodes: [
B(PushContext), R(0),
B(Ldar), R(this),
B(StaCurrentContextSlot), U8(2),
- B(LdaContextSlot), R(0), U8(3), U8(0),
+ B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
B(Star), R(2),
B(Mov), R(this), R(1),
B(Mov), R(0), R(3),
@@ -129,9 +129,9 @@ bytecodes: [
B(Star), R(5),
/* 61 E> */ B(CallUndefinedReceiver0), R(5), U8(0),
B(Star), R(5),
- B(LdaContextSlot), R(0), U8(3), U8(0),
+ B(LdaImmutableContextSlot), R(0), U8(3), U8(0),
/* 63 E> */ B(LdaKeyedProperty), R(5), U8(2),
- B(LdaContextSlot), R(0), U8(2), U8(0),
+ B(LdaImmutableContextSlot), R(0), U8(2), U8(0),
B(Star), R(4),
/* 66 E> */ B(CallAnyReceiver), R(4), R(5), U8(1), U8(4),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 352ec83961..13e0ef019a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -25,13 +25,13 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(262),
+ B(Wide), B(LdaSmi), I16(261),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kNewTypeError), R(2), U8(2),
B(Throw),
- B(LdaCurrentContextSlot), U8(2),
+ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(0),
/* 70 E> */ B(CallAnyReceiver), R(0), R(1), U8(1), U8(0),
/* 73 S> */ B(Return),
@@ -56,7 +56,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(263),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -83,7 +83,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(264),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(263),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -116,13 +116,13 @@ frame size: 5
parameter count: 1
bytecode array length: 142
bytecodes: [
- /* 90 S> */ B(LdaCurrentContextSlot), U8(2),
+ /* 90 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(1),
B(LdaCurrentContextSlot), U8(3),
/* 94 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(262),
+ B(Wide), B(LdaSmi), I16(261),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -138,13 +138,13 @@ bytecodes: [
B(CallProperty1), R(3), R(0), R(2), U8(3),
/* 105 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- B(LdaCurrentContextSlot), U8(2),
+ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(2),
B(LdaCurrentContextSlot), U8(3),
/* 109 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(263),
+ B(Wide), B(LdaSmi), I16(262),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -153,13 +153,13 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kLoadPrivateSetter), R(2), U8(1),
B(Star), R(3),
B(CallProperty1), R(3), R(1), R(0), U8(5),
- /* 122 S> */ B(LdaCurrentContextSlot), U8(2),
+ /* 122 S> */ B(LdaImmutableCurrentContextSlot), U8(2),
B(Star), R(1),
B(LdaCurrentContextSlot), U8(3),
/* 133 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(262),
+ B(Wide), B(LdaSmi), I16(261),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -189,7 +189,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -215,7 +215,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 53 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -241,7 +241,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(265),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -267,7 +267,7 @@ frame size: 3
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 46 S> */ B(Wide), B(LdaSmi), I16(265),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(264),
B(Star), R(1),
B(LdaConstant), U8(0),
B(Star), R(2),
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index b883753dc5..13410c916c 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2756,6 +2756,42 @@ TEST(PrivateClassFields) {
LoadGolden("PrivateClassFields.golden")));
}
+TEST(PrivateClassFieldAccess) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_test_function_name("test");
+
+ const char* snippets[] = {
+ "class A {\n"
+ " #a;\n"
+ " #b;\n"
+ " constructor() {\n"
+ " this.#a = this.#b;\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "var test = A;\n"
+ "new test;\n",
+
+ "class B {\n"
+ " #a;\n"
+ " #b;\n"
+ " constructor() {\n"
+ " this.#a = this.#b;\n"
+ " }\n"
+ " force(str) {\n"
+ " eval(str);\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "var test = B;\n"
+ "new test;\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PrivateClassFieldAccess.golden")));
+}
+
TEST(PrivateMethodDeclaration) {
bool old_methods_flag = i::FLAG_harmony_private_methods;
i::FLAG_harmony_private_methods = true;
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index bd9edb127b..6c9ed36a7c 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -522,6 +522,7 @@ TEST(InterpreterStringAdd) {
{ast_factory.GetOneByteString(""), LiteralForTest(2.5),
factory->NewStringFromStaticChars("2.5"), BinaryOperationFeedback::kAny},
};
+ ast_factory.Internalize(isolate);
for (size_t i = 0; i < arraysize(test_cases); i++) {
FeedbackVectorSpec feedback_spec(zone);
@@ -534,7 +535,6 @@ TEST(InterpreterStringAdd) {
builder.LoadLiteral(test_cases[i].lhs).StoreAccumulatorInRegister(reg);
LoadLiteralForTest(&builder, test_cases[i].rhs);
builder.BinaryOperation(Token::Value::ADD, reg, GetIndex(slot)).Return();
- ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -744,6 +744,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
{Token::Value::MOD, LiteralForTest(3),
LiteralForTest(ast_factory.GetOneByteString("-2")),
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
+ ast_factory.Internalize(isolate);
for (const BinaryOpExpectation& test_case : kTestCases) {
i::FeedbackVectorSpec feedback_spec(zone);
@@ -760,7 +761,6 @@ TEST(InterpreterBinaryOpTypeFeedback) {
LoadLiteralForTest(&builder, test_case.arg2);
builder.BinaryOperation(test_case.op, reg, GetIndex(slot0)).Return();
- ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -849,6 +849,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Smi>(Smi::zero(), isolate), BinaryOperationFeedback::kNumber},
{Token::Value::SAR, LiteralForTest(ast_factory.GetOneByteString("2")), 1,
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
+ ast_factory.Internalize(isolate);
for (const BinaryOpExpectation& test_case : kTestCases) {
i::FeedbackVectorSpec feedback_spec(zone);
@@ -866,7 +867,6 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
.BinaryOperation(test_case.op, reg, GetIndex(slot0))
.Return();
- ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
@@ -1358,8 +1358,6 @@ TEST(InterpreterCall) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- HashSeed(isolate));
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddLoadICSlot();
@@ -1371,10 +1369,12 @@ TEST(InterpreterCall) {
int call_slot_index = -1;
call_slot_index = GetIndex(call_slot);
- const AstRawString* name = ast_factory.GetOneByteString("func");
-
// Check with no args.
{
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
+ const AstRawString* name = ast_factory.GetOneByteString("func");
+
BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
@@ -1399,6 +1399,10 @@ TEST(InterpreterCall) {
// Check that receiver is passed properly.
{
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
+ const AstRawString* name = ast_factory.GetOneByteString("func");
+
BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
@@ -1424,6 +1428,10 @@ TEST(InterpreterCall) {
// Check with two parameters (+ receiver).
{
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
+ const AstRawString* name = ast_factory.GetOneByteString("func");
+
BytecodeArrayBuilder builder(zone, 1, 4, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(3);
@@ -1457,6 +1465,10 @@ TEST(InterpreterCall) {
// Check with 10 parameters (+ receiver).
{
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
+ const AstRawString* name = ast_factory.GetOneByteString("func");
+
BytecodeArrayBuilder builder(zone, 1, 12, &feedback_spec);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(11);
@@ -2286,14 +2298,15 @@ TEST(InterpreterTestIn) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
- HashSeed(isolate));
// Allocate an array
Handle<i::JSArray> array =
factory->NewJSArray(0, i::ElementsKind::PACKED_SMI_ELEMENTS);
// Check for these properties on the array object
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
+ AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
+ HashSeed(isolate));
+
bool expected_value = (i == 0);
FeedbackVectorSpec feedback_spec(zone);
BytecodeArrayBuilder builder(zone, 1, 1, &feedback_spec);
@@ -2363,6 +2376,7 @@ TEST(InterpreterUnaryNotNonBoolean) {
false),
std::make_pair(LiteralForTest(ast_factory.GetOneByteString("")), true),
};
+ ast_factory.Internalize(isolate);
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
BytecodeArrayBuilder builder(zone, 1, 0);
@@ -2370,7 +2384,6 @@ TEST(InterpreterUnaryNotNonBoolean) {
Register r0(0);
LoadLiteralForTest(&builder, object_type_tuples[i].first);
builder.LogicalNot(ToBooleanMode::kConvertToBoolean).Return();
- ast_factory.Internalize(isolate);
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 1f1cb55f9b..81ad57b9c9 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -11,23 +11,9 @@
#ifdef V8_USE_PERFETTO
#include "perfetto/tracing.h"
-#include "protos/perfetto/trace/chrome/chrome_trace_event.pb.h"
-#include "protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h"
-#include "protos/perfetto/trace/chrome/chrome_trace_packet.pb.h"
#include "protos/perfetto/trace/trace.pb.h"
-#include "src/libplatform/tracing/json-trace-event-listener.h"
#include "src/libplatform/tracing/trace-event-listener.h"
-#endif // V8_USE_PERFETTO
-
-#ifdef V8_USE_PERFETTO
-class TestDataSource : public perfetto::DataSource<TestDataSource> {
- public:
- void OnSetup(const SetupArgs&) override {}
- void OnStart(const StartArgs&) override {}
- void OnStop(const StopArgs&) override {}
-};
-
-PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(TestDataSource);
+#include "src/tracing/traced-value.h"
#endif // V8_USE_PERFETTO
namespace v8 {
@@ -59,6 +45,8 @@ TEST(TestTraceConfig) {
delete trace_config;
}
+// Perfetto doesn't use TraceObject.
+#if !defined(V8_USE_PERFETTO)
TEST(TestTraceObject) {
TraceObject trace_object;
uint8_t category_enabled_flag = 41;
@@ -101,7 +89,10 @@ class MockTraceWriter : public TraceWriter {
private:
std::vector<std::string> events_;
};
+#endif // !defined(V8_USE_PERFETTO)
+// Perfetto doesn't use the ring buffer.
+#if !defined(V8_USE_PERFETTO)
TEST(TestTraceBufferRingBuffer) {
// We should be able to add kChunkSize * 2 + 1 trace events.
const int HANDLES_COUNT = TraceBufferChunk::kChunkSize * 2 + 1;
@@ -151,7 +142,10 @@ TEST(TestTraceBufferRingBuffer) {
}
delete ring_buffer;
}
+#endif // !defined(V8_USE_PERFETTO)
+// Perfetto has an internal JSON exporter.
+#if !defined(V8_USE_PERFETTO)
void PopulateJSONWriter(TraceWriter* writer) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
std::unique_ptr<v8::Platform> default_platform(
@@ -165,10 +159,6 @@ void PopulateJSONWriter(TraceWriter* writer) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
-#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8-cat");
tracing_controller->StartTracing(trace_config);
@@ -220,6 +210,7 @@ TEST(TestJSONTraceWriterWithCustomtag) {
CHECK_EQ(expected_trace_str, trace_str);
}
+#endif // !defined(V8_USE_PERFETTO)
void GetJSONStrings(std::vector<std::string>* ret, const std::string& str,
const std::string& param, const std::string& start_delim,
@@ -235,7 +226,8 @@ void GetJSONStrings(std::vector<std::string>* ret, const std::string& str,
}
}
-#ifndef V8_USE_PERFETTO
+// With Perfetto the tracing controller doesn't observe events.
+#if !defined(V8_USE_PERFETTO)
TEST(TestTracingController) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
std::unique_ptr<v8::Platform> default_platform(
@@ -251,10 +243,6 @@ TEST(TestTracingController) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
-#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
@@ -311,9 +299,6 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
-#ifdef V8_USE_PERFETTO
- tracing_controller->InitializeForPerfetto(&perfetto_stream);
-#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
@@ -402,7 +387,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
CHECK_EQ(all_args[22], "\"a1\":[42,42]");
CHECK_EQ(all_args[23], "\"a1\":[42,42],\"a2\":[123,123]");
}
-#endif // !V8_USE_PERFETTO
+#endif // !defined(V8_USE_PERFETTO)
namespace {
@@ -427,14 +412,15 @@ TEST(TracingObservers) {
v8::platform::tracing::TracingController* tracing_controller = tracing.get();
static_cast<v8::platform::DefaultPlatform*>(default_platform.get())
->SetTracingController(std::move(tracing));
+#ifdef V8_USE_PERFETTO
+ std::ostringstream sstream;
+ tracing_controller->InitializeForPerfetto(&sstream);
+#else
MockTraceWriter* writer = new MockTraceWriter();
v8::platform::tracing::TraceBuffer* ring_buffer =
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
writer);
tracing_controller->Initialize(ring_buffer);
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
#endif
v8::platform::tracing::TraceConfig* trace_config =
new v8::platform::tracing::TraceConfig();
@@ -484,6 +470,8 @@ TEST(TracingObservers) {
i::V8::SetPlatformForTesting(old_platform);
}
+// With Perfetto the tracing controller doesn't observe events.
+#if !defined(V8_USE_PERFETTO)
class TraceWritingThread : public base::Thread {
public:
TraceWritingThread(
@@ -525,10 +513,6 @@ TEST(AddTraceEventMultiThreaded) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
-#ifdef V8_USE_PERFETTO
- std::ostringstream sstream;
- tracing_controller->InitializeForPerfetto(&sstream);
-#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
@@ -546,26 +530,110 @@ TEST(AddTraceEventMultiThreaded) {
i::V8::SetPlatformForTesting(old_platform);
}
+#endif // !defined(V8_USE_PERFETTO)
#ifdef V8_USE_PERFETTO
-using TraceEvent = ::perfetto::protos::ChromeTraceEvent;
+using TrackEvent = ::perfetto::protos::TrackEvent;
class TestListener : public TraceEventListener {
public:
void ProcessPacket(const ::perfetto::protos::TracePacket& packet) {
- for (const ::perfetto::protos::ChromeTraceEvent& event :
- packet.chrome_events().trace_events()) {
- events_.push_back(event);
+ if (packet.incremental_state_cleared()) {
+ categories_.clear();
+ event_names_.clear();
+ debug_annotation_names_.clear();
+ }
+
+ if (!packet.has_track_event()) return;
+
+ // Update incremental state.
+ if (packet.has_interned_data()) {
+ const auto& interned_data = packet.interned_data();
+ for (const auto& it : interned_data.event_categories()) {
+ CHECK_EQ(categories_.find(it.iid()), categories_.end());
+ categories_[it.iid()] = it.name();
+ }
+ for (const auto& it : interned_data.event_names()) {
+ CHECK_EQ(event_names_.find(it.iid()), event_names_.end());
+ event_names_[it.iid()] = it.name();
+ }
+ for (const auto& it : interned_data.debug_annotation_names()) {
+ CHECK_EQ(debug_annotation_names_.find(it.iid()),
+ debug_annotation_names_.end());
+ debug_annotation_names_[it.iid()] = it.name();
+ }
+ }
+ const auto& track_event = packet.track_event();
+ std::string slice;
+ switch (track_event.type()) {
+ case perfetto::protos::TrackEvent::TYPE_SLICE_BEGIN:
+ slice += "B";
+ break;
+ case perfetto::protos::TrackEvent::TYPE_SLICE_END:
+ slice += "E";
+ break;
+ case perfetto::protos::TrackEvent::TYPE_INSTANT:
+ slice += "I";
+ break;
+ default:
+ case perfetto::protos::TrackEvent::TYPE_UNSPECIFIED:
+ CHECK(false);
}
+ slice += ":" +
+ (track_event.category_iids_size()
+ ? categories_[track_event.category_iids().Get(0)]
+ : "") +
+ ".";
+ if (track_event.name_iid()) {
+ slice += event_names_[track_event.name_iid()];
+ } else {
+ slice += track_event.name();
+ }
+
+ if (track_event.debug_annotations_size()) {
+ slice += "(";
+ bool first_annotation = true;
+ for (const auto& it : track_event.debug_annotations()) {
+ if (!first_annotation) {
+ slice += ",";
+ }
+ slice += debug_annotation_names_[it.name_iid()] + "=";
+ std::stringstream value;
+ if (it.has_bool_value()) {
+ value << "(bool)" << it.bool_value();
+ } else if (it.has_uint_value()) {
+ value << "(uint)" << it.uint_value();
+ } else if (it.has_int_value()) {
+ value << "(int)" << it.int_value();
+ } else if (it.has_double_value()) {
+ value << "(double)" << it.double_value();
+ } else if (it.has_string_value()) {
+ value << "(string)" << it.string_value();
+ } else if (it.has_pointer_value()) {
+ value << "(pointer)0x" << std::hex << it.pointer_value();
+ } else if (it.has_legacy_json_value()) {
+ value << "(json)" << it.legacy_json_value();
+ } else if (it.has_nested_value()) {
+ value << "(nested)" << it.nested_value().string_value();
+ }
+ slice += value.str();
+ first_annotation = false;
+ }
+ slice += ")";
+ }
+ events_.push_back(slice);
}
- TraceEvent* get_event(size_t index) { return &events_.at(index); }
+ const std::string& get_event(size_t index) { return events_.at(index); }
size_t events_size() const { return events_.size(); }
private:
- std::vector<TraceEvent> events_;
+ std::vector<std::string> events_;
+ std::map<uint64_t, std::string> categories_;
+ std::map<uint64_t, std::string> event_names_;
+ std::map<uint64_t, std::string> debug_annotation_names_;
};
class TracingTestHarness {
@@ -592,9 +660,14 @@ class TracingTestHarness {
tracing_controller_->StartTracing(trace_config);
}
- void StopTracing() { tracing_controller_->StopTracing(); }
+ void StopTracing() {
+ v8::TrackEvent::Flush();
+ tracing_controller_->StopTracing();
+ }
- TraceEvent* get_event(size_t index) { return listener_.get_event(index); }
+ const std::string& get_event(size_t index) {
+ return listener_.get_event(index);
+ }
size_t events_size() const { return listener_.events_size(); }
std::string perfetto_json_stream() { return perfetto_json_stream_.str(); }
@@ -622,49 +695,13 @@ TEST(Perfetto) {
harness.StopTracing();
- TraceEvent* event = harness.get_event(0);
- int32_t thread_id = event->thread_id();
- int32_t process_id = event->process_id();
- CHECK_EQ("test1", event->name());
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
- int64_t timestamp = event->timestamp();
-
- event = harness.get_event(1);
- CHECK_EQ("test2", event->name());
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
- CHECK_EQ(thread_id, event->thread_id());
- CHECK_EQ(process_id, event->process_id());
- CHECK_GE(event->timestamp(), timestamp);
- timestamp = event->timestamp();
-
- event = harness.get_event(2);
- CHECK_EQ("test3", event->name());
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
- CHECK_EQ(thread_id, event->thread_id());
- CHECK_EQ(process_id, event->process_id());
- CHECK_GE(event->timestamp(), timestamp);
- timestamp = event->timestamp();
-
- event = harness.get_event(3);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
- CHECK_EQ(thread_id, event->thread_id());
- CHECK_EQ(process_id, event->process_id());
- CHECK_GE(event->timestamp(), timestamp);
- timestamp = event->timestamp();
-
- event = harness.get_event(4);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
- CHECK_EQ(thread_id, event->thread_id());
- CHECK_EQ(process_id, event->process_id());
- CHECK_GE(event->timestamp(), timestamp);
- timestamp = event->timestamp();
-
- event = harness.get_event(5);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
- CHECK_EQ(thread_id, event->thread_id());
- CHECK_EQ(process_id, event->process_id());
- CHECK_GE(event->timestamp(), timestamp);
- timestamp = event->timestamp();
+ CHECK_EQ("B:v8.test1", harness.get_event(0));
+ CHECK_EQ("B:v8.test2(arg1=(uint)1024)", harness.get_event(1));
+ CHECK_EQ("B:v8.test3(arg1=(uint)1024,arg2=(string)str_arg)",
+ harness.get_event(2));
+ CHECK_EQ("E:.", harness.get_event(3));
+ CHECK_EQ("E:.", harness.get_event(4));
+ CHECK_EQ("E:.", harness.get_event(5));
CHECK_EQ(6, harness.events_size());
}
@@ -684,10 +721,10 @@ TEST(Categories) {
harness.StopTracing();
CHECK_EQ(4, harness.events_size());
- TraceEvent* event = harness.get_event(0);
- CHECK_EQ("v8.Test", event->name());
- event = harness.get_event(1);
- CHECK_EQ("v8.Test3", event->name());
+ CHECK_EQ("B:v8.v8.Test", harness.get_event(0));
+ CHECK_EQ("B:v8.v8.Test3", harness.get_event(1));
+ CHECK_EQ("E:.", harness.get_event(2));
+ CHECK_EQ("E:.", harness.get_event(3));
}
// Replacement for 'TestTracingControllerMultipleArgsAndCopy'
@@ -747,123 +784,57 @@ TEST(MultipleArgsAndCopy) {
mm = "CHANGED";
mmm = "CHANGED";
+ auto arg = v8::tracing::TracedValue::Create();
+ arg->SetInteger("value", 42);
TRACE_EVENT_INSTANT1("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
- new ConvertableToTraceFormatMock(42));
- std::unique_ptr<ConvertableToTraceFormatMock> trace_event_arg(
- new ConvertableToTraceFormatMock(42));
+ std::move(arg));
+
+ arg = v8::tracing::TracedValue::Create();
+ arg->SetString("value", "string");
+ auto arg2 = v8::tracing::TracedValue::Create();
+ arg2->SetDouble("value", 1.23);
TRACE_EVENT_INSTANT2("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
- std::move(trace_event_arg), "a2",
- new ConvertableToTraceFormatMock(123));
+ std::move(arg), "a2", std::move(arg2));
}
harness.StopTracing();
- // 20 START/END events, 4 INSTANT events.
- CHECK_EQ(44, harness.events_size());
- TraceEvent* event = harness.get_event(0);
- CHECK_EQ("aa", event->args()[0].name());
- CHECK_EQ(aa, event->args()[0].uint_value());
-
- event = harness.get_event(1);
- CHECK_EQ("bb", event->args()[0].name());
- CHECK_EQ(bb, event->args()[0].uint_value());
-
- event = harness.get_event(2);
- CHECK_EQ("cc", event->args()[0].name());
- CHECK_EQ(cc, event->args()[0].uint_value());
-
- event = harness.get_event(3);
- CHECK_EQ("dd", event->args()[0].name());
- CHECK_EQ(dd, event->args()[0].uint_value());
-
- event = harness.get_event(4);
- CHECK_EQ("ee", event->args()[0].name());
- CHECK_EQ(ee, event->args()[0].int_value());
-
- event = harness.get_event(5);
- CHECK_EQ("ff", event->args()[0].name());
- CHECK_EQ(ff, event->args()[0].int_value());
-
- event = harness.get_event(6);
- CHECK_EQ("gg", event->args()[0].name());
- CHECK_EQ(gg, event->args()[0].int_value());
-
- event = harness.get_event(7);
- CHECK_EQ("hh", event->args()[0].name());
- CHECK_EQ(hh, event->args()[0].int_value());
-
- event = harness.get_event(8);
- CHECK_EQ("ii1", event->args()[0].name());
- CHECK_EQ(ii1, event->args()[0].bool_value());
-
- event = harness.get_event(9);
- CHECK_EQ("ii2", event->args()[0].name());
- CHECK_EQ(ii2, event->args()[0].bool_value());
-
- event = harness.get_event(10);
- CHECK_EQ("jj1", event->args()[0].name());
- CHECK_EQ(jj1, event->args()[0].double_value());
-
- event = harness.get_event(11);
- CHECK_EQ("jj2", event->args()[0].name());
- CHECK_EQ(jj2, event->args()[0].double_value());
-
- event = harness.get_event(12);
- CHECK_EQ("jj3", event->args()[0].name());
- CHECK(std::isnan(event->args()[0].double_value()));
-
- event = harness.get_event(13);
- CHECK_EQ("jj4", event->args()[0].name());
- CHECK_EQ(jj4, event->args()[0].double_value());
-
- event = harness.get_event(14);
- CHECK_EQ("jj5", event->args()[0].name());
- CHECK_EQ(jj5, event->args()[0].double_value());
-
- event = harness.get_event(15);
- CHECK_EQ("kk", event->args()[0].name());
- CHECK_EQ(kk, reinterpret_cast<void*>(event->args()[0].pointer_value()));
-
- event = harness.get_event(16);
- CHECK_EQ("ll", event->args()[0].name());
- CHECK_EQ(ll, event->args()[0].string_value());
-
- event = harness.get_event(17);
- CHECK_EQ("mm", event->args()[0].name());
- CHECK_EQ("\"INIT\"", event->args()[0].string_value());
-
- event = harness.get_event(18);
- CHECK_EQ("v8.Test2.1", event->name());
- CHECK_EQ("aa", event->args()[0].name());
- CHECK_EQ(aa, event->args()[0].uint_value());
- CHECK_EQ("ll", event->args()[1].name());
- CHECK_EQ(ll, event->args()[1].string_value());
-
- event = harness.get_event(19);
- CHECK_EQ("mm1", event->args()[0].name());
- CHECK_EQ("INIT", event->args()[0].string_value());
- CHECK_EQ("mm2", event->args()[1].name());
- CHECK_EQ("\"INIT\"", event->args()[1].string_value());
-
- event = harness.get_event(20);
- CHECK_EQ("INIT", event->name());
-
- event = harness.get_event(21);
- CHECK_EQ("INIT", event->name());
- CHECK_EQ("mm1", event->args()[0].name());
- CHECK_EQ("INIT", event->args()[0].string_value());
- CHECK_EQ("mm2", event->args()[1].name());
- CHECK_EQ("\"INIT\"", event->args()[1].string_value());
-
- event = harness.get_event(22);
- CHECK_EQ("a1", event->args()[0].name());
- CHECK_EQ("[42,42]", event->args()[0].json_value());
-
- event = harness.get_event(23);
- CHECK_EQ("a1", event->args()[0].name());
- CHECK_EQ("[42,42]", event->args()[0].json_value());
- CHECK_EQ("a2", event->args()[1].name());
- CHECK_EQ("[123,123]", event->args()[1].json_value());
+ CHECK_EQ("B:v8.v8.Test.aa(aa=(uint)11)", harness.get_event(0));
+ CHECK_EQ("B:v8.v8.Test.bb(bb=(uint)22)", harness.get_event(1));
+ CHECK_EQ("B:v8.v8.Test.cc(cc=(uint)33)", harness.get_event(2));
+ CHECK_EQ("B:v8.v8.Test.dd(dd=(uint)44)", harness.get_event(3));
+ CHECK_EQ("B:v8.v8.Test.ee(ee=(int)-55)", harness.get_event(4));
+ CHECK_EQ("B:v8.v8.Test.ff(ff=(int)-66)", harness.get_event(5));
+ CHECK_EQ("B:v8.v8.Test.gg(gg=(int)-77)", harness.get_event(6));
+ CHECK_EQ("B:v8.v8.Test.hh(hh=(int)-88)", harness.get_event(7));
+ CHECK_EQ("B:v8.v8.Test.ii(ii1=(bool)1)", harness.get_event(8));
+ CHECK_EQ("B:v8.v8.Test.ii(ii2=(bool)0)", harness.get_event(9));
+ CHECK_EQ("B:v8.v8.Test.jj1(jj1=(double)99)", harness.get_event(10));
+ CHECK_EQ("B:v8.v8.Test.jj2(jj2=(double)1e+100)", harness.get_event(11));
+ CHECK_EQ("B:v8.v8.Test.jj3(jj3=(double)nan)", harness.get_event(12));
+ CHECK_EQ("B:v8.v8.Test.jj4(jj4=(double)inf)", harness.get_event(13));
+ CHECK_EQ("B:v8.v8.Test.jj5(jj5=(double)-inf)", harness.get_event(14));
+
+ std::ostringstream pointer_stream;
+ pointer_stream << "B:v8.v8.Test.kk(kk=(pointer)" << &aa << ")";
+ CHECK_EQ(pointer_stream.str().c_str(), harness.get_event(15));
+
+ CHECK_EQ("B:v8.v8.Test.ll(ll=(string)100)", harness.get_event(16));
+ CHECK_EQ("B:v8.v8.Test.mm(mm=(string)\"INIT\")", harness.get_event(17));
+ CHECK_EQ("B:v8.v8.Test2.1(aa=(uint)11,ll=(string)100)",
+ harness.get_event(18));
+ CHECK_EQ("B:v8.v8.Test2.2(mm1=(string)INIT,mm2=(string)\"INIT\")",
+ harness.get_event(19));
+ CHECK_EQ("I:v8.INIT", harness.get_event(20));
+ CHECK_EQ("I:v8.INIT(mm1=(string)INIT,mm2=(string)\"INIT\")",
+ harness.get_event(21));
+ CHECK_EQ("I:v8.v8.Test(a1=(json){\"value\":42})", harness.get_event(22));
+ CHECK_EQ(
+ "I:v8.v8.Test(a1=(json){\"value\":\"string\"},a2=(json){\"value\":1.23})",
+ harness.get_event(23));
+
+ // Check the terminating end events.
+ for (size_t i = 0; i < 20; i++) CHECK_EQ("E:.", harness.get_event(24 + i));
}
TEST(JsonIntegrationTest) {
@@ -895,84 +866,6 @@ TEST(JsonIntegrationTest) {
CHECK_EQ("\"4\":\"-Infinity\"", all_args[3]);
}
-TEST(TracingPerfetto) {
- ::perfetto::TraceConfig perfetto_trace_config;
- perfetto_trace_config.add_buffers()->set_size_kb(4096);
- auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
- ds_config->set_name("v8.trace_events");
-
- perfetto::DataSourceDescriptor dsd;
- dsd.set_name("v8.trace_events");
- TestDataSource::Register(dsd);
-
- auto tracing_session_ =
- perfetto::Tracing::NewTrace(perfetto::BackendType::kInProcessBackend);
- tracing_session_->Setup(perfetto_trace_config);
- tracing_session_->StartBlocking();
-
- for (int i = 0; i < 15; i++) {
- TestDataSource::Trace([&](TestDataSource::TraceContext ctx) {
- auto packet = ctx.NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_phase('c');
- trace_event->set_thread_id(v8::base::OS::GetCurrentThreadId());
- trace_event->set_timestamp(123);
- trace_event->set_process_id(v8::base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(123);
- });
- }
- tracing_session_->StopBlocking();
-
- std::ostringstream perfetto_json_stream_;
-
- {
- v8::platform::tracing::JSONTraceEventListener json_listener_(
- &perfetto_json_stream_);
-
- std::vector<char> trace = tracing_session_->ReadTraceBlocking();
- json_listener_.ParseFromArray(trace);
- }
-
- printf("%s\n", perfetto_json_stream_.str().c_str());
- CHECK_GT(perfetto_json_stream_.str().length(), 0);
-}
-
-TEST(StartAndStopRepeated) {
- for (int i = 0; i < 3; i++) {
- ::perfetto::TraceConfig perfetto_trace_config;
- perfetto_trace_config.add_buffers()->set_size_kb(4096);
- auto* ds_config =
- perfetto_trace_config.add_data_sources()->mutable_config();
- ds_config->set_name("v8.trace_events");
-
- perfetto::DataSourceDescriptor dsd;
- dsd.set_name("v8.trace_events");
- TestDataSource::Register(dsd);
-
- auto tracing_session_ =
- perfetto::Tracing::NewTrace(perfetto::BackendType::kInProcessBackend);
- tracing_session_->Setup(perfetto_trace_config);
- tracing_session_->StartBlocking();
-
- for (int i = 0; i < 15; i++) {
- TestDataSource::Trace([&](TestDataSource::TraceContext ctx) {
- auto packet = ctx.NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_phase('c');
- trace_event->set_thread_id(v8::base::OS::GetCurrentThreadId());
- trace_event->set_timestamp(123);
- trace_event->set_process_id(v8::base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(123);
- });
- }
- tracing_session_->StopBlocking();
- }
-}
-
#endif // V8_USE_PERFETTO
} // namespace tracing
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index da5f744f33..7890170de8 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -707,9 +707,13 @@ TEST(PreParserScopeAnalysis) {
shared->uncompiled_data_with_preparse_data().preparse_data(),
isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared);
+ flags.set_is_lazy_compile(true);
+
// Parse the lazy function using the scope data.
- i::ParseInfo using_scope_data(isolate, *shared);
- using_scope_data.set_lazy_compile();
+ i::UnoptimizedCompileState using_scope_state(isolate);
+ i::ParseInfo using_scope_data(isolate, flags, &using_scope_state);
using_scope_data.set_consumed_preparse_data(
i::ConsumedPreparseData::For(isolate, produced_data_on_heap));
CHECK(i::parsing::ParseFunction(&using_scope_data, shared, isolate));
@@ -720,12 +724,9 @@ TEST(PreParserScopeAnalysis) {
CHECK(i::ScopeTestHelper::HasSkippedFunctionInside(
scope_with_skipped_functions));
- // Do scope allocation (based on the preparsed scope data).
- CHECK(i::DeclarationScope::Analyze(&using_scope_data));
-
// Parse the lazy function again eagerly to produce baseline data.
- i::ParseInfo not_using_scope_data(isolate, *shared);
- not_using_scope_data.set_lazy_compile();
+ i::UnoptimizedCompileState not_using_scope_state(isolate);
+ i::ParseInfo not_using_scope_data(isolate, flags, &not_using_scope_state);
CHECK(i::parsing::ParseFunction(&not_using_scope_data, shared, isolate));
// Verify that we didn't skip anything (there's no preparsed scope data,
@@ -735,9 +736,6 @@ TEST(PreParserScopeAnalysis) {
CHECK(!i::ScopeTestHelper::HasSkippedFunctionInside(
scope_without_skipped_functions));
- // Do normal scope allocation.
- CHECK(i::DeclarationScope::Analyze(&not_using_scope_data));
-
// Verify that scope allocation gave the same results when parsing w/ the
// scope data (and skipping functions), and when parsing without.
i::ScopeTestHelper::CompareScopes(
@@ -759,7 +757,10 @@ TEST(Regress753896) {
i::Handle<i::String> source = factory->InternalizeUtf8String(
"function lazy() { let v = 0; if (true) { var v = 0; } }");
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileState state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &state);
// We don't assert that parsing succeeded or that it failed; currently the
// error is not detected inside lazy functions, but it might be in the future.
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index 9451d61d89..7fb670e017 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -7,6 +7,7 @@
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "test/cctest/cctest.h"
@@ -34,8 +35,9 @@ struct ScannerTestHelper {
ScannerTestHelper make_scanner(const char* src) {
ScannerTestHelper helper;
helper.stream = ScannerStream::ForTesting(src);
- helper.scanner =
- std::unique_ptr<Scanner>(new Scanner(helper.stream.get(), false));
+ helper.scanner = std::unique_ptr<Scanner>(
+ new Scanner(helper.stream.get(),
+ UnoptimizedCompileFlags::ForTest(CcTest::i_isolate())));
helper.scanner->Initialize();
return helper;
}
diff --git a/deps/v8/test/cctest/test-api-icu.cc b/deps/v8/test/cctest/test-api-icu.cc
new file mode 100644
index 0000000000..c5e617fdd2
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-icu.cc
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef V8_INTL_SUPPORT
+
+#include <stdlib.h>
+
+#include "include/v8.h"
+#include "src/objects/objects-inl.h"
+#include "test/cctest/cctest.h"
+#include "unicode/locid.h"
+
+namespace {
+void CheckLocaleSpecificValues(const char* locale, const char* date,
+ const char* number) {
+ CHECK(v8_str(locale)->StrictEquals(
+ CompileRun("Intl.NumberFormat().resolvedOptions().locale")));
+ CHECK(v8_str(date)->StrictEquals(
+ CompileRun("new Date('02/14/2020 13:45').toLocaleString()")));
+ CHECK(v8_str(number)->StrictEquals(
+ CompileRun("Number(10000.3).toLocaleString()")));
+}
+
+void SetIcuLocale(const char* locale_name) {
+ UErrorCode error_code = U_ZERO_ERROR;
+ icu::Locale locale(locale_name);
+ icu::Locale::setDefault(locale, error_code);
+ CHECK(U_SUCCESS(error_code));
+}
+} // namespace
+
+TEST(LocaleConfigurationChangeNotification) {
+ icu::Locale default_locale = icu::Locale::getDefault();
+
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ SetIcuLocale("en_US");
+ isolate->LocaleConfigurationChangeNotification();
+ CheckLocaleSpecificValues("en-US", "2/14/2020, 1:45:00 PM", "10,000.3");
+
+ SetIcuLocale("ru_RU");
+ isolate->LocaleConfigurationChangeNotification();
+ CheckLocaleSpecificValues("ru-RU", "14.02.2020, 13:45:00", "10Ā 000,3");
+
+ SetIcuLocale("zh_CN");
+ isolate->LocaleConfigurationChangeNotification();
+ CheckLocaleSpecificValues("zh-CN", "2020/2/14 äø‹åˆ1:45:00", "10,000.3");
+
+ UErrorCode error_code = U_ZERO_ERROR;
+ icu::Locale::setDefault(default_locale, error_code);
+ CHECK(U_SUCCESS(error_code));
+}
+
+#endif // V8_INTL_SUPPORT
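
The new test-api-icu.cc test pins down the embedder contract for locale changes: switching the process-wide ICU default locale only becomes visible to JavaScript once the isolate is explicitly notified. A minimal embedder-side sketch of that flow, assuming the embedder already owns a v8::Isolate* and links against ICU (SwitchDefaultLocale is an illustrative helper, not a V8 API; the two calls it wraps are the same ones the test exercises):

#include "include/v8.h"
#include "unicode/locid.h"

// Swap the process-wide ICU default locale, then invalidate V8's cached
// Intl locale data so subsequent Intl/Date calls pick up the new default.
void SwitchDefaultLocale(v8::Isolate* isolate, const char* locale_name) {
  UErrorCode status = U_ZERO_ERROR;
  icu::Locale::setDefault(icu::Locale(locale_name), status);
  if (U_FAILURE(status)) return;  // keep the previous locale on error
  // Without this notification the isolate keeps serving stale locale data.
  isolate->LocaleConfigurationChangeNotification();
}
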
diff --git a/deps/v8/test/cctest/test-api-wasm.cc b/deps/v8/test/cctest/test-api-wasm.cc
index 4dc84254bc..eb49d5668a 100644
--- a/deps/v8/test/cctest/test-api-wasm.cc
+++ b/deps/v8/test/cctest/test-api-wasm.cc
@@ -130,11 +130,16 @@ TEST(WasmStreamingAbortWithoutReject) {
namespace {
bool wasm_threads_enabled_value = false;
+bool wasm_simd_enabled_value = false;
bool MockWasmThreadsEnabledCallback(v8::Local<v8::Context>) {
return wasm_threads_enabled_value;
}
+bool MockWasmSimdEnabledCallback(v8::Local<v8::Context>) {
+ return wasm_simd_enabled_value;
+}
+
} // namespace
TEST(TestSetWasmThreadsEnabledCallback) {
@@ -165,3 +170,32 @@ TEST(TestSetWasmThreadsEnabledCallback) {
i::FLAG_experimental_wasm_threads = false;
CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
}
+
+TEST(TestSetWasmSimdEnabledCallback) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ i::Handle<i::Context> i_context = v8::Utils::OpenHandle(*context);
+
+ // {Isolate::IsWasmSimdEnabled} calls the callback set by the embedder if
+ // such a callback exists. Otherwise it returns
+ // {FLAG_experimental_wasm_simd}. First we test that the flag is returned
+ // correctly if no callback is set. Then we test that the flag is ignored if
+ // the callback is set.
+
+ i::FLAG_experimental_wasm_simd = false;
+ CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
+
+ i::FLAG_experimental_wasm_simd = true;
+ CHECK(i_isolate->IsWasmSimdEnabled(i_context));
+
+ isolate->SetWasmSimdEnabledCallback(MockWasmSimdEnabledCallback);
+ wasm_simd_enabled_value = false;
+ CHECK(!i_isolate->IsWasmSimdEnabled(i_context));
+
+ wasm_simd_enabled_value = true;
+ i::FLAG_experimental_wasm_simd = false;
+ CHECK(i_isolate->IsWasmSimdEnabled(i_context));
+}
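
As the comment in TestSetWasmSimdEnabledCallback spells out, a registered callback takes precedence over --experimental-wasm-simd for every decision. A hedged sketch of an embedder gating Wasm SIMD per context; the embedder-data slot and the policy are assumptions, and only Isolate::SetWasmSimdEnabledCallback plus the callback signature come from the API exercised above:

#include "include/v8.h"

// Assumed embedder convention: contexts that may use Wasm SIMD carry `true`
// in embedder-data slot 1.
bool WasmSimdPolicy(v8::Local<v8::Context> context) {
  v8::Local<v8::Value> flag = context->GetEmbedderData(1);
  return !flag.IsEmpty() && flag->IsTrue();
}

void InstallWasmSimdPolicy(v8::Isolate* isolate) {
  // From this point on the flag value is ignored; WasmSimdPolicy decides.
  isolate->SetWasmSimdEnabledCallback(WasmSimdPolicy);
}
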
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 191bb63b8e..734f52eb5f 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -1075,20 +1075,23 @@ template<typename Constructor, typename Accessor>
static void TestFunctionTemplateAccessor(Constructor constructor,
Accessor accessor) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(env->GetIsolate(), constructor);
- fun_templ->SetClassName(v8_str("funky"));
+ v8::FunctionTemplate::New(isolate, constructor);
+ fun_templ->PrototypeTemplate()->Set(
+ v8::Symbol::GetToStringTag(isolate), v8_str("funky"),
+ static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontEnum));
fun_templ->InstanceTemplate()->SetAccessor(v8_str("m"), accessor);
+
Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
- Local<Value> result =
- v8_compile("(new obj()).toString()")->Run(env.local()).ToLocalChecked();
+ Local<Value> result = CompileRun("(new obj()).toString()");
CHECK(v8_str("[object funky]")->Equals(env.local(), result).FromJust());
CompileRun("var obj_instance = new obj();");
- Local<Script> script;
- script = v8_compile("obj_instance.x");
+
+ Local<Script> script = v8_compile("obj_instance.x");
for (int i = 0; i < 30; i++) {
CHECK_EQ(1, v8_run_int32value(script));
}
@@ -12633,22 +12636,20 @@ THREADED_TEST(NewTargetHandler) {
}
THREADED_TEST(ObjectProtoToString) {
+ LocalContext context;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("MyClass"));
- LocalContext context;
Local<String> customized_tostring = v8_str("customized toString");
// Replace Object.prototype.toString
- v8_compile(
- "Object.prototype.toString = function() {"
- " return 'customized toString';"
- "}")
- ->Run(context.local())
- .ToLocalChecked();
+ CompileRun(R"(
+ Object.prototype.toString = function() {
+ return 'customized toString';
+ })");
// Normal ToString call should call replaced Object.prototype.toString
Local<v8::Object> instance = templ->GetFunction(context.local())
@@ -12659,10 +12660,11 @@ THREADED_TEST(ObjectProtoToString) {
CHECK(value->IsString() &&
value->Equals(context.local(), customized_tostring).FromJust());
- // ObjectProtoToString should not call replace toString function.
+ // ObjectProtoToString should not call the replaced toString function. It
+ // should not look at the class name either.
value = instance->ObjectProtoToString(context.local()).ToLocalChecked();
CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object MyClass]")).FromJust());
+ value->Equals(context.local(), v8_str("[object Object]")).FromJust());
// Check global
value =
@@ -12671,8 +12673,7 @@ THREADED_TEST(ObjectProtoToString) {
value->Equals(context.local(), v8_str("[object Object]")).FromJust());
// Check ordinary object
- Local<Value> object =
- v8_compile("new Object()")->Run(context.local()).ToLocalChecked();
+ Local<Value> object = CompileRun("new Object()");
value = object.As<v8::Object>()
->ObjectProtoToString(context.local())
.ToLocalChecked();
@@ -12685,16 +12686,23 @@ TEST(ObjectProtoToStringES6) {
LocalContext context;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
+
+ // Check that ES6 semantics using @@toStringTag work.
+ Local<v8::Symbol> toStringTag = v8::Symbol::GetToStringTag(isolate);
+
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("MyClass"));
+ templ->PrototypeTemplate()->Set(
+ toStringTag, v8_str("MyClassToStringTag"),
+ static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontEnum));
Local<String> customized_tostring = v8_str("customized toString");
// Replace Object.prototype.toString
- CompileRun(
- "Object.prototype.toString = function() {"
- " return 'customized toString';"
- "}");
+ CompileRun(R"(
+ Object.prototype.toString = function() {
+ return 'customized toString';
+ })");
// Normal ToString call should call replaced Object.prototype.toString
Local<v8::Object> instance = templ->GetFunction(context.local())
@@ -12705,27 +12713,14 @@ TEST(ObjectProtoToStringES6) {
CHECK(value->IsString() &&
value->Equals(context.local(), customized_tostring).FromJust());
- // ObjectProtoToString should not call replace toString function.
+ // ObjectProtoToString should not call the replaced toString function. Instead
+ // it should look at the @@toStringTag property.
value = instance->ObjectProtoToString(context.local()).ToLocalChecked();
CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object MyClass]")).FromJust());
-
- // Check global
- value =
- context->Global()->ObjectProtoToString(context.local()).ToLocalChecked();
- CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object Object]")).FromJust());
-
- // Check ordinary object
- Local<Value> object = CompileRun("new Object()");
- value = object.As<v8::Object>()
- ->ObjectProtoToString(context.local())
- .ToLocalChecked();
- CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object Object]")).FromJust());
+ value->Equals(context.local(), v8_str("[object MyClassToStringTag]"))
+ .FromJust());
- // Check that ES6 semantics using @@toStringTag work
- Local<v8::Symbol> toStringTag = v8::Symbol::GetToStringTag(isolate);
+ Local<Value> object;
#define TEST_TOSTRINGTAG(type, tag, expected) \
do { \
@@ -16357,18 +16352,7 @@ TEST(PromiseHook) {
CHECK_EQ(v8::Promise::kPending, GetPromise("p")->State());
CompileRun("resolve(Promise.resolve(value));\n");
CHECK_EQ(v8::Promise::kFulfilled, GetPromise("p")->State());
- CHECK_EQ(11, promise_hook_data->promise_hook_count);
-
- promise_hook_data->Reset();
- source =
- "var p = Promise.resolve({\n"
- " then(r) {\n"
- " r();\n"
- " }\n"
- "});";
- CompileRun(source);
- CHECK_EQ(GetPromise("p")->State(), v8::Promise::kFulfilled);
- CHECK_EQ(promise_hook_data->promise_hook_count, 5);
+ CHECK_EQ(9, promise_hook_data->promise_hook_count);
delete promise_hook_data;
isolate->SetPromiseHook(nullptr);
@@ -19367,6 +19351,52 @@ TEST(ModifyCodeGenFromStrings) {
try_catch.Reset();
}
+v8::ModifyCodeGenerationFromStringsResult RejectStringsIncrementNumbers(
+ Local<Context> context, Local<Value> source) {
+ if (source->IsString()) {
+ return {false, v8::MaybeLocal<String>()};
+ }
+
+ Local<v8::Number> number;
+ if (!source->ToNumber(context).ToLocal(&number)) {
+ return {true, v8::MaybeLocal<String>()};
+ }
+
+ Local<v8::String> incremented =
+ String::NewFromUtf8(context->GetIsolate(),
+ std::to_string(number->Value() + 1).c_str(),
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+
+ return {true, incremented};
+}
+
+TEST(AllowFromStringsOrModifyCodegen) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ context->GetIsolate()->SetModifyCodeGenerationFromStringsCallback(
+ &RejectStringsIncrementNumbers);
+
+ context->AllowCodeGenerationFromStrings(false);
+
+ TryCatch try_catch(CcTest::isolate());
+ Local<Value> result = CompileRun("eval('40+2')");
+ CHECK(result.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ result = CompileRun("eval(42)");
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
+
+ context->AllowCodeGenerationFromStrings(true);
+
+ result = CompileRun("eval('40+2')");
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
+
+ result = CompileRun("eval(42)");
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
+}
+
TEST(SetErrorMessageForCodeGenFromStrings) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -24464,8 +24494,6 @@ TEST(TurboAsmDisablesDetach) {
"}"
"var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
- "%PrepareFunctionForOptimization(module.load);"
- "%OptimizeFunctionOnNextCall(module.load);"
"module.load();"
"buffer";
@@ -24481,8 +24509,6 @@ TEST(TurboAsmDisablesDetach) {
"}"
"var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
- "%PrepareFunctionForOptimization(module.store);"
- "%OptimizeFunctionOnNextCall(module.store);"
"module.store();"
"buffer";
@@ -25734,7 +25760,6 @@ void HostInitializeImportMetaObjectCallbackStatic(Local<Context> context,
Local<Module> module,
Local<Object> meta) {
CHECK(!module.IsEmpty());
-
meta->CreateDataProperty(context, v8_str("foo"), v8_str("bar")).ToChecked();
}
@@ -25758,10 +25783,9 @@ TEST(ImportMeta) {
v8::ScriptCompiler::Source source(source_text, origin);
Local<Module> module =
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- i::Handle<i::Object> meta =
- i_isolate->RunHostInitializeImportMetaObjectCallback(
- i::Handle<i::SourceTextModule>::cast(v8::Utils::OpenHandle(*module)));
- CHECK(meta->IsJSObject());
+ i::Handle<i::JSObject> meta = i::SourceTextModule::GetImportMeta(
+ i_isolate,
+ i::Handle<i::SourceTextModule>::cast(v8::Utils::OpenHandle(*module)));
Local<Object> meta_obj = Local<Object>::Cast(v8::Utils::ToLocal(meta));
CHECK(meta_obj->Get(context.local(), v8_str("foo"))
.ToLocalChecked()
@@ -26890,8 +26914,8 @@ static void CallIsolate2(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context>::New(isolate_2, context_2);
v8::Context::Scope context_scope(context);
reinterpret_cast<i::Isolate*>(isolate_2)->heap()->CollectAllGarbage(
- i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
- v8::kGCCallbackFlagForced);
+ i::Heap::kForcedGC, i::GarbageCollectionReason::kTesting,
+ v8::kNoGCCallbackFlags);
CompileRun("f2() //# sourceURL=isolate2b");
}
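
The AllowFromStringsOrModifyCodegen test added earlier in this file captures the callback contract: the first field of v8::ModifyCodeGenerationFromStringsResult decides whether code generation proceeds, and a non-empty second field substitutes the source that actually gets compiled, which is how eval(42) yields 43 above. A hedged sketch of a callback implementing a different, purely illustrative policy:

#include "include/v8.h"

// Illustrative policy: only values that are already strings may reach eval();
// they are passed through unchanged, everything else is rejected.
v8::ModifyCodeGenerationFromStringsResult AllowOnlyStringSources(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source) {
  if (!source->IsString()) {
    return {false, v8::MaybeLocal<v8::String>()};  // block codegen entirely
  }
  // Allowed; hand the same string back as the source to compile.
  return {true, source.As<v8::String>()};
}

// Registration mirrors the test:
//   isolate->SetModifyCodeGenerationFromStringsCallback(&AllowOnlyStringSources);
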
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 755041e0fb..eaad3180a5 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -2686,9 +2686,8 @@ TEST(AllocateFunctionWithMapAndContext) {
m.NewJSPromise(context, m.UndefinedConstant());
TNode<Context> promise_context = m.CreatePromiseResolvingFunctionsContext(
context, promise, m.BooleanConstant(false), native_context);
- TNode<Object> resolve_info = m.LoadContextElement(
- native_context,
- Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
+ TNode<Object> resolve_info =
+ m.PromiseCapabilityDefaultResolveSharedFunConstant();
const TNode<Object> map = m.LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
const TNode<JSFunction> resolve = m.AllocateFunctionWithMapAndContext(
@@ -2705,9 +2704,11 @@ TEST(AllocateFunctionWithMapAndContext) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_fixed_array(), fun->elements());
CHECK_EQ(isolate->heap()->many_closures_cell(), fun->raw_feedback_cell());
CHECK(!fun->has_prototype_slot());
- CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
+ CHECK_EQ(*isolate->factory()->promise_capability_default_resolve_shared_fun(),
fun->shared());
- CHECK_EQ(isolate->promise_capability_default_resolve_shared_fun()->GetCode(),
+ CHECK_EQ(isolate->factory()
+ ->promise_capability_default_resolve_shared_fun()
+ ->GetCode(),
fun->code());
}
@@ -2769,10 +2770,12 @@ TEST(NewPromiseCapability) {
CHECK(result->promise().IsJSPromise());
CHECK(result->resolve().IsJSFunction());
CHECK(result->reject().IsJSFunction());
- CHECK_EQ(*isolate->promise_capability_default_reject_shared_fun(),
- JSFunction::cast(result->reject()).shared());
- CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
- JSFunction::cast(result->resolve()).shared());
+ CHECK_EQ(
+ *isolate->factory()->promise_capability_default_reject_shared_fun(),
+ JSFunction::cast(result->reject()).shared());
+ CHECK_EQ(
+ *isolate->factory()->promise_capability_default_resolve_shared_fun(),
+ JSFunction::cast(result->resolve()).shared());
Handle<JSFunction> callbacks[] = {
handle(JSFunction::cast(result->resolve()), isolate),
@@ -3710,6 +3713,284 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
FLAG_turbo_instruction_scheduling = old_turbo_instruction_scheduling;
}
+TEST(WasmInt32ToHeapNumber) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ int32_t test_values[] = {
+ // Smi values.
+ 1,
+ 0,
+ -1,
+ kSmiMaxValue,
+ kSmiMinValue,
+ // Test integers that can't be Smis (only possible if Smis are 31 bits).
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ kSmiMaxValue + 1,
+ kSmiMinValue - 1,
+#endif
+ };
+
+ // FunctionTester can't handle Wasm type arguments, so for each test value,
+ // build a function with the arguments baked in, then generate a no-argument
+ // function to call.
+ const int kNumParams = 1;
+ for (size_t i = 0; i < arraysize(test_values); ++i) {
+ int32_t test_value = test_values[i];
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+ Node* context = m.Parameter(kNumParams + 1);
+ const TNode<Int32T> arg = m.Int32Constant(test_value);
+ const TNode<Object> call_result =
+ m.CallBuiltin(Builtins::kWasmInt32ToHeapNumber, context, arg);
+ m.Return(call_result);
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK(result->IsNumber());
+ Handle<Object> expected(isolate->factory()->NewNumber(test_value));
+ CHECK(result->StrictEquals(*expected));
+ }
+}
+
+int32_t NumberToInt32(Handle<Object> number) {
+ if (number->IsSmi()) {
+ return Smi::ToInt(*number);
+ }
+ if (number->IsHeapNumber()) {
+ double num = HeapNumber::cast(*number).value();
+ return DoubleToInt32(num);
+ }
+ UNREACHABLE();
+}
+
+TEST(WasmTaggedNonSmiToInt32) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<Object> test_values[] = {
+ // No Smis here; the builtin can't handle them correctly.
+ factory->NewNumber(-0.0),
+ factory->NewNumber(1.5),
+ factory->NewNumber(-1.5),
+ factory->NewNumber(2 * static_cast<double>(kSmiMaxValue)),
+ factory->NewNumber(2 * static_cast<double>(kSmiMinValue)),
+ factory->NewNumber(std::numeric_limits<double>::infinity()),
+ factory->NewNumber(-std::numeric_limits<double>::infinity()),
+ factory->NewNumber(-std::numeric_limits<double>::quiet_NaN()),
+ };
+
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+ Node* context = m.Parameter(kNumParams + 2);
+ const TNode<Object> arg = m.CAST(m.Parameter(0));
+ int32_t result = 0;
+ Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
+ Node* value = m.CallBuiltin(Builtins::kWasmTaggedNonSmiToInt32, context, arg);
+ m.StoreNoWriteBarrier(MachineRepresentation::kWord32, base, value);
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ for (size_t i = 0; i < arraysize(test_values); ++i) {
+ Handle<Object> test_value = test_values[i];
+ ft.Call(test_value);
+ int32_t expected = NumberToInt32(test_value);
+ CHECK_EQ(result, expected);
+ }
+}
+
+TEST(WasmFloat32ToNumber) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ float test_values[] = {
+ // Smi values.
+ 1,
+ 0,
+ -1,
+ // Max and min Smis can't be represented as floats.
+ // Non-Smi values.
+ -0.0,
+ 1.5,
+ std::numeric_limits<float>::quiet_NaN(),
+ std::numeric_limits<float>::infinity(),
+ };
+
+ // FunctionTester can't handle Wasm type arguments, so for each test value,
+ // build a function with the arguments baked in, then generate a no-argument
+ // function to call.
+ const int kNumParams = 1;
+ for (size_t i = 0; i < arraysize(test_values); ++i) {
+ double test_value = test_values[i];
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+ Node* context = m.Parameter(kNumParams + 1);
+ const TNode<Float32T> arg = m.Float32Constant(test_value);
+ const TNode<Object> call_result =
+ m.CallBuiltin(Builtins::kWasmFloat32ToNumber, context, arg);
+ m.Return(call_result);
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK(result->IsNumber());
+ Handle<Object> expected(isolate->factory()->NewNumber(test_value));
+ CHECK(result->StrictEquals(*expected) ||
+ (std::isnan(test_value) && std::isnan(result->Number())));
+ CHECK_EQ(result->IsSmi(), expected->IsSmi());
+ }
+}
+
+TEST(WasmFloat64ToNumber) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ double test_values[] = {
+ // Smi values.
+ 1,
+ 0,
+ -1,
+ kSmiMaxValue,
+ kSmiMinValue,
+ // Non-Smi values.
+ -0.0,
+ 1.5,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity(),
+ };
+
+ // FunctionTester can't handle Wasm type arguments, so for each test value,
+ // build a function with the arguments baked in, then generate a no-argument
+ // function to call.
+ const int kNumParams = 1;
+ for (size_t i = 0; i < arraysize(test_values); ++i) {
+ double test_value = test_values[i];
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+ Node* context = m.Parameter(kNumParams + 1);
+ const TNode<Float64T> arg = m.Float64Constant(test_value);
+ const TNode<Object> call_result =
+ m.CallBuiltin(Builtins::kWasmFloat64ToNumber, context, arg);
+ m.Return(call_result);
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK(result->IsNumber());
+ Handle<Object> expected(isolate->factory()->NewNumber(test_value));
+ CHECK(result->StrictEquals(*expected) ||
+ (std::isnan(test_value) && std::isnan(result->Number())));
+ CHECK_EQ(result->IsSmi(), expected->IsSmi());
+ }
+}
+
+double NumberToFloat64(Handle<Object> number) {
+ if (number->IsSmi()) {
+ return Smi::ToInt(*number);
+ }
+ if (number->IsHeapNumber()) {
+ return HeapNumber::cast(*number).value();
+ }
+ UNREACHABLE();
+}
+
+TEST(WasmTaggedToFloat64) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ Handle<Object> test_values[] = {
+ // Smi values.
+ handle(Smi::FromInt(1), isolate),
+ handle(Smi::FromInt(0), isolate),
+ handle(Smi::FromInt(-1), isolate),
+ handle(Smi::FromInt(kSmiMaxValue), isolate),
+ handle(Smi::FromInt(kSmiMinValue), isolate),
+ // Test some non-Smis.
+ factory->NewNumber(-0.0),
+ factory->NewNumber(1.5),
+ factory->NewNumber(-1.5),
+// Integer overflows on platforms with 32-bit Smis.
+#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
+ factory->NewNumber(2 * kSmiMaxValue),
+ factory->NewNumber(2 * kSmiMinValue),
+#endif
+ factory->NewNumber(std::numeric_limits<double>::infinity()),
+ factory->NewNumber(-std::numeric_limits<double>::infinity()),
+ factory->NewNumber(-std::numeric_limits<double>::quiet_NaN()),
+ };
+
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+ Node* context = m.Parameter(kNumParams + 2);
+ const TNode<Object> arg = m.CAST(m.Parameter(0));
+ double result = 0;
+ Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
+ Node* value = m.CallBuiltin(Builtins::kWasmTaggedToFloat64, context, arg);
+ m.StoreNoWriteBarrier(MachineRepresentation::kFloat64, base, value);
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ for (size_t i = 0; i < arraysize(test_values); ++i) {
+ Handle<Object> test_value = test_values[i];
+ ft.Call(test_value);
+ double expected = NumberToFloat64(test_value);
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(result));
+ } else {
+ CHECK_EQ(result, expected);
+ }
+ }
+}
+
+TEST(SmiUntagLeftShiftOptimization) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ {
+ TNode<TaggedIndex> param =
+ TNode<TaggedIndex>::UncheckedCast(m.Parameter(0));
+ TNode<WordT> unoptimized =
+ m.IntPtrMul(m.TaggedIndexToIntPtr(param), m.IntPtrConstant(8));
+ TNode<WordT> optimized = m.WordShl(
+ m.BitcastTaggedToWordForTagAndSmiBits(param), 3 - kSmiTagSize);
+ m.StaticAssert(m.WordEqual(unoptimized, optimized));
+ m.Return(m.UndefinedConstant());
+ }
+
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+}
+
+TEST(SmiUntagComparisonOptimization) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ {
+ TNode<Smi> a = TNode<Smi>::UncheckedCast(m.Parameter(0));
+ TNode<Smi> b = TNode<Smi>::UncheckedCast(m.Parameter(1));
+ TNode<BoolT> unoptimized = m.UintPtrLessThan(m.SmiUntag(a), m.SmiUntag(b));
+#ifdef V8_COMPRESS_POINTERS
+ TNode<BoolT> optimized = m.Uint32LessThan(
+ m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(a)),
+ m.TruncateIntPtrToInt32(m.BitcastTaggedToWordForTagAndSmiBits(b)));
+#else
+ TNode<BoolT> optimized =
+ m.UintPtrLessThan(m.BitcastTaggedToWordForTagAndSmiBits(a),
+ m.BitcastTaggedToWordForTagAndSmiBits(b));
+#endif
+ m.StaticAssert(m.Word32Equal(unoptimized, optimized));
+ m.Return(m.UndefinedConstant());
+ }
+
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
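
The SmiUntagLeftShiftOptimization test above asserts that "untag, then multiply by 8" equals a single left shift of the still-tagged word by 3 - kSmiTagSize. The identity is plain bit arithmetic; a sketch that checks it with ordinary integers, assuming the 1-bit tag the test relies on and sticking to non-negative values to avoid the shift rules for negatives:

#include <cassert>
#include <cstdint>

int main() {
  constexpr int kTagSize = 1;  // assumed Smi/TaggedIndex tag width
  for (intptr_t i = 0; i <= 1000; ++i) {
    intptr_t raw = i << kTagSize;  // tagged word: payload shifted, tag bits 0
    // untag-then-scale == one shift applied directly to the tagged word
    assert((raw >> kTagSize) * 8 == raw << (3 - kTagSize));
  }
  return 0;
}
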
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 3d063ac770..c9f5bcfc18 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -25,7 +25,7 @@ const int kReach = 1 << kReachBits;
TEST(ConstantPoolPointers) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
- const int kRegularCount = kReach / kPointerSize;
+ const int kRegularCount = kReach / kSystemPointerSize;
ConstantPoolEntry::Access access;
int pos = 0;
intptr_t value = 0;
@@ -67,8 +67,9 @@ TEST(ConstantPoolDoubles) {
TEST(ConstantPoolMixedTypes) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
- const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
- ((kPointerSize < kDoubleSize) ? 1 : 0));
+ const int kRegularCount =
+ (((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
+ ((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Type type = kPtrType;
ConstantPoolEntry::Access access;
int pos = 0;
@@ -103,11 +104,11 @@ TEST(ConstantPoolMixedReach) {
const int ptrReach = 1 << ptrReachBits;
const int dblReachBits = kReachBits;
const int dblReach = kReach;
- const int dblRegularCount =
- Min(dblReach / kDoubleSize, ptrReach / (kDoubleSize + kPointerSize));
+ const int dblRegularCount = Min(
+ dblReach / kDoubleSize, ptrReach / (kDoubleSize + kSystemPointerSize));
const int ptrRegularCount =
- ((ptrReach - (dblRegularCount * (kDoubleSize + kPointerSize))) /
- kPointerSize) +
+ ((ptrReach - (dblRegularCount * (kDoubleSize + kSystemPointerSize))) /
+ kSystemPointerSize) +
dblRegularCount;
ConstantPoolBuilder builder(ptrReachBits, dblReachBits);
ConstantPoolEntry::Access access;
@@ -152,8 +153,9 @@ TEST(ConstantPoolMixedReach) {
TEST(ConstantPoolSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
- const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
- ((kPointerSize < kDoubleSize) ? 1 : 0));
+ const int kRegularCount =
+ (((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
+ ((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
@@ -201,8 +203,9 @@ TEST(ConstantPoolSharing) {
TEST(ConstantPoolNoSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
- const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
- ((kPointerSize < kDoubleSize) ? 1 : 0));
+ const int kRegularCount =
+ (((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
+ ((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
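
The only change to these constant-pool tests is the rename from the retired kPointerSize to kSystemPointerSize, the full machine-word size; the entry counts are derived from the word size because ConstantPoolBuilder entries hold raw intptr_t values. The concrete numbers below are an assumption for a 64-bit build with pointer compression, stated for orientation rather than read from the V8 headers:

#include <cstdint>

constexpr int kSystemPointerSizeSketch = sizeof(void*);  // 8 on a 64-bit build
constexpr int kDoubleSizeSketch = sizeof(double);        // 8
constexpr int kTaggedSizeSketch = 4;  // compressed on-heap slot (assumption)
// kReach / kSystemPointerSizeSketch is exactly the arithmetic the updated
// kRegularCount expressions perform for pointer-only pools.
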
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index b329a841a6..de1c342754 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -39,6 +39,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/spaces.h"
#include "src/libplatform/default-platform.h"
+#include "src/libsampler/sampler.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
@@ -54,7 +55,6 @@
#include "src/tracing/trace-event.h"
#ifdef V8_USE_PERFETTO
-#include "protos/perfetto/trace/chrome/chrome_trace_event.pb.h"
#include "protos/perfetto/trace/trace.pb.h"
#endif
@@ -2625,22 +2625,34 @@ using v8::platform::tracing::TraceObject;
namespace {
#ifdef V8_USE_PERFETTO
-
class CpuProfilerListener : public platform::tracing::TraceEventListener {
public:
void ProcessPacket(const ::perfetto::protos::TracePacket& packet) {
- for (const ::perfetto::protos::ChromeTraceEvent& trace_event :
- packet.chrome_events().trace_events()) {
- if (trace_event.name() != std::string("Profile") &&
- trace_event.name() != std::string("ProfileChunk"))
- return;
- CHECK(!profile_id_ || trace_event.id() == profile_id_);
- CHECK_EQ(1, trace_event.args_size());
- CHECK(trace_event.args()[0].has_json_value());
- profile_id_ = trace_event.id();
- result_json_ += result_json_.empty() ? "[" : ",\n";
- result_json_ += trace_event.args()[0].json_value();
+ auto& seq_state = sequence_state_[packet.trusted_packet_sequence_id()];
+ if (packet.incremental_state_cleared()) seq_state = SequenceState{};
+
+ if (!packet.has_track_event()) return;
+
+ // Update incremental state.
+ if (packet.has_interned_data()) {
+ const auto& interned_data = packet.interned_data();
+ for (const auto& it : interned_data.event_names()) {
+ CHECK_EQ(seq_state.event_names_.find(it.iid()),
+ seq_state.event_names_.end());
+ seq_state.event_names_[it.iid()] = it.name();
+ }
}
+ const auto& track_event = packet.track_event();
+ auto name = seq_state.event_names_[track_event.name_iid()];
+ if (name != "Profile" && name != "ProfileChunk") return;
+
+ CHECK_EQ(1, track_event.debug_annotations_size());
+ CHECK(track_event.debug_annotations()[0].has_legacy_json_value());
+ CHECK(!profile_id_ ||
+ track_event.legacy_event().unscoped_id() == profile_id_);
+ profile_id_ = track_event.legacy_event().unscoped_id();
+ result_json_ += result_json_.empty() ? "[" : ",\n";
+ result_json_ += track_event.debug_annotations()[0].legacy_json_value();
}
const std::string& result_json() {
@@ -2650,11 +2662,17 @@ class CpuProfilerListener : public platform::tracing::TraceEventListener {
void Reset() {
result_json_.clear();
profile_id_ = 0;
+ sequence_state_.clear();
}
private:
std::string result_json_;
uint64_t profile_id_ = 0;
+
+ struct SequenceState {
+ std::map<uint64_t, std::string> event_names_;
+ };
+ std::map<uint32_t, SequenceState> sequence_state_;
};
#else
@@ -2732,6 +2750,9 @@ TEST(TracingCpuProfiler) {
tracing_controller->StartTracing(trace_config);
CompileRun(test_code.c_str());
+#ifdef V8_USE_PERFETTO
+ TrackEvent::Flush();
+#endif
tracing_controller->StopTracing();
#ifdef V8_USE_PERFETTO
@@ -2757,9 +2778,11 @@ TEST(TracingCpuProfiler) {
->IsTrue();
}
+#ifndef V8_USE_PERFETTO
static_cast<v8::platform::tracing::TracingController*>(
i::V8::GetCurrentPlatform()->GetTracingController())
->Initialize(nullptr);
+#endif // !V8_USE_PERFETTO
}
TEST(Issue763073) {
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index a8b1ecb7a9..2ab689e746 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -4,6 +4,7 @@
#include "src/api/api-inl.h"
#include "src/flags/flags.h"
+#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces.h"
#include "test/cctest/cctest.h"
#include "tools/debug_helper/debug-helper.h"
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index ed669db806..35b868a615 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -3601,34 +3601,30 @@ TEST(NoDebugBreakInAfterCompileEventListener) {
// Test that the debug break flag works with function.apply.
-TEST(DebugBreakFunctionApply) {
+TEST(RepeatDebugBreak) {
+ // Test that we can repeatedly set a break without JS execution continuing.
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::Context> context = env.local();
// Create a function for testing breaking in apply.
- v8::Local<v8::Function> foo = CompileFunction(
- &env,
- "function baz(x) { }"
- "function bar(x) { baz(); }"
- "function foo(){ bar.apply(this, [1]); }",
- "foo");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo() {}", "foo");
- // Register a debug event listener which steps and counts.
+ // Register a debug delegate which repeatedly sets a break and counts.
DebugEventBreakMax delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
// Set the debug break flag before calling the code using function.apply.
v8::debug::SetBreakOnNextFunctionCall(env->GetIsolate());
- // Limit the number of debug breaks. This is a regression test for issue 493
- // where this test would enter an infinite loop.
+ // Trigger a break by calling into foo().
break_point_hit_count = 0;
- max_break_point_hit_count = 10000; // 10000 => infinite loop.
+ max_break_point_hit_count = 10000;
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
// When keeping the debug break, several breaks will happen.
- CHECK_GT(break_point_hit_count, 1);
+ CHECK_EQ(break_point_hit_count, max_break_point_hit_count);
v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
@@ -3697,7 +3693,7 @@ void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- // Register a debug event listener which sets the break flag and counts.
+ // Register a debug delegate which repeatedly sets the break flag and counts.
DebugEventBreakMax delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index cc4f5cc296..496fa66684 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -546,6 +546,9 @@ TEST(DisasmIa320) {
__ pinsrw(xmm5, edx, 5);
__ pinsrw(xmm5, Operand(edx, 4), 5);
+ __ movmskps(edx, xmm5);
+ __ pmovmskb(edx, xmm5);
+
#define EMIT_SSE2_INSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ instruction(xmm5, xmm1); \
__ instruction(xmm5, Operand(edx, 4));
@@ -782,6 +785,10 @@ TEST(DisasmIa320) {
__ vmovd(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vmovd(eax, xmm1);
__ vmovd(Operand(ebx, ecx, times_4, 10000), xmm1);
+
+ __ vmovmskps(edx, xmm5);
+ __ vpmovmskb(ebx, xmm1);
+
#define EMIT_SSE2_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ v##instruction(xmm7, xmm5, xmm1); \
__ v##instruction(xmm7, xmm5, Operand(edx, 4));
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index b9cf05bcc9..8e9eadca25 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -391,6 +391,8 @@ TEST(DisasmX64) {
// Move operation
__ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
__ cvttss2si(rdx, xmm1);
+ __ cvtqsi2ss(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ cvtqsi2ss(xmm1, rdx);
__ cvttps2dq(xmm0, xmm1);
__ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movaps(xmm0, xmm1);
@@ -403,6 +405,8 @@ TEST(DisasmX64) {
__ ucomiss(xmm0, xmm1);
__ ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ movmskps(rdx, xmm9);
+
#define EMIT_SSE_INSTR(instruction, notUsed1, notUsed2) \
__ instruction(xmm1, xmm0); \
__ instruction(xmm1, Operand(rbx, rcx, times_4, 10000));
@@ -423,8 +427,15 @@ TEST(DisasmX64) {
__ cvttsd2si(rdx, xmm1);
__ cvttsd2siq(rdx, xmm1);
__ cvttsd2siq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ cvtlsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ cvtlsi2sd(xmm1, rdx);
__ cvtqsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ cvtqsi2sd(xmm1, rdx);
+ __ cvtss2sd(xmm1, xmm9);
+ __ cvtss2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ cvtsd2si(rdx, xmm9);
+ __ cvtsd2siq(rdx, xmm9);
+
__ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
// 128 bit move instructions.
@@ -434,6 +445,12 @@ TEST(DisasmX64) {
__ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
__ ucomisd(xmm0, xmm1);
+ __ ucomisd(xmm8, Operand(rbx, rdx, times_4, 10000));
+
+ __ cmpltsd(xmm3, xmm11);
+
+ __ movmskpd(rdx, xmm9);
+ __ pmovmskb(rdx, xmm9);
__ pcmpeqd(xmm1, xmm0);
@@ -571,6 +588,9 @@ TEST(DisasmX64) {
__ blendvpd(xmm5, xmm1);
__ blendvpd(xmm5, Operand(rdx, 4));
+ __ roundss(xmm8, xmm3, kRoundDown);
+ __ roundsd(xmm8, xmm3, kRoundDown);
+
SSE4_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
SSE4_UNOP_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
SSE4_EXTRACT_INSTRUCTION_LIST(EMIT_SSE34_IMM_INSTR)
@@ -650,6 +670,7 @@ TEST(DisasmX64) {
__ vmovupd(xmm0, Operand(rbx, rcx, times_4, 10000));
__ vmovupd(Operand(rbx, rcx, times_4, 10000), xmm0);
__ vmovmskpd(r9, xmm4);
+ __ vpmovmskb(r10, xmm9);
__ vmovups(xmm5, xmm1);
__ vmovups(xmm5, Operand(rdx, 4));
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 840478a520..d7b672a345 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -31,12 +31,10 @@ namespace test_field_type_tracking {
// and observed transitions caused generalization of all fields).
const bool IS_PROTO_TRANS_ISSUE_FIXED = false;
-
// TODO(ishell): fix this once TransitionToAccessorProperty is able to always
// keep map in fast mode.
const bool IS_ACCESSOR_FIELD_SUPPORTED = false;
-
// Number of properties used in the tests.
const int kPropCount = 7;
@@ -606,6 +604,33 @@ Handle<Code> CreateDummyOptimizedCode(Isolate* isolate) {
.Build();
}
+static void CheckCodeObjectForDeopt(const CRFTData& from,
+ const CRFTData& expected,
+ Handle<Code> code_field_type,
+ Handle<Code> code_field_repr,
+ Handle<Code> code_field_const,
+ bool expected_deopt) {
+ if (!from.type->Equals(*expected.type)) {
+ CHECK_EQ(expected_deopt, code_field_type->marked_for_deoptimization());
+ } else {
+ CHECK(!code_field_type->marked_for_deoptimization());
+ }
+
+ if (!from.representation.Equals(expected.representation)) {
+ CHECK_EQ(expected_deopt, code_field_repr->marked_for_deoptimization());
+ } else {
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ }
+
+ if (!code_field_const.is_null()) {
+ if (from.constness != expected.constness) {
+ CHECK_EQ(expected_deopt, code_field_const->marked_for_deoptimization());
+ } else {
+ CHECK(!code_field_const->marked_for_deoptimization());
+ }
+ }
+}
+
// This test ensures that field generalization at |property_index| is done
// correctly independently of the fact that the |map| is detached from
// transition tree or not.
@@ -668,13 +693,23 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
// Create dummy optimized code object to test correct dependencies
// on the field owner.
- Handle<Code> code = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_type = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_repr = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_const = CreateDummyOptimizedCode(isolate);
Handle<Map> field_owner(
map->FindFieldOwner(isolate, InternalIndex(property_index)), isolate);
- DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
- field_owner,
- DependentCode::kFieldOwnerGroup);
- CHECK(!code->marked_for_deoptimization());
+ DependentCode::InstallDependency(isolate,
+ MaybeObjectHandle::Weak(code_field_type),
+ field_owner, DependentCode::kFieldTypeGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
+ DependentCode::kFieldRepresentationGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ CHECK(!code_field_type->marked_for_deoptimization());
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ CHECK(!code_field_const->marked_for_deoptimization());
// Create new maps by generalizing representation of propX field.
Handle<Map> new_map =
@@ -687,29 +722,28 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
+ bool should_deopt = false;
if (is_detached_map) {
CHECK(!map->is_stable());
CHECK(map->is_deprecated());
CHECK_NE(*map, *new_map);
- CHECK_EQ(expected_field_owner_dependency && !field_owner->is_deprecated(),
- code->marked_for_deoptimization());
-
+ should_deopt =
+ expected_field_owner_dependency && !field_owner->is_deprecated();
} else if (expected_deprecation) {
CHECK(!map->is_stable());
CHECK(map->is_deprecated());
CHECK(field_owner->is_deprecated());
- CHECK_NE(*map, *new_map);
- CHECK(!code->marked_for_deoptimization());
-
+ should_deopt = false;
} else {
CHECK(!field_owner->is_deprecated());
CHECK(map->is_stable()); // Map did not change, must be left stable.
CHECK_EQ(*map, *new_map);
-
- CHECK_EQ(expected_field_owner_dependency,
- code->marked_for_deoptimization());
+ should_deopt = expected_field_owner_dependency;
}
+ CheckCodeObjectForDeopt(from, expected, code_field_type, code_field_repr,
+ code_field_const, should_deopt);
+
{
// Check that all previous maps are not stable.
Map tmp = *new_map;
@@ -1002,7 +1036,6 @@ TEST(GeneralizeFieldWithAccessorProperties) {
}
}
-
////////////////////////////////////////////////////////////////////////////////
// A set of tests for attribute reconfiguration case.
//
@@ -1035,7 +1068,6 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
-
// Create another branch in transition tree (property at index |kSplitProp|
// has different attributes), initialize expectations.
const int kSplitProp = kPropCount / 2;
@@ -1059,13 +1091,23 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
- Handle<Code> code = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_type = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_repr = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_const = CreateDummyOptimizedCode(isolate);
Handle<Map> field_owner(
map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
- DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
- field_owner,
- DependentCode::kFieldOwnerGroup);
- CHECK(!code->marked_for_deoptimization());
+ DependentCode::InstallDependency(isolate,
+ MaybeObjectHandle::Weak(code_field_type),
+ field_owner, DependentCode::kFieldTypeGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
+ DependentCode::kFieldRepresentationGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ CHECK(!code_field_type->marked_for_deoptimization());
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ CHECK(!code_field_const->marked_for_deoptimization());
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1085,7 +1127,9 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
expected.type);
}
CHECK(map->is_deprecated());
- CHECK(!code->marked_for_deoptimization());
+ CHECK(!code_field_type->marked_for_deoptimization());
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ CHECK(!code_field_const->marked_for_deoptimization());
CHECK_NE(*map, *new_map);
CHECK(!new_map->is_deprecated());
@@ -1125,7 +1169,6 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
-
// Create another branch in transition tree (property at index |kSplitProp|
// has different attributes), initialize expectations.
const int kSplitProp = kPropCount / 2;
@@ -1149,13 +1192,23 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
- Handle<Code> code = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_type = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_repr = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_const = CreateDummyOptimizedCode(isolate);
Handle<Map> field_owner(
map->FindFieldOwner(isolate, InternalIndex(kSplitProp)), isolate);
- DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
- field_owner,
- DependentCode::kFieldOwnerGroup);
- CHECK(!code->marked_for_deoptimization());
+ DependentCode::InstallDependency(isolate,
+ MaybeObjectHandle::Weak(code_field_type),
+ field_owner, DependentCode::kFieldTypeGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
+ DependentCode::kFieldRepresentationGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ CHECK(!code_field_type->marked_for_deoptimization());
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ CHECK(!code_field_const->marked_for_deoptimization());
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1179,7 +1232,8 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
}
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_owner_dependency, code->marked_for_deoptimization());
+ CheckCodeObjectForDeopt(from, expected, code_field_type, code_field_repr,
+ code_field_const, expected_field_owner_dependency);
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
@@ -1364,7 +1418,6 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjectFieldToTagged) {
{PropertyConstness::kMutable, Representation::Tagged(), any_type});
}
-
// Checks that given |map| is deprecated and that it updates to given |new_map|
// which in turn should match expectations.
struct CheckDeprecated {
@@ -1383,7 +1436,6 @@ struct CheckDeprecated {
}
};
-
// Checks that given |map| is NOT deprecated, equals to given |new_map| and
// matches expectations.
struct CheckSameMap {
@@ -1403,7 +1455,6 @@ struct CheckSameMap {
}
};
-
// Checks that given |map| is NOT deprecated and matches expectations.
// |new_map| is unrelated to |map|.
struct CheckUnrelated {
@@ -1471,7 +1522,6 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
-
// Create branch to |map1|.
Handle<Map> map1 = map;
Expectations expectations1 = expectations;
@@ -1488,7 +1538,6 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
CHECK(map1->is_stable());
CHECK(expectations1.Check(*map1));
-
// Create another branch in transition tree (property at index |kSplitProp|
// has different attributes), initialize expectations.
Handle<Map> map2 = map;
@@ -1508,7 +1557,6 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
-
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
Handle<Map> new_map =
@@ -1555,7 +1603,6 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
-
TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1600,7 +1647,6 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
TestReconfigureProperty_CustomPropertyAfterTargetMap(&config, &checker);
}
-
TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1795,13 +1841,23 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
// Create dummy optimized code object to test correct dependencies
// on the field owner.
- Handle<Code> code = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_type = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_repr = CreateDummyOptimizedCode(isolate);
+ Handle<Code> code_field_const = CreateDummyOptimizedCode(isolate);
Handle<Map> field_owner(
map->FindFieldOwner(isolate, InternalIndex(kDiffProp)), isolate);
- DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
- field_owner,
- DependentCode::kFieldOwnerGroup);
- CHECK(!code->marked_for_deoptimization());
+ DependentCode::InstallDependency(isolate,
+ MaybeObjectHandle::Weak(code_field_type),
+ field_owner, DependentCode::kFieldTypeGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_repr), field_owner,
+ DependentCode::kFieldRepresentationGroup);
+ DependentCode::InstallDependency(
+ isolate, MaybeObjectHandle::Weak(code_field_const), field_owner,
+ DependentCode::kFieldConstGroup);
+ CHECK(!code_field_type->marked_for_deoptimization());
+ CHECK(!code_field_repr->marked_for_deoptimization());
+ CHECK(!code_field_const->marked_for_deoptimization());
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1823,7 +1879,9 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
CHECK_EQ(IsGeneralizableTo(to.constness, from.constness),
- !code->marked_for_deoptimization());
+ !code_field_const->marked_for_deoptimization());
+ CheckCodeObjectForDeopt(from, expected, code_field_type, code_field_repr,
+ Handle<Code>(), false);
CHECK(!new_map->is_deprecated());
CHECK(expectations.Check(*new_map));
@@ -2288,7 +2346,6 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
}
}
-
TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 93c7048f81..9112dc7a57 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -203,7 +203,6 @@ TEST(FlagsJitlessImplications) {
// definition order in flag-definitions.h.
CHECK(!FLAG_opt);
CHECK(!FLAG_validate_asm);
- CHECK(FLAG_wasm_interpret_all);
CHECK(!FLAG_asm_wasm_lazy_compilation);
CHECK(!FLAG_wasm_lazy_compilation);
}
diff --git a/deps/v8/test/cctest/test-inspector.cc b/deps/v8/test/cctest/test-inspector.cc
index 6dd2aefb9e..e36ce19eca 100644
--- a/deps/v8/test/cctest/test-inspector.cc
+++ b/deps/v8/test/cctest/test-inspector.cc
@@ -4,11 +4,11 @@
#include <memory>
-#include "test/cctest/cctest.h"
-
#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/string-util.h"
+#include "test/cctest/cctest.h"
using v8_inspector::StringBuffer;
using v8_inspector::StringView;
@@ -63,3 +63,108 @@ TEST(WrapInsideWrapOnInterrupt) {
isolate->RequestInterrupt(&WrapOnInterrupt, session.get());
session->wrapObject(env.local(), v8::Null(isolate), object_group_view, false);
}
+
+TEST(BinaryFromBase64) {
+ auto checkBinary = [](const v8_inspector::protocol::Binary& binary,
+ const std::vector<uint8_t>& values) {
+ std::vector<uint8_t> binary_vector(binary.data(),
+ binary.data() + binary.size());
+ CHECK_EQ(binary_vector, values);
+ };
+
+ {
+ bool success;
+ auto binary = v8_inspector::protocol::Binary::fromBase64("", &success);
+ CHECK(success);
+ checkBinary(binary, {});
+ }
+ {
+ bool success;
+ auto binary = v8_inspector::protocol::Binary::fromBase64("YQ==", &success);
+ CHECK(success);
+ checkBinary(binary, {'a'});
+ }
+ {
+ bool success;
+ auto binary = v8_inspector::protocol::Binary::fromBase64("YWI=", &success);
+ CHECK(success);
+ checkBinary(binary, {'a', 'b'});
+ }
+ {
+ bool success;
+ auto binary = v8_inspector::protocol::Binary::fromBase64("YWJj", &success);
+ CHECK(success);
+ checkBinary(binary, {'a', 'b', 'c'});
+ }
+ {
+ bool success;
+ // Wrong input length:
+ auto binary = v8_inspector::protocol::Binary::fromBase64("Y", &success);
+ CHECK(!success);
+ }
+ {
+ bool success;
+ // Invalid space:
+ auto binary = v8_inspector::protocol::Binary::fromBase64("=AAA", &success);
+ CHECK(!success);
+ }
+ {
+ bool success;
+ // Invalid space in a non-final block of four:
+ auto binary =
+ v8_inspector::protocol::Binary::fromBase64("AAA=AAAA", &success);
+ CHECK(!success);
+ }
+ {
+ bool success;
+ // Invalid space in the second-to-last position:
+ auto binary = v8_inspector::protocol::Binary::fromBase64("AA=A", &success);
+ CHECK(!success);
+ }
+ {
+ bool success;
+ // Invalid character:
+ auto binary = v8_inspector::protocol::Binary::fromBase64(" ", &success);
+ CHECK(!success);
+ }
+}
+
+TEST(BinaryToBase64) {
+ uint8_t input[] = {'a', 'b', 'c'};
+ {
+ auto binary = v8_inspector::protocol::Binary::fromSpan(input, 0);
+ v8_inspector::protocol::String base64 = binary.toBase64();
+ CHECK_EQ(base64.utf8(), "");
+ }
+ {
+ auto binary = v8_inspector::protocol::Binary::fromSpan(input, 1);
+ v8_inspector::protocol::String base64 = binary.toBase64();
+ CHECK_EQ(base64.utf8(), "YQ==");
+ }
+ {
+ auto binary = v8_inspector::protocol::Binary::fromSpan(input, 2);
+ v8_inspector::protocol::String base64 = binary.toBase64();
+ CHECK_EQ(base64.utf8(), "YWI=");
+ }
+ {
+ auto binary = v8_inspector::protocol::Binary::fromSpan(input, 3);
+ v8_inspector::protocol::String base64 = binary.toBase64();
+ CHECK_EQ(base64.utf8(), "YWJj");
+ }
+}
+
+TEST(BinaryBase64RoundTrip) {
+ std::array<uint8_t, 256> values;
+ for (uint16_t b = 0x0; b <= 0xFF; ++b) values[b] = b;
+ auto binary =
+ v8_inspector::protocol::Binary::fromSpan(values.data(), values.size());
+ v8_inspector::protocol::String base64 = binary.toBase64();
+ bool success = false;
+ auto roundtrip_binary =
+ v8_inspector::protocol::Binary::fromBase64(base64, &success);
+ CHECK(success);
+ CHECK_EQ(values.size(), roundtrip_binary.size());
+ for (size_t i = 0; i < values.size(); ++i) {
+ CHECK_EQ(values[i], roundtrip_binary.data()[i]);
+ }
+}
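
The expected strings in the Binary base64 tests follow directly from the standard alphabet (A-Z = 0..25, a-z = 26..51, 0-9 = 52..61, then '+' and '/'): each group of three input bytes becomes four 6-bit indices, and missing trailing bytes turn into '=' padding. Worked out for two of the vectors used above:

  'a'         = 01100001                    -> 011000 010000 (zero-padded)
                                             -> indices 24, 16 -> "YQ" plus "==" padding
  'a','b','c' = 01100001 01100010 01100011  -> 011000 010110 001001 100011
                                             -> indices 24, 22, 9, 35 -> "YWJj"
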
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 6759c37128..d6c09a1fd4 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -6,6 +6,7 @@
#include "src/execution/microtask-queue.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
+#include "src/heap/heap-inl.h"
#include "src/objects/js-objects.h"
#include "src/objects/js-weak-refs-inl.h"
#include "test/cctest/cctest.h"
@@ -97,6 +98,33 @@ void NullifyWeakCell(Handle<WeakCell> weak_cell, Isolate* isolate) {
#endif // VERIFY_HEAP
}
+Object PopClearedCellHoldings(
+ Handle<JSFinalizationRegistry> finalization_registry, Isolate* isolate) {
+ // PopClearedCell is implemented in Torque. Reproduce that implementation here
+ // for testing.
+ Handle<WeakCell> weak_cell =
+ handle(WeakCell::cast(finalization_registry->cleared_cells()), isolate);
+ DCHECK(weak_cell->prev().IsUndefined(isolate));
+ finalization_registry->set_cleared_cells(weak_cell->next());
+ weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
+
+ if (finalization_registry->cleared_cells().IsWeakCell()) {
+ WeakCell cleared_cells_head =
+ WeakCell::cast(finalization_registry->cleared_cells());
+ DCHECK_EQ(cleared_cells_head.prev(), *weak_cell);
+ cleared_cells_head.set_prev(ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate));
+ }
+
+ if (!weak_cell->unregister_token().IsUndefined(isolate)) {
+ JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap(
+ isolate, finalization_registry->ptr(), weak_cell->ptr());
+ }
+
+ return weak_cell->holdings();
+}
+
// Usage: VerifyWeakCellChain(isolate, list_head, n, cell1, cell2, ..., celln);
// verifies that list_head == cell1 and cell1, cell2, ..., celln form a list.
void VerifyWeakCellChain(Isolate* isolate, Object list_head, int n_args, ...) {
@@ -361,15 +389,13 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
NullifyWeakCell(weak_cell3, isolate);
CHECK(finalization_registry->NeedsCleanup());
- Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared1 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings3);
CHECK(weak_cell3->prev().IsUndefined(isolate));
CHECK(weak_cell3->next().IsUndefined(isolate));
CHECK(finalization_registry->NeedsCleanup());
- Object cleared2 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared2 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared2, *holdings2);
CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK(weak_cell2->next().IsUndefined(isolate));
@@ -379,8 +405,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
NullifyWeakCell(weak_cell1, isolate);
CHECK(finalization_registry->NeedsCleanup());
- Object cleared3 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared3 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared3, *holdings1);
CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK(weak_cell1->next().IsUndefined(isolate));
@@ -424,8 +449,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
*weak_cell1);
}
- Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared1 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings2);
{
@@ -434,8 +458,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
VerifyWeakCellKeyChain(isolate, key_map, *token1, 1, *weak_cell1);
}
- Object cleared2 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared2 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared2, *holdings1);
{
@@ -621,8 +644,7 @@ TEST(TestWeakCellUnregisterPopped) {
NullifyWeakCell(weak_cell1, isolate);
CHECK(finalization_registry->NeedsCleanup());
- Object cleared1 = JSFinalizationRegistry::PopClearedCellHoldings(
- finalization_registry, isolate);
+ Object cleared1 = PopClearedCellHoldings(finalization_registry, isolate);
CHECK_EQ(cleared1, *holdings1);
VerifyWeakCellChain(isolate, finalization_registry->active_cells(), 0);
@@ -853,5 +875,98 @@ TEST(TestRemoveUnregisterToken) {
}
}
+TEST(JSWeakRefScavengedInWorklist) {
+ FLAG_harmony_weak_refs = true;
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ {
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+
+ // Make a WeakRef that points to a target, both of which become unreachable.
+ {
+ HandleScope inner_scope(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
+ CHECK(Heap::InYoungGeneration(*js_object));
+ CHECK(Heap::InYoungGeneration(*inner_weak_ref));
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+
+ // Do marking. This puts the WeakRef above into the js_weak_refs worklist
+ // since its target isn't marked.
+ CHECK(
+ heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+ heap::SimulateIncrementalMarking(heap, true);
+ CHECK(!heap->mark_compact_collector()
+ ->weak_objects()
+ ->js_weak_refs.IsEmpty());
+ }
+
+ // Now collect both weak_ref and its target. The worklist should be empty.
+ CcTest::CollectGarbage(NEW_SPACE);
+ CHECK(heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+
+ // The mark-compactor shouldn't see zapped WeakRefs in the worklist.
+ CcTest::CollectAllGarbage();
+}
+
+TEST(JSWeakRefTenuredInWorklist) {
+ FLAG_harmony_weak_refs = true;
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ HandleScope outer_scope(isolate);
+ Handle<JSWeakRef> weak_ref;
+
+ // Make a WeakRef that points to a target. The target becomes unreachable.
+ {
+ HandleScope inner_scope(isolate);
+ Handle<JSObject> js_object =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
+ CHECK(Heap::InYoungGeneration(*js_object));
+ CHECK(Heap::InYoungGeneration(*inner_weak_ref));
+
+ weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
+ }
+ JSWeakRef old_weak_ref_location = *weak_ref;
+
+ // Do marking. This puts the WeakRef above into the js_weak_refs worklist
+ // since its target isn't marked.
+ CHECK(heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+ heap::SimulateIncrementalMarking(heap, true);
+ CHECK(
+ !heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+
+ // Now collect weak_ref's target. We still have a Handle to weak_ref, so it is
+ // moved and remains on the worklist.
+ CcTest::CollectGarbage(NEW_SPACE);
+ JSWeakRef new_weak_ref_location = *weak_ref;
+ CHECK_NE(old_weak_ref_location, new_weak_ref_location);
+ CHECK(
+ !heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+
+ // The mark-compactor should see the moved WeakRef in the worklist.
+ CcTest::CollectAllGarbage();
+ CHECK(heap->mark_compact_collector()->weak_objects()->js_weak_refs.IsEmpty());
+ CHECK(weak_ref->target().IsUndefined(isolate));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 254fed7db2..2daa69103a 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -721,8 +721,10 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source)
.ToLocalChecked();
- CHECK_EQ(code_event_handler.CountLines("Script", "f1"), 1);
- CHECK_EQ(code_event_handler.CountLines("Script", "f2"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Script", "f1"),
+ i::FLAG_stress_background_compile ? 2 : 1);
+ CHECK_EQ(code_event_handler.CountLines("Script", "f2"),
+ i::FLAG_stress_background_compile ? 2 : 1);
cache = v8::ScriptCompiler::CreateCodeCache(script);
}
isolate1->Dispose();
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index fd75f263ca..a7a846b3a6 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -261,9 +261,6 @@ TEST(ArrowOrAssignmentOp) {
bool TokenIsBinaryOp(Token::Value token) {
switch (token) {
case Token::COMMA:
- case Token::NULLISH:
- case Token::OR:
- case Token::AND:
#define T(name, string, precedence) case Token::name:
BINARY_OP_TOKEN_LIST(T, EXPAND_BINOP_TOKEN)
#undef T
@@ -515,6 +512,8 @@ TEST(ScanKeywords) {
#undef KEYWORD
{nullptr, i::Token::IDENTIFIER}};
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(CcTest::i_isolate());
KeywordToken key_token;
char buffer[32];
for (int i = 0; (key_token = keywords[i]).keyword != nullptr; i++) {
@@ -523,7 +522,7 @@ TEST(ScanKeywords) {
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -531,7 +530,7 @@ TEST(ScanKeywords) {
// Removing characters will make keyword matching fail.
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -542,7 +541,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -552,7 +551,7 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -566,6 +565,8 @@ TEST(ScanHTMLEndComments) {
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope handles(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(i_isolate);
// Regression test. See:
// http://code.google.com/p/chromium/issues/detail?id=53548
@@ -619,7 +620,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -628,7 +629,7 @@ TEST(ScanHTMLEndComments) {
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
i_isolate->counters()->runtime_call_stats(),
- i_isolate->logger());
+ i_isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
CHECK(!pending_error_handler.has_pending_error());
@@ -637,7 +638,7 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -646,7 +647,7 @@ TEST(ScanHTMLEndComments) {
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
i_isolate->counters()->runtime_call_stats(),
- i_isolate->logger());
+ i_isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -656,11 +657,15 @@ TEST(ScanHTMLEndComments) {
}
TEST(ScanHtmlComments) {
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(CcTest::i_isolate());
+
const char* src = "a <!-- b --> c";
// Disallow HTML comments.
{
+ flags.set_is_module(true);
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(stream.get(), true);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
@@ -668,8 +673,9 @@ TEST(ScanHtmlComments) {
// Skip HTML comments:
{
+ flags.set_is_module(false);
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -693,6 +699,9 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
TEST(StandAlonePreParser) {
v8::V8::Initialize();
i::Isolate* i_isolate = CcTest::i_isolate();
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(i_isolate);
+ flags.set_allow_natives_syntax(true);
i_isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
128 * 1024);
@@ -708,7 +717,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
@@ -718,8 +727,7 @@ TEST(StandAlonePreParser) {
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
i_isolate->counters()->runtime_call_stats(),
- i_isolate->logger());
- preparser.set_allow_natives(true);
+ i_isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
CHECK(!pending_error_handler.has_pending_error());
@@ -729,8 +737,10 @@ TEST(StandAlonePreParser) {
TEST(StandAlonePreParserNoNatives) {
v8::V8::Initialize();
-
i::Isolate* isolate = CcTest::i_isolate();
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(isolate);
+
isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
128 * 1024);
@@ -740,7 +750,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
// Preparser defaults to disallowing natives syntax.
@@ -751,7 +761,7 @@ TEST(StandAlonePreParserNoNatives) {
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
isolate->counters()->runtime_call_stats(),
- isolate->logger());
+ isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
CHECK(pending_error_handler.has_pending_error() ||
@@ -763,6 +773,8 @@ TEST(StandAlonePreParserNoNatives) {
TEST(RegressChromium62639) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(isolate);
isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
128 * 1024);
@@ -775,7 +787,7 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Zone zone(isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
@@ -784,7 +796,7 @@ TEST(RegressChromium62639) {
i::PreParser preparser(&zone, &scanner, isolate->stack_guard()->real_climit(),
&ast_value_factory, &pending_error_handler,
isolate->counters()->runtime_call_stats(),
- isolate->logger());
+ isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -796,6 +808,8 @@ TEST(RegressChromium62639) {
TEST(PreParseOverflow) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(isolate);
isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
128 * 1024);
@@ -808,7 +822,7 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::Scanner scanner(stream.get(), false);
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Zone zone(isolate->allocator(), ZONE_NAME);
@@ -817,7 +831,7 @@ TEST(PreParseOverflow) {
i::PendingCompilationErrorHandler pending_error_handler;
i::PreParser preparser(
&zone, &scanner, stack_limit, &ast_value_factory, &pending_error_handler,
- isolate->counters()->runtime_call_stats(), isolate->logger());
+ isolate->counters()->runtime_call_stats(), isolate->logger(), flags);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
}
@@ -826,7 +840,10 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(stream, false);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(CcTest::i_isolate());
+
+ i::Scanner scanner(stream, flags);
scanner.Initialize();
int i = 0;
@@ -894,7 +911,9 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
- i::Scanner scanner(stream.get(), false);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForTest(CcTest::i_isolate());
+ i::Scanner scanner(stream.get(), flags);
scanner.Initialize();
i::Token::Value start = scanner.peek();
@@ -1056,13 +1075,13 @@ TEST(ScopeUsesArgumentsSuperThis) {
factory->NewStringFromUtf8(i::CStrVector(program.begin()))
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
// The information we're checking is only produced when eager parsing.
- info.set_allow_lazy_parsing(false);
+ flags.set_allow_lazy_parsing(false);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
- CHECK(i::Rewriter::Rewrite(&info));
- info.ast_value_factory()->Internalize(isolate);
- CHECK(i::DeclarationScope::Analyze(&info));
i::DeclarationScope::AllocateScopeInfos(&info, isolate);
CHECK_NOT_NULL(info.literal());
@@ -1121,9 +1140,12 @@ static void CheckParsesToNumber(const char* source) {
i::Handle<i::Script> script = factory->NewScript(source_code);
- i::ParseInfo info(isolate, *script);
- info.set_allow_lazy_parsing(false);
- info.set_toplevel(true);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_allow_lazy_parsing(false);
+ flags.set_is_toplevel(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
@@ -1431,8 +1453,12 @@ TEST(ScopePositions) {
.ToHandleChecked();
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_language_mode(source_data[i].language_mode);
+
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_outer_language_mode(source_data[i].language_mode);
+ i::ParseInfo info(isolate, flags, &compile_state);
i::parsing::ParseProgram(&info, script, isolate);
CHECK_NOT_NULL(info.literal());
@@ -1477,14 +1503,27 @@ TEST(DiscardFunctionBody) {
i::Handle<i::String> source_code =
factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source_code);
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &compile_state);
i::parsing::ParseProgram(&info, script, isolate);
function = info.literal();
CHECK_NOT_NULL(function);
- CHECK_EQ(1, function->body()->length());
- i::FunctionLiteral* inner =
- function->body()->first()->AsExpressionStatement()->expression()->
- AsCall()->expression()->AsFunctionLiteral();
+ // The rewriter will rewrite this to
+ // .result = (function f(){...})();
+ // return .result;
+ // so extract the function from there.
+ CHECK_EQ(2, function->body()->length());
+ i::FunctionLiteral* inner = function->body()
+ ->first()
+ ->AsExpressionStatement()
+ ->expression()
+ ->AsAssignment()
+ ->value()
+ ->AsCall()
+ ->expression()
+ ->AsFunctionLiteral();
i::Scope* inner_scope = inner->scope();
i::FunctionLiteral* fun = nullptr;
if (!inner_scope->declarations()->is_empty()) {
@@ -1534,8 +1573,6 @@ enum ParserFlag {
kAllowHarmonyPrivateMethods,
kAllowHarmonyDynamicImport,
kAllowHarmonyImportMeta,
- kAllowHarmonyNullish,
- kAllowHarmonyOptionalChaining,
};
enum ParserSyncTestResult {
@@ -1549,22 +1586,17 @@ void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
i::FLAG_harmony_private_methods = flags.contains(kAllowHarmonyPrivateMethods);
i::FLAG_harmony_dynamic_import = flags.contains(kAllowHarmonyDynamicImport);
i::FLAG_harmony_import_meta = flags.contains(kAllowHarmonyImportMeta);
- i::FLAG_harmony_optional_chaining =
- flags.contains(kAllowHarmonyOptionalChaining);
- i::FLAG_harmony_nullish = flags.contains(kAllowHarmonyNullish);
}
-void SetParserFlags(i::PreParser* parser, base::EnumSet<ParserFlag> flags) {
- parser->set_allow_natives(flags.contains(kAllowNatives));
- parser->set_allow_harmony_private_methods(
+void SetParserFlags(i::UnoptimizedCompileFlags* compile_flags,
+ base::EnumSet<ParserFlag> flags) {
+ compile_flags->set_allow_natives_syntax(flags.contains(kAllowNatives));
+ compile_flags->set_allow_harmony_private_methods(
flags.contains(kAllowHarmonyPrivateMethods));
- parser->set_allow_harmony_dynamic_import(
+ compile_flags->set_allow_harmony_dynamic_import(
flags.contains(kAllowHarmonyDynamicImport));
- parser->set_allow_harmony_import_meta(
+ compile_flags->set_allow_harmony_import_meta(
flags.contains(kAllowHarmonyImportMeta));
- parser->set_allow_harmony_optional_chaining(
- flags.contains(kAllowHarmonyOptionalChaining));
- parser->set_allow_harmony_nullish(flags.contains(kAllowHarmonyNullish));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -1574,6 +1606,12 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
bool ignore_error_msg = false) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags compile_flags =
+ i::UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate, true, LanguageMode::kSloppy, REPLMode::kNo);
+ SetParserFlags(&compile_flags, flags);
+ compile_flags.set_is_module(is_module);
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
@@ -1582,15 +1620,14 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
if (test_preparser) {
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(isolate, source));
- i::Scanner scanner(stream.get(), is_module);
+ i::Scanner scanner(stream.get(), compile_flags);
i::Zone zone(isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone, isolate->ast_string_constants(),
HashSeed(isolate));
i::PreParser preparser(&zone, &scanner, stack_limit, &ast_value_factory,
&pending_error_handler,
isolate->counters()->runtime_call_stats(),
- isolate->logger(), -1, is_module);
- SetParserFlags(&preparser, flags);
+ isolate->logger(), compile_flags);
scanner.Initialize();
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
@@ -1600,10 +1637,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::FunctionLiteral* function;
{
SetGlobalFlags(flags);
- i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_allow_lazy_parsing(flags.contains(kAllowLazy));
- if (is_module) info.set_module();
+ i::Handle<i::Script> script =
+ factory->NewScriptWithId(source, compile_flags.script_id());
+ i::ParseInfo info(isolate, compile_flags, &compile_state);
i::parsing::ParseProgram(&info, script, isolate);
function = info.literal();
}
@@ -1995,10 +2031,7 @@ TEST(OptionalChaining) {
{"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
const char* statement_data[] = {"a?.b", "a?.['b']", "a?.()", nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyOptionalChaining};
- RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, flags,
- 1, nullptr, 0, false, true, true);
- RunParserSyncTest(context_data, statement_data, kError);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(OptionalChainingTaggedError) {
@@ -2010,9 +2043,6 @@ TEST(OptionalChainingTaggedError) {
{"", ""}, {"'use strict';", ""}, {nullptr, nullptr}};
const char* statement_data[] = {"a?.b``", "a?.['b']``", "a?.()``", nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyOptionalChaining};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 9, flags, 1,
- nullptr, 0, false, true, true);
RunParserSyncTest(context_data, statement_data, kError);
}
@@ -2028,10 +2058,7 @@ TEST(Nullish) {
"a ?? b ?? c ? d : e",
nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyNullish};
- RunParserSyncTest(context_data, statement_data, kSuccess, nullptr, 0, flags,
- 1, nullptr, 0, false, true, true);
- RunParserSyncTest(context_data, statement_data, kError);
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
TEST(NullishNotContained) {
@@ -2046,9 +2073,7 @@ TEST(NullishNotContained) {
"a ?? b && c",
nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyNullish};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1,
- nullptr, 0, false, true, true);
+ RunParserSyncTest(context_data, statement_data, kError);
}
TEST(ErrorsEvalAndArguments) {
@@ -3483,6 +3508,7 @@ TEST(InnerAssignment) {
i::SNPrintF(program, "%s%s%s%s%s", prefix, outer, midfix, inner,
suffix);
+ UnoptimizedCompileState compile_state(isolate);
std::unique_ptr<i::ParseInfo> info;
if (lazy) {
printf("%s\n", program.begin());
@@ -3491,8 +3517,9 @@ TEST(InnerAssignment) {
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared =
i::handle(f->shared(), isolate);
- info =
- std::unique_ptr<i::ParseInfo>(new i::ParseInfo(isolate, *shared));
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared);
+ info = std::make_unique<i::ParseInfo>(isolate, flags, &compile_state);
CHECK(i::parsing::ParseFunction(info.get(), shared, isolate));
} else {
i::Handle<i::String> source =
@@ -3500,12 +3527,12 @@ TEST(InnerAssignment) {
source->PrintOn(stdout);
printf("\n");
i::Handle<i::Script> script = factory->NewScript(source);
- info =
- std::unique_ptr<i::ParseInfo>(new i::ParseInfo(isolate, *script));
- info->set_allow_lazy_parsing(false);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_allow_lazy_parsing(false);
+ info = std::make_unique<i::ParseInfo>(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(info.get(), script, isolate));
}
- CHECK(i::Compiler::Analyze(info.get()));
CHECK_NOT_NULL(info->literal());
i::Scope* scope = info->literal()->scope();
@@ -3609,10 +3636,12 @@ TEST(MaybeAssignedParameters) {
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared(), isolate);
- info = std::unique_ptr<i::ParseInfo>(new i::ParseInfo(isolate, *shared));
- info->set_allow_lazy_parsing(allow_lazy);
+ i::UnoptimizedCompileState state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared);
+ flags.set_allow_lazy_parsing(allow_lazy);
+ info = std::make_unique<i::ParseInfo>(isolate, flags, &state);
CHECK(i::parsing::ParseFunction(info.get(), shared, isolate));
- CHECK(i::Compiler::Analyze(info.get()));
CHECK_NOT_NULL(info->literal());
i::Scope* scope = info->literal()->scope();
@@ -3645,13 +3674,15 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
printf("\n");
i::Handle<i::Script> script = factory->NewScript(string);
- std::unique_ptr<i::ParseInfo> info;
- info = std::unique_ptr<i::ParseInfo>(new i::ParseInfo(isolate, *script));
- info->set_module(module);
- info->set_allow_lazy_parsing(allow_lazy_parsing);
+ i::UnoptimizedCompileState state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(module);
+ flags.set_allow_lazy_parsing(allow_lazy_parsing);
+ std::unique_ptr<i::ParseInfo> info =
+ std::make_unique<i::ParseInfo>(isolate, flags, &state);
CHECK(i::parsing::ParseProgram(info.get(), script, isolate));
- CHECK(i::Compiler::Analyze(info.get()));
CHECK_NOT_NULL(info->literal());
i::Scope* scope = info->literal()->scope();
@@ -7401,8 +7432,11 @@ TEST(BasicImportExportParsing) {
// Show that parsing as a module works
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_module();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
if (!i::parsing::ParseProgram(&info, script, isolate)) {
i::Handle<i::JSObject> exception_handle(
i::JSObject::cast(isolate->pending_exception()), isolate);
@@ -7423,8 +7457,11 @@ TEST(BasicImportExportParsing) {
// And that parsing a script does not.
{
+ i::UnoptimizedCompileState compile_state(isolate);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(!i::parsing::ParseProgram(&info, script, isolate));
isolate->clear_pending_exception();
}
@@ -7461,8 +7498,11 @@ TEST(NamespaceExportParsing) {
i::Handle<i::String> source =
factory->NewStringFromAsciiChecked(kSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_module();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
}
}
@@ -7556,8 +7596,11 @@ TEST(ImportExportParsingErrors) {
factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_module();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(!i::parsing::ParseProgram(&info, script, isolate));
isolate->clear_pending_exception();
}
@@ -7592,8 +7635,11 @@ TEST(ModuleTopLevelFunctionDecl) {
factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_module();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(!i::parsing::ParseProgram(&info, script, isolate));
isolate->clear_pending_exception();
}
@@ -7789,10 +7835,12 @@ TEST(ModuleParsingInternals) {
"export {foob};";
i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
- info.set_module();
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
- CHECK(i::Compiler::Analyze(&info));
i::FunctionLiteral* func = info.literal();
i::ModuleScope* module_scope = func->scope()->AsModuleScope();
i::Scope* outer_scope = module_scope->outer_scope();
@@ -8032,7 +8080,10 @@ void TestLanguageMode(const char* source,
i::Handle<i::Script> script =
factory->NewScript(factory->NewStringFromAsciiChecked(source));
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &compile_state);
i::parsing::ParseProgram(&info, script, isolate);
CHECK_NOT_NULL(info.literal());
CHECK_EQ(expected_language_mode, info.literal()->language_mode());
@@ -10813,10 +10864,12 @@ TEST(NoPessimisticContextAllocation) {
printf("\n");
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
- CHECK(i::Compiler::Analyze(&info));
CHECK_NOT_NULL(info.literal());
i::Scope* scope = info.literal()->scope()->inner_scope();
@@ -11373,12 +11426,12 @@ TEST(LexicalLoopVariable) {
i::Handle<i::String> source =
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(isolate, *script);
-
- info.set_allow_lazy_parsing(false);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_allow_lazy_parsing(false);
+ i::ParseInfo info(isolate, flags, &compile_state);
CHECK(i::parsing::ParseProgram(&info, script, isolate));
- CHECK(i::Rewriter::Rewrite(&info));
- CHECK(i::DeclarationScope::Analyze(&info));
i::DeclarationScope::AllocateScopeInfos(&info, isolate);
CHECK_NOT_NULL(info.literal());
diff --git a/deps/v8/test/cctest/test-persistent-handles.cc b/deps/v8/test/cctest/test-persistent-handles.cc
new file mode 100644
index 0000000000..0bb2990d17
--- /dev/null
+++ b/deps/v8/test/cctest/test-persistent-handles.cc
@@ -0,0 +1,114 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "src/api/api.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/local-handles-inl.h"
+#include "src/handles/persistent-handles.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/safepoint.h"
+#include "src/objects/heap-number.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+static constexpr int kNumHandles = kHandleBlockSize * 2 + kHandleBlockSize / 2;
+
+class PersistentHandlesThread final : public v8::base::Thread {
+ public:
+ PersistentHandlesThread(Heap* heap, std::vector<Handle<HeapNumber>> handles,
+ std::unique_ptr<PersistentHandles> ph, Address object,
+ base::Semaphore* sema_started,
+ base::Semaphore* sema_gc_finished)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ handles_(std::move(handles)),
+ ph_(std::move(ph)),
+ object_(object),
+ sema_started_(sema_started),
+ sema_gc_finished_(sema_gc_finished) {}
+
+ void Run() override {
+ LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHandleScope scope(&local_heap);
+
+ for (int i = 0; i < kNumHandles; i++) {
+ handles_.push_back(
+ Handle<HeapNumber>::cast(local_heap.NewPersistentHandle(object_)));
+ }
+
+ sema_started_->Signal();
+
+ {
+ ParkedScope scope(&local_heap);
+ sema_gc_finished_->Wait();
+ }
+
+ for (Handle<HeapNumber> handle : handles_) {
+ CHECK_EQ(42.0, handle->value());
+ }
+
+ CHECK_EQ(handles_.size(), kNumHandles * 2);
+
+ CHECK(!ph_);
+ ph_ = local_heap.DetachPersistentHandles();
+ }
+
+ Heap* heap_;
+ std::vector<Handle<HeapNumber>> handles_;
+ std::unique_ptr<PersistentHandles> ph_;
+ Address object_;
+ base::Semaphore* sema_started_;
+ base::Semaphore* sema_gc_finished_;
+};
+
+TEST(CreatePersistentHandles) {
+ CcTest::InitializeVM();
+ FLAG_local_heaps = true;
+ Isolate* isolate = CcTest::i_isolate();
+
+ Address object = kNullAddress;
+ std::unique_ptr<PersistentHandles> ph = isolate->NewPersistentHandles();
+ std::vector<Handle<HeapNumber>> handles;
+
+ HandleScope handle_scope(isolate);
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
+
+ object = number->ptr();
+
+ for (int i = 0; i < kNumHandles; i++) {
+ handles.push_back(Handle<HeapNumber>::cast(ph->NewHandle(object)));
+ }
+
+ base::Semaphore sema_started(0);
+ base::Semaphore sema_gc_finished(0);
+
+  // Pass the persistent handles to the background thread.
+ std::unique_ptr<PersistentHandlesThread> thread(new PersistentHandlesThread(
+ isolate->heap(), std::move(handles), std::move(ph), object, &sema_started,
+ &sema_gc_finished));
+ CHECK(thread->Start());
+
+ sema_started.Wait();
+
+ CcTest::CollectAllGarbage();
+ sema_gc_finished.Signal();
+
+ thread->Join();
+
+  // Get the persistent handles back on the main thread.
+ ph = std::move(thread->ph_);
+ ph->NewHandle(number->ptr());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index ce916c9c7a..da3a32b794 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -50,6 +50,7 @@
#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h"
#include "test/cctest/cctest.h"
+#include "test/common/wasm/flag-utils.h"
namespace v8 {
namespace internal {
@@ -1278,14 +1279,16 @@ TEST(MacroAssembler) {
Handle<String> source = factory->NewStringFromStaticChars("^f(o)o");
Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
int captures[5];
+ std::memset(captures, 0, sizeof(captures));
const uc16 str1[] = {'f', 'o', 'o', 'b', 'a', 'r'};
Handle<String> f1_16 = factory->NewStringFromTwoByte(
Vector<const uc16>(str1, 6)).ToHandleChecked();
- CHECK(IrregexpInterpreter::MatchInternal(isolate, *array, *f1_16, captures, 5,
- 0, RegExp::CallOrigin::kFromRuntime,
- JSRegExp::kNoBacktrackLimit));
+ CHECK_EQ(IrregexpInterpreter::SUCCESS,
+ IrregexpInterpreter::MatchInternal(
+ isolate, *array, *f1_16, captures, 5, 5, 0,
+ RegExp::CallOrigin::kFromRuntime, JSRegExp::kNoBacktrackLimit));
CHECK_EQ(0, captures[0]);
CHECK_EQ(3, captures[1]);
CHECK_EQ(1, captures[2]);
@@ -1296,10 +1299,17 @@ TEST(MacroAssembler) {
Handle<String> f2_16 = factory->NewStringFromTwoByte(
Vector<const uc16>(str2, 6)).ToHandleChecked();
- CHECK(!IrregexpInterpreter::MatchInternal(
- isolate, *array, *f2_16, captures, 5, 0, RegExp::CallOrigin::kFromRuntime,
- JSRegExp::kNoBacktrackLimit));
- CHECK_EQ(42, captures[0]);
+ std::memset(captures, 0, sizeof(captures));
+ CHECK_EQ(IrregexpInterpreter::FAILURE,
+ IrregexpInterpreter::MatchInternal(
+ isolate, *array, *f2_16, captures, 5, 5, 0,
+ RegExp::CallOrigin::kFromRuntime, JSRegExp::kNoBacktrackLimit));
+ // Failed matches don't alter output registers.
+ CHECK_EQ(0, captures[0]);
+ CHECK_EQ(0, captures[1]);
+ CHECK_EQ(0, captures[2]);
+ CHECK_EQ(0, captures[3]);
+ CHECK_EQ(0, captures[4]);
}
#ifndef V8_INTL_SUPPORT
@@ -2332,6 +2342,31 @@ TEST(PeepholeLabelFixupsComplex) {
}
}
+TEST(UnicodePropertyEscapeCodeSize) {
+ i::FlagScope<bool> f(&v8::internal::FLAG_regexp_tier_up, false);
+
+ LocalContext env;
+ v8::HandleScope scope(CcTest::isolate());
+ i::Handle<i::JSRegExp> re = Utils::OpenHandle(
+ *CompileRun("const r = /\\p{L}\\p{L}\\p{L}/u; r.exec('\\u200b'); r;")
+ .As<v8::RegExp>());
+
+ static constexpr int kMaxSize = 200 * KB;
+ static constexpr bool kIsNotLatin1 = false;
+ Object maybe_code = re->Code(kIsNotLatin1);
+ Object maybe_bytecode = re->Bytecode(kIsNotLatin1);
+ if (maybe_bytecode.IsByteArray()) {
+ // On x64, excessive inlining produced >250KB.
+ CHECK_LT(ByteArray::cast(maybe_bytecode).Size(), kMaxSize);
+ } else if (maybe_code.IsCode()) {
+ // On x64, excessive inlining produced >360KB.
+ CHECK_LT(Code::cast(maybe_code).Size(), kMaxSize);
+ CHECK_EQ(Code::cast(maybe_code).kind(), Code::REGEXP);
+ } else {
+ UNREACHABLE();
+ }
+}
+
#undef CHECK_PARSE_ERROR
#undef CHECK_SIMPLE
#undef CHECK_MIN_MAX
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index 26002621b6..f5003721d6 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk-inl.h"
#include "src/objects/cell.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/script.h"
@@ -48,7 +49,6 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) {
V(shared_wasm_memories) \
V(materialized_objects) \
V(public_symbol_table) \
- V(retained_maps) \
V(retaining_path_targets) \
V(serialized_global_proxy_sizes) \
V(serialized_objects) \
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index d92d34faef..5b7ec576cf 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -49,8 +49,8 @@
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
-#include "src/snapshot/partial-deserializer.h"
-#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/context-deserializer.h"
+#include "src/snapshot/context-serializer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot-compression.h"
@@ -150,7 +150,6 @@ namespace {
v8::StartupData CreateSnapshotDataBlob(const char* embedded_source) {
v8::StartupData data = CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, embedded_source);
- ReadOnlyHeap::ClearSharedHeapForTest();
return data;
}
@@ -172,11 +171,14 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
internal_isolate->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kTesting);
- ReadOnlySerializer read_only_serializer(internal_isolate);
+ DisallowHeapAllocation no_gc;
+ ReadOnlySerializer read_only_serializer(internal_isolate,
+ Snapshot::kDefaultSerializerFlags);
read_only_serializer.SerializeReadOnlyRoots();
- StartupSerializer ser(internal_isolate, &read_only_serializer);
- ser.SerializeStrongReferences();
+ StartupSerializer ser(internal_isolate, Snapshot::kDefaultSerializerFlags,
+ &read_only_serializer);
+ ser.SerializeStrongReferences(no_gc);
ser.SerializeWeakReferencesAndDeferred();
read_only_serializer.FinalizeSerialization();
@@ -186,12 +188,11 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
WritePayload(read_only_snapshot.RawData())};
}
-
-Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
- Vector<const uint8_t> body,
- Vector<const uint8_t> tail, int repeats) {
- int source_length = head.length() + body.length() * repeats + tail.length();
- uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+Vector<const char> ConstructSource(Vector<const char> head,
+ Vector<const char> body,
+ Vector<const char> tail, int repeats) {
+ size_t source_length = head.size() + body.size() * repeats + tail.size();
+ char* source = NewArray<char>(source_length);
CopyChars(source, head.begin(), head.length());
for (int i = 0; i < repeats; i++) {
CopyChars(source + head.length() + i * body.length(), body.begin(),
@@ -199,8 +200,7 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
}
CopyChars(source + head.length() + repeats * body.length(), tail.begin(),
tail.length());
- return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
- source_length);
+ return VectorOf(source, source_length);
}
static v8::Isolate* Deserialize(const StartupBlobs& blobs) {
@@ -224,7 +224,6 @@ void TestStartupSerializerOnceImpl() {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::HandleScope handle_scope(isolate);
@@ -282,7 +281,6 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -303,7 +301,6 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -332,7 +329,6 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -353,9 +349,9 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
FreeCurrentEmbeddedBlob();
}
-static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
- Vector<const byte>* read_only_blob_out,
- Vector<const byte>* partial_blob_out) {
+static void SerializeContext(Vector<const byte>* startup_blob_out,
+ Vector<const byte>* read_only_blob_out,
+ Vector<const byte>* context_blob_out) {
v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
Heap* heap = isolate->heap();
@@ -386,18 +382,22 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
env.Reset();
+ DisallowHeapAllocation no_gc;
SnapshotByteSink read_only_sink;
- ReadOnlySerializer read_only_serializer(isolate);
+ ReadOnlySerializer read_only_serializer(isolate,
+ Snapshot::kDefaultSerializerFlags);
read_only_serializer.SerializeReadOnlyRoots();
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &read_only_serializer);
- startup_serializer.SerializeStrongReferences();
+ StartupSerializer startup_serializer(
+ isolate, Snapshot::kDefaultSerializerFlags, &read_only_serializer);
+ startup_serializer.SerializeStrongReferences(no_gc);
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- v8::SerializeInternalFieldsCallback());
- partial_serializer.Serialize(&raw_context, false);
+ SnapshotByteSink context_sink;
+ ContextSerializer context_serializer(
+ isolate, Snapshot::kDefaultSerializerFlags, &startup_serializer,
+ v8::SerializeInternalFieldsCallback());
+ context_serializer.Serialize(&raw_context, no_gc);
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -405,40 +405,39 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
- SnapshotData partial_snapshot(&partial_serializer);
+ SnapshotData context_snapshot(&context_serializer);
- *partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *context_blob_out = WritePayload(context_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(SnapshotCompression) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> read_only_blob;
- Vector<const byte> partial_blob;
- PartiallySerializeContext(&startup_blob, &read_only_blob, &partial_blob);
- SnapshotData original_snapshot_data(partial_blob);
+ Vector<const byte> context_blob;
+ SerializeContext(&startup_blob, &read_only_blob, &context_blob);
+ SnapshotData original_snapshot_data(context_blob);
SnapshotData compressed =
i::SnapshotCompression::Compress(&original_snapshot_data);
SnapshotData decompressed =
i::SnapshotCompression::Decompress(compressed.RawData());
- CHECK_EQ(partial_blob, decompressed.RawData());
+ CHECK_EQ(context_blob, decompressed.RawData());
startup_blob.Dispose();
read_only_blob.Dispose();
- partial_blob.Dispose();
+ context_blob.Dispose();
}
-UNINITIALIZED_TEST(PartialSerializerContext) {
+UNINITIALIZED_TEST(ContextSerializerContext) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> read_only_blob;
- Vector<const byte> partial_blob;
- PartiallySerializeContext(&startup_blob, &read_only_blob, &partial_blob);
+ Vector<const byte> context_blob;
+ SerializeContext(&startup_blob, &read_only_blob, &context_blob);
StartupBlobs blobs = {startup_blob, read_only_blob};
v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
@@ -453,8 +452,8 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
isolate->factory()->NewUninitializedJSGlobalProxy(
JSGlobalProxy::SizeWithEmbedderFields(0));
{
- SnapshotData snapshot_data(partial_blob);
- root = PartialDeserializer::DeserializeContext(
+ SnapshotData snapshot_data(context_blob);
+ root = ContextDeserializer::DeserializeContext(
isolate, &snapshot_data, false, global_proxy,
v8::DeserializeInternalFieldsCallback())
.ToHandleChecked();
@@ -464,25 +463,24 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
Handle<Object> root2;
{
- SnapshotData snapshot_data(partial_blob);
- root2 = PartialDeserializer::DeserializeContext(
+ SnapshotData snapshot_data(context_blob);
+ root2 = ContextDeserializer::DeserializeContext(
isolate, &snapshot_data, false, global_proxy,
v8::DeserializeInternalFieldsCallback())
.ToHandleChecked();
CHECK(root2->IsContext());
CHECK(!root.is_identical_to(root2));
}
- partial_blob.Dispose();
+ context_blob.Dispose();
}
v8_isolate->Dispose();
blobs.Dispose();
FreeCurrentEmbeddedBlob();
}
-static void PartiallySerializeCustomContext(
- Vector<const byte>* startup_blob_out,
- Vector<const byte>* read_only_blob_out,
- Vector<const byte>* partial_blob_out) {
+static void SerializeCustomContext(Vector<const byte>* startup_blob_out,
+ Vector<const byte>* read_only_blob_out,
+ Vector<const byte>* context_blob_out) {
v8::Isolate* v8_isolate = TestSerializer::NewIsolateInitialized();
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
{
@@ -511,12 +509,12 @@ static void PartiallySerializeCustomContext(
"var p = 0;"
"(async ()=>{ p = await 42; })();");
- Vector<const uint8_t> source = ConstructSource(
+ Vector<const char> source = ConstructSource(
StaticCharVector("function g() { return [,"), StaticCharVector("1,"),
StaticCharVector("];} a = g(); b = g(); b.push(1);"), 100000);
- v8::MaybeLocal<v8::String> source_str = v8::String::NewFromOneByte(
- v8_isolate, source.begin(), v8::NewStringType::kNormal,
- source.length());
+ v8::MaybeLocal<v8::String> source_str =
+ v8::String::NewFromUtf8(v8_isolate, source.begin(),
+ v8::NewStringType::kNormal, source.length());
CompileRun(source_str.ToLocalChecked());
source.Dispose();
}
@@ -534,18 +532,22 @@ static void PartiallySerializeCustomContext(
env.Reset();
+ DisallowHeapAllocation no_gc;
SnapshotByteSink read_only_sink;
- ReadOnlySerializer read_only_serializer(isolate);
+ ReadOnlySerializer read_only_serializer(isolate,
+ Snapshot::kDefaultSerializerFlags);
read_only_serializer.SerializeReadOnlyRoots();
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &read_only_serializer);
- startup_serializer.SerializeStrongReferences();
+ StartupSerializer startup_serializer(
+ isolate, Snapshot::kDefaultSerializerFlags, &read_only_serializer);
+ startup_serializer.SerializeStrongReferences(no_gc);
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- v8::SerializeInternalFieldsCallback());
- partial_serializer.Serialize(&raw_context, false);
+ SnapshotByteSink context_sink;
+ ContextSerializer context_serializer(
+ isolate, Snapshot::kDefaultSerializerFlags, &startup_serializer,
+ v8::SerializeInternalFieldsCallback());
+ context_serializer.Serialize(&raw_context, no_gc);
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -553,23 +555,21 @@ static void PartiallySerializeCustomContext(
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
- SnapshotData partial_snapshot(&partial_serializer);
+ SnapshotData context_snapshot(&context_serializer);
- *partial_blob_out = WritePayload(partial_snapshot.RawData());
+ *context_blob_out = WritePayload(context_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
- ReadOnlyHeap::ClearSharedHeapForTest();
}
-UNINITIALIZED_TEST(PartialSerializerCustomContext) {
+UNINITIALIZED_TEST(ContextSerializerCustomContext) {
DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> read_only_blob;
- Vector<const byte> partial_blob;
- PartiallySerializeCustomContext(&startup_blob, &read_only_blob,
- &partial_blob);
+ Vector<const byte> context_blob;
+ SerializeCustomContext(&startup_blob, &read_only_blob, &context_blob);
StartupBlobs blobs = {startup_blob, read_only_blob};
v8::Isolate* v8_isolate = TestSerializer::NewIsolateFromBlob(blobs);
@@ -584,8 +584,8 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
isolate->factory()->NewUninitializedJSGlobalProxy(
JSGlobalProxy::SizeWithEmbedderFields(0));
{
- SnapshotData snapshot_data(partial_blob);
- root = PartialDeserializer::DeserializeContext(
+ SnapshotData snapshot_data(context_blob);
+ root = ContextDeserializer::DeserializeContext(
isolate, &snapshot_data, false, global_proxy,
v8::DeserializeInternalFieldsCallback())
.ToHandleChecked();
@@ -657,7 +657,7 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
.FromJust();
CHECK_EQ(100002, b);
}
- partial_blob.Dispose();
+ context_blob.Dispose();
}
v8_isolate->Dispose();
blobs.Dispose();
@@ -782,7 +782,6 @@ void TestCustomSnapshotDataBlobWithIrregexpCode(
DisableEmbeddedBlobRefcounting();
v8::StartupData data1 =
CreateSnapshotDataBlobInternal(function_code_handling, source);
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params1;
params1.snapshot_blob = &data1;
@@ -931,7 +930,6 @@ void TypedArrayTestHelper(
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator =
@@ -1100,7 +1098,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobDetachedArrayBuffer) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1171,7 +1168,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1225,7 +1221,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobTypedArrayNoEmbedderFieldCallback) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1348,7 +1343,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
const char* source1 = "function f() { return 42; }";
DisableEmbeddedBlobRefcounting();
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::StartupData data1 = CreateSnapshotDataBlob(source1);
v8::Isolate::CreateParams params1;
@@ -1426,7 +1420,6 @@ UNINITIALIZED_TEST(SnapshotDataBlobWithWarmup) {
DisableEmbeddedBlobRefcounting();
v8::StartupData cold = CreateSnapshotDataBlob(nullptr);
v8::StartupData warm = WarmUpSnapshotDataBlobInternal(cold, warmup);
- ReadOnlyHeap::ClearSharedHeapForTest();
delete[] cold.data;
v8::Isolate::CreateParams params;
@@ -1463,7 +1456,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithWarmup) {
DisableEmbeddedBlobRefcounting();
v8::StartupData cold = CreateSnapshotDataBlob(source);
v8::StartupData warm = WarmUpSnapshotDataBlobInternal(cold, warmup);
- ReadOnlyHeap::ClearSharedHeapForTest();
delete[] cold.data;
v8::Isolate::CreateParams params;
@@ -1496,14 +1488,13 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
// Flood the startup snapshot with shared function infos. If they are
// serialized before the immortal immovable root, the root will no longer end
// up on the first page.
- Vector<const uint8_t> source =
+ Vector<const char> source =
ConstructSource(StaticCharVector("var a = [];"),
StaticCharVector("a.push(function() {return 7});"),
StaticCharVector("\0"), 10000);
DisableEmbeddedBlobRefcounting();
- v8::StartupData data =
- CreateSnapshotDataBlob(reinterpret_cast<const char*>(source.begin()));
+ v8::StartupData data = CreateSnapshotDataBlob(source.begin());
v8::Isolate::CreateParams params;
params.snapshot_blob = &data;
@@ -1804,13 +1795,13 @@ TEST(CodeSerializerLargeCodeObject) {
// code. Don't even bother generating optimized code to avoid timeouts.
FLAG_always_opt = false;
- Vector<const uint8_t> source = ConstructSource(
+ Vector<const char> source = ConstructSource(
StaticCharVector("var j=1; if (j == 0) {"),
StaticCharVector(
"for (let i of Object.prototype) for (let k = 0; k < 0; ++k);"),
StaticCharVector("} j=7; j"), 2000);
Handle<String> source_str =
- isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+ isolate->factory()->NewStringFromUtf8(source).ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
@@ -1860,12 +1851,12 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
v8::HandleScope scope(CcTest::isolate());
- Vector<const uint8_t> source = ConstructSource(
+ Vector<const char> source = ConstructSource(
StaticCharVector("var j=1; if (j == 0) {"),
StaticCharVector("for (var i = 0; i < Object.prototype; i++);"),
StaticCharVector("} j=7; var s = 'happy_hippo'; j"), 20000);
Handle<String> source_str =
- isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
+ isolate->factory()->NewStringFromUtf8(source).ToHandleChecked();
// Create a string on an evacuation candidate in old space.
Handle<String> moving_object;
@@ -1922,6 +1913,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
delete cache;
source.Dispose();
}
+
TEST(CodeSerializerLargeStrings) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1931,15 +1923,15 @@ TEST(CodeSerializerLargeStrings) {
v8::HandleScope scope(CcTest::isolate());
- Vector<const uint8_t> source_s = ConstructSource(
+ Vector<const char> source_s = ConstructSource(
StaticCharVector("var s = \""), StaticCharVector("abcdef"),
StaticCharVector("\";"), 1000000);
- Vector<const uint8_t> source_t = ConstructSource(
+ Vector<const char> source_t = ConstructSource(
StaticCharVector("var t = \""), StaticCharVector("uvwxyz"),
StaticCharVector("\"; s + t"), 999999);
Handle<String> source_str =
- f->NewConsString(f->NewStringFromOneByte(source_s).ToHandleChecked(),
- f->NewStringFromOneByte(source_t).ToHandleChecked())
+ f->NewConsString(f->NewStringFromUtf8(source_s).ToHandleChecked(),
+ f->NewStringFromUtf8(source_t).ToHandleChecked())
.ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
@@ -1992,23 +1984,23 @@ TEST(CodeSerializerThreeBigStrings) {
const int32_t length_of_b = kMaxRegularHeapObjectSize / 2;
const int32_t length_of_c = kMaxRegularHeapObjectSize / 2;
- Vector<const uint8_t> source_a =
+ Vector<const char> source_a =
ConstructSource(StaticCharVector("var a = \""), StaticCharVector("a"),
StaticCharVector("\";"), length_of_a);
Handle<String> source_a_str =
- f->NewStringFromOneByte(source_a).ToHandleChecked();
+ f->NewStringFromUtf8(source_a).ToHandleChecked();
- Vector<const uint8_t> source_b =
+ Vector<const char> source_b =
ConstructSource(StaticCharVector("var b = \""), StaticCharVector("b"),
StaticCharVector("\";"), length_of_b);
Handle<String> source_b_str =
- f->NewStringFromOneByte(source_b).ToHandleChecked();
+ f->NewStringFromUtf8(source_b).ToHandleChecked();
- Vector<const uint8_t> source_c =
+ Vector<const char> source_c =
ConstructSource(StaticCharVector("var c = \""), StaticCharVector("c"),
StaticCharVector("\";"), length_of_c);
Handle<String> source_c_str =
- f->NewStringFromOneByte(source_c).ToHandleChecked();
+ f->NewStringFromUtf8(source_c).ToHandleChecked();
Handle<String> source_str =
f->NewConsString(
@@ -2178,10 +2170,10 @@ TEST(CodeSerializerLargeExternalString) {
v8::HandleScope scope(CcTest::isolate());
// Create a huge external internalized string to use as variable name.
- Vector<const uint8_t> string =
+ Vector<const char> string =
ConstructSource(StaticCharVector(""), StaticCharVector("abcdef"),
StaticCharVector(""), 999999);
- Handle<String> name = f->NewStringFromOneByte(string).ToHandleChecked();
+ Handle<String> name = f->NewStringFromUtf8(string).ToHandleChecked();
SerializerOneByteResource one_byte_resource(
reinterpret_cast<const char*>(string.begin()), string.length());
name = f->InternalizeString(name);
@@ -2665,7 +2657,6 @@ UNINITIALIZED_TEST(SnapshotCreatorMultipleContexts) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2804,7 +2795,6 @@ UNINITIALIZED_TEST(SnapshotCreatorExternalReferences) {
// Deserialize with the original external reference.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2830,7 +2820,6 @@ UNINITIALIZED_TEST(SnapshotCreatorExternalReferences) {
// Deserialize with some other external reference.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2879,7 +2868,6 @@ UNINITIALIZED_TEST(SnapshotCreatorShortExternalReferences) {
// Deserialize with an incomplete list of external references.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2940,7 +2928,6 @@ UNINITIALIZED_TEST(SnapshotCreatorNoExternalReferencesDefault) {
// Deserialize with an incomplete list of external references.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2990,7 +2977,6 @@ UNINITIALIZED_TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
// Deserialize with an incomplete list of external references.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3030,7 +3016,6 @@ UNINITIALIZED_TEST(SnapshotCreatorArrayJoinWithKeep) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateCustomSnapshotArrayJoinWithKeep();
- ReadOnlyHeap::ClearSharedHeapForTest();
// Deserialize with an incomplete list of external references.
{
@@ -3076,7 +3061,6 @@ UNINITIALIZED_TEST(SnapshotCreatorDuplicateFunctions) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateCustomSnapshotWithDuplicateFunctions();
- ReadOnlyHeap::ClearSharedHeapForTest();
// Deserialize with an incomplete list of external references.
{
@@ -3105,7 +3089,6 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail1) {
// Deserialize with an incomplete list of external references.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3131,7 +3114,6 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail2) {
// Deserialize with an incomplete list of external references.
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3250,7 +3232,6 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
}
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3415,7 +3396,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
}
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3496,7 +3476,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
isolate->Dispose();
}
{
- ReadOnlyHeap::ClearSharedHeapForTest();
SnapshotCreator creator(nullptr, &blob);
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -3523,7 +3502,6 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3668,7 +3646,6 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
}
{
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3778,7 +3755,7 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
FreeCurrentEmbeddedBlob();
}
-UNINITIALIZED_TEST(ReinitializeHashSeedJSCollectionRehashable) {
+UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
DisableAlwaysOpt();
i::FLAG_rehash_snapshot = true;
i::FLAG_hash_seed = 42;
@@ -3796,29 +3773,23 @@ UNINITIALIZED_TEST(ReinitializeHashSeedJSCollectionRehashable) {
CompileRun(
"var m = new Map();"
"m.set('a', 1);"
- "m.set('b', 2);"
- "var s = new Set();"
- "s.add(1);"
- "s.add(globalThis);");
+ "m.set('b', 2);");
ExpectInt32("m.get('b')", 2);
- ExpectTrue("s.has(1)");
- ExpectTrue("s.has(globalThis)");
creator.SetDefaultContext(context);
}
blob =
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
- CHECK(blob.CanBeRehashed());
+ CHECK(!blob.CanBeRehashed());
}
- ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
create_params.snapshot_blob = &blob;
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
- // Check that rehashing has been performed.
- CHECK_EQ(static_cast<uint64_t>(1337),
+ // Check that no rehashing has been performed.
+ CHECK_EQ(static_cast<uint64_t>(42),
HashSeed(reinterpret_cast<i::Isolate*>(isolate)));
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
@@ -3826,8 +3797,6 @@ UNINITIALIZED_TEST(ReinitializeHashSeedJSCollectionRehashable) {
CHECK(!context.IsEmpty());
v8::Context::Scope context_scope(context);
ExpectInt32("m.get('b')", 2);
- ExpectTrue("s.has(1)");
- ExpectTrue("s.has(globalThis)");
}
isolate->Dispose();
delete[] blob.data;
@@ -3879,7 +3848,6 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
CHECK(blob.CanBeRehashed());
}
- ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3948,7 +3916,6 @@ UNINITIALIZED_TEST(WeakArraySerializationInSnapshot) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
- ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 9442dab6f5..0fd9cc4f66 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -19,6 +19,7 @@
#include "src/strings/char-predicates.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -113,8 +114,7 @@ TEST(TestBuiltinSpecialization) {
CodeAssemblerTester asm_tester(isolate, 0);
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Object> temp = m.SmiConstant(0);
- m.TestBuiltinSpecialization(m.UncheckedCast<Context>(temp));
+ m.TestBuiltinSpecialization();
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -170,8 +170,7 @@ TEST(TestFunctionPointerToGeneric) {
CodeAssemblerTester asm_tester(isolate, 0);
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Object> temp = m.SmiConstant(0);
- m.TestFunctionPointerToGeneric(m.UncheckedCast<Context>(temp));
+ m.TestFunctionPointerToGeneric();
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -694,6 +693,43 @@ TEST(TestBitFieldStore) {
}
}
+TEST(TestBitFieldInit) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ const int kNumParams = 4;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ // Untag all of the parameters to get plain integer values.
+ TNode<BoolT> a =
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(0))));
+ TNode<Uint16T> b =
+ m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter(1))));
+ TNode<Uint32T> c =
+ m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(2))));
+ TNode<BoolT> d =
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(3))));
+
+ // Call the Torque-defined macro, which verifies that reading each bitfield
+ // out of val yields the correct result.
+ m.TestBitFieldInit(a, b, c, d);
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ // Test every possible bit combination for this 8-bit value.
+ for (int a = 0; a <= 1; ++a) {
+ for (int b = 0; b <= 7; ++b) {
+ for (int c = 0; c <= 7; ++c) {
+ for (int d = 0; d <= 1; ++d) {
+ ft.Call(ft.Val(a), ft.Val(b), ft.Val(c), ft.Val(d));
+ }
+ }
+ }
+ }
+}
+
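TestBitFieldInit above sweeps every combination of a 1-bit, a 3-bit, another 3-bit, and a final 1-bit field, 2*8*8*2 = 256 values in an 8-bit container, and relies on the Torque-defined macro to check each read-back. The standalone sketch below shows the packing and round-trip arithmetic being exercised; the concrete bit layout (low bit first) is an assumption for illustration only:

    #include <cassert>
    #include <cstdint>

    // Pack four fields into one byte: a (1 bit), b (3 bits), c (3 bits), d (1 bit).
    constexpr uint8_t Pack(int a, int b, int c, int d) {
      return static_cast<uint8_t>((a & 1) | ((b & 7) << 1) | ((c & 7) << 4) |
                                  ((d & 1) << 7));
    }

    int main() {
      for (int a = 0; a <= 1; ++a) {
        for (int b = 0; b <= 7; ++b) {
          for (int c = 0; c <= 7; ++c) {
            for (int d = 0; d <= 1; ++d) {
              const uint8_t v = Pack(a, b, c, d);
              // Reading each field back out of the packed byte must round-trip.
              assert((v & 1) == a);
              assert(((v >> 1) & 7) == b);
              assert(((v >> 4) & 7) == c);
              assert(((v >> 7) & 1) == d);
            }
          }
        }
      }
      return 0;
    }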
TEST(TestBitFieldUintptrOps) {
CcTest::InitializeVM();
Isolate* isolate(CcTest::i_isolate());
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
new file mode 100644
index 0000000000..a2249444b1
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -0,0 +1,311 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/struct-types.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-module-runner.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_gc {
+
+WASM_EXEC_TEST(BasicStruct) {
+ // TODO(7748): Implement support in other tiers.
+ if (execution_tier == ExecutionTier::kLiftoff) return;
+ if (execution_tier == ExecutionTier::kInterpreter) return;
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(gc);
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ StructType::Builder type_builder(&zone, 2);
+ type_builder.AddField(kWasmI32);
+ type_builder.AddField(kWasmI32);
+ int32_t type_index = builder->AddStructType(type_builder.Build());
+ ValueType kRefTypes[] = {ValueType(ValueType::kRef, type_index)};
+ ValueType kOptRefType = ValueType(ValueType::kOptRef, type_index);
+ FunctionSig sig_q_v(1, 0, kRefTypes);
+
+ // Test struct.new and struct.get.
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ f->builder()->AddExport(CStrVector("f"), f);
+ byte f_code[] = {WASM_STRUCT_GET(type_index, 0,
+ WASM_STRUCT_NEW(type_index, WASM_I32V(42),
+ WASM_I32V(64))),
+ kExprEnd};
+ f->EmitCode(f_code, sizeof(f_code));
+
+  // Test struct.new and struct.get of the second field.
+ WasmFunctionBuilder* g = builder->AddFunction(sigs.i_v());
+ g->builder()->AddExport(CStrVector("g"), g);
+ byte g_code[] = {WASM_STRUCT_GET(type_index, 1,
+ WASM_STRUCT_NEW(type_index, WASM_I32V(42),
+ WASM_I32V(64))),
+ kExprEnd};
+ g->EmitCode(g_code, sizeof(g_code));
+
+ // Test struct.new, returning struct references to JS.
+ WasmFunctionBuilder* h = builder->AddFunction(&sig_q_v);
+ h->builder()->AddExport(CStrVector("h"), h);
+ byte h_code[] = {WASM_STRUCT_NEW(type_index, WASM_I32V(42), WASM_I32V(64)),
+ kExprEnd};
+ h->EmitCode(h_code, sizeof(h_code));
+
+  // Test struct.set, struct ref types in locals.
+ WasmFunctionBuilder* j = builder->AddFunction(sigs.i_v());
+ uint32_t j_local_index = j->AddLocal(kOptRefType);
+ uint32_t j_field_index = 0;
+ j->builder()->AddExport(CStrVector("j"), j);
+ byte j_code[] = {
+ WASM_SET_LOCAL(j_local_index,
+ WASM_STRUCT_NEW(type_index, WASM_I32V(42), WASM_I32V(64))),
+ WASM_STRUCT_SET(type_index, j_field_index, WASM_GET_LOCAL(j_local_index),
+ WASM_I32V(-99)),
+ WASM_STRUCT_GET(type_index, j_field_index, WASM_GET_LOCAL(j_local_index)),
+ kExprEnd};
+ j->EmitCode(j_code, sizeof(j_code));
+
+ // Test struct.set, ref.as_non_null,
+  // struct ref types in globals and if-results.
+ uint32_t k_global_index = builder->AddGlobal(kOptRefType, true);
+ WasmFunctionBuilder* k = builder->AddFunction(sigs.i_v());
+ uint32_t k_field_index = 0;
+ k->builder()->AddExport(CStrVector("k"), k);
+ byte k_code[] = {
+ WASM_SET_GLOBAL(k_global_index, WASM_STRUCT_NEW(type_index, WASM_I32V(55),
+ WASM_I32V(66))),
+ WASM_STRUCT_GET(type_index, k_field_index,
+ WASM_REF_AS_NON_NULL(WASM_IF_ELSE_R(
+ kOptRefType, WASM_I32V(1),
+ WASM_GET_GLOBAL(k_global_index), WASM_REF_NULL))),
+ kExprEnd};
+ k->EmitCode(k_code, sizeof(k_code));
+
+ // Test br_on_null 1.
+ WasmFunctionBuilder* l = builder->AddFunction(sigs.i_v());
+ uint32_t l_local_index = l->AddLocal(kOptRefType);
+ l->builder()->AddExport(CStrVector("l"), l);
+ byte l_code[] = {
+ WASM_BLOCK_I(WASM_I32V(42),
+ // Branch will be taken.
+ // 42 left on stack outside the block (not 52).
+ WASM_BR_ON_NULL(0, WASM_GET_LOCAL(l_local_index)),
+ WASM_I32V(52), WASM_BR(0)),
+ kExprEnd};
+ l->EmitCode(l_code, sizeof(l_code));
+
+ // Test br_on_null 2.
+ WasmFunctionBuilder* m = builder->AddFunction(sigs.i_v());
+ uint32_t m_field_index = 0;
+ m->builder()->AddExport(CStrVector("m"), m);
+ byte m_code[] = {
+ WASM_BLOCK_I(
+ WASM_I32V(42),
+ WASM_STRUCT_GET(
+ type_index, m_field_index,
+ // Branch will not be taken.
+ // 52 left on stack outside the block (not 42).
+ WASM_BR_ON_NULL(0, WASM_STRUCT_NEW(type_index, WASM_I32V(52),
+ WASM_I32V(62)))),
+ WASM_BR(0)),
+ kExprEnd};
+ m->EmitCode(m_code, sizeof(m_code));
+
+ // Test ref.eq
+ WasmFunctionBuilder* n = builder->AddFunction(sigs.i_v());
+ uint32_t n_local_index = n->AddLocal(kOptRefType);
+ n->builder()->AddExport(CStrVector("n"), n);
+ byte n_code[] = {
+ WASM_SET_LOCAL(n_local_index,
+ WASM_STRUCT_NEW(type_index, WASM_I32V(55), WASM_I32V(66))),
+ WASM_I32_ADD(
+ WASM_I32_SHL(
+ WASM_REF_EQ( // true
+ WASM_GET_LOCAL(n_local_index), WASM_GET_LOCAL(n_local_index)),
+ WASM_I32V(0)),
+ WASM_I32_ADD(
+ WASM_I32_SHL(WASM_REF_EQ( // false
+ WASM_GET_LOCAL(n_local_index),
+ WASM_STRUCT_NEW(type_index, WASM_I32V(55),
+ WASM_I32V(66))),
+ WASM_I32V(1)),
+ WASM_I32_ADD(
+ WASM_I32_SHL( // false
+ WASM_REF_EQ(WASM_GET_LOCAL(n_local_index), WASM_REF_NULL),
+ WASM_I32V(2)),
+ WASM_I32_SHL(WASM_REF_EQ( // true
+ WASM_REF_NULL, WASM_REF_NULL),
+ WASM_I32V(3))))),
+ kExprEnd};
+ n->EmitCode(n_code, sizeof(n_code));
+ // Result: 0b1001
+
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(&buffer);
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "Test");
+ Handle<WasmInstanceObject> instance =
+ testing::CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
+ .ToHandleChecked();
+
+ CHECK_EQ(42, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "f", 0, nullptr));
+ CHECK_EQ(64, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "g", 0, nullptr));
+
+ // TODO(7748): This uses the JavaScript interface to retrieve the plain
+ // WasmStruct. Once the JS interaction story is settled, this may well
+ // need to be changed.
+ Handle<WasmExportedFunction> h_export =
+ testing::GetExportedFunction(isolate, instance, "h").ToHandleChecked();
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ Handle<Object> ref_result =
+ Execution::Call(isolate, h_export, undefined, 0, nullptr)
+ .ToHandleChecked();
+ CHECK(ref_result->IsWasmStruct());
+
+ CHECK_EQ(-99, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "j", 0, nullptr));
+
+ CHECK_EQ(55, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "k", 0, nullptr));
+
+ CHECK_EQ(42, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "l", 0, nullptr));
+
+ CHECK_EQ(52, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "m", 0, nullptr));
+
+ CHECK_EQ(0b1001, testing::CallWasmFunctionForTesting(
+ isolate, instance, &thrower, "n", 0, nullptr));
+}
+
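The CHECK_EQ(0b1001, ...) expectation for function n comes straight from the four ref.eq results: a reference compared with itself and null compared with null are true (bits 0 and 3), while comparing the local with a freshly allocated struct or with null is false (bits 1 and 2). The shift-and-add structure therefore reduces to:

    #include <cassert>

    int main() {
      // (true << 0) + (false << 1) + (false << 2) + (true << 3) = 9 = 0b1001
      const int result = (1 << 0) + (0 << 1) + (0 << 2) + (1 << 3);
      assert(result == 0b1001);
      return 0;
    }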
+WASM_EXEC_TEST(BasicArray) {
+ // TODO(7748): Implement support in other tiers.
+ if (execution_tier == ExecutionTier::kLiftoff) return;
+ if (execution_tier == ExecutionTier::kInterpreter) return;
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(gc);
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ ArrayType type(wasm::kWasmI32);
+ int32_t type_index = builder->AddArrayType(&type);
+ ValueType kRefTypes[] = {ValueType(ValueType::kRef, type_index)};
+ FunctionSig sig_q_v(1, 0, kRefTypes);
+ ValueType kOptRefType = ValueType(ValueType::kOptRef, type_index);
+
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ uint32_t local_index = f->AddLocal(kOptRefType);
+ f->builder()->AddExport(CStrVector("f"), f);
+ // f: a = [12, 12, 12]; a[1] = 42; return a[arg0]
+ byte f_code[] = {
+ WASM_SET_LOCAL(local_index,
+ WASM_ARRAY_NEW(type_index, WASM_I32V(12), WASM_I32V(3))),
+ WASM_ARRAY_SET(type_index, WASM_GET_LOCAL(local_index), WASM_I32V(1),
+ WASM_I32V(42)),
+ WASM_ARRAY_GET(type_index, WASM_GET_LOCAL(local_index),
+ WASM_GET_LOCAL(0)),
+ kExprEnd};
+ f->EmitCode(f_code, sizeof(f_code));
+
+ // Reads and returns an array's length.
+ WasmFunctionBuilder* g = builder->AddFunction(sigs.i_v());
+ f->builder()->AddExport(CStrVector("g"), g);
+ byte g_code[] = {
+ WASM_ARRAY_LEN(type_index,
+ WASM_ARRAY_NEW(type_index, WASM_I32V(0), WASM_I32V(42))),
+ kExprEnd};
+ g->EmitCode(g_code, sizeof(g_code));
+
+ WasmFunctionBuilder* h = builder->AddFunction(&sig_q_v);
+ h->builder()->AddExport(CStrVector("h"), h);
+ // Create an array of length 2, initialized to [42, 42].
+ byte h_code[] = {WASM_ARRAY_NEW(type_index, WASM_I32V(42), WASM_I32V(2)),
+ kExprEnd};
+ h->EmitCode(h_code, sizeof(h_code));
+
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(&buffer);
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "Test");
+ Handle<WasmInstanceObject> instance =
+ testing::CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
+ .ToHandleChecked();
+
+ Handle<Object> argv[] = {handle(Smi::FromInt(0), isolate)};
+ CHECK_EQ(12, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "f", 1, argv));
+ argv[0] = handle(Smi::FromInt(1), isolate);
+ CHECK_EQ(42, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "f", 1, argv));
+ argv[0] = handle(Smi::FromInt(2), isolate);
+ CHECK_EQ(12, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "f", 1, argv));
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ {
+ Handle<WasmExportedFunction> f_export =
+ testing::GetExportedFunction(isolate, instance, "f").ToHandleChecked();
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ argv[0] = handle(Smi::FromInt(3), isolate);
+ MaybeHandle<Object> no_result =
+ Execution::Call(isolate, f_export, undefined, 1, argv);
+ CHECK(no_result.is_null());
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ argv[0] = handle(Smi::FromInt(-1), isolate);
+ no_result = Execution::Call(isolate, f_export, undefined, 1, argv);
+ CHECK(no_result.is_null());
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ }
+
+ CHECK_EQ(42, testing::CallWasmFunctionForTesting(isolate, instance, &thrower,
+ "g", 0, nullptr));
+
+ // TODO(7748): This uses the JavaScript interface to retrieve the plain
+ // WasmArray. Once the JS interaction story is settled, this may well
+ // need to be changed.
+ Handle<WasmExportedFunction> h_export =
+ testing::GetExportedFunction(isolate, instance, "h").ToHandleChecked();
+ Handle<Object> ref_result =
+ Execution::Call(isolate, h_export, undefined, 0, nullptr)
+ .ToHandleChecked();
+ CHECK(ref_result->IsWasmArray());
+#if OBJECT_PRINT
+ ref_result->Print();
+#endif
+}
+
+} // namespace test_gc
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index 4984bf4524..9196c56aa8 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -42,12 +42,14 @@ class LiftoffCompileEnvironment {
CompilationEnv env = module_builder_.CreateCompilationEnv();
WasmFeatures detected1;
WasmFeatures detected2;
- WasmCompilationResult result1 = ExecuteLiftoffCompilation(
- isolate_->allocator(), &env, test_func.body,
- test_func.function->func_index, isolate_->counters(), &detected1);
- WasmCompilationResult result2 = ExecuteLiftoffCompilation(
- isolate_->allocator(), &env, test_func.body,
- test_func.function->func_index, isolate_->counters(), &detected2);
+ WasmCompilationResult result1 =
+ ExecuteLiftoffCompilation(isolate_->allocator(), &env, test_func.body,
+ test_func.function->func_index, kNoDebugging,
+ isolate_->counters(), &detected1);
+ WasmCompilationResult result2 =
+ ExecuteLiftoffCompilation(isolate_->allocator(), &env, test_func.body,
+ test_func.function->func_index, kNoDebugging,
+ isolate_->counters(), &detected2);
CHECK(result1.succeeded());
CHECK(result2.succeeded());
@@ -68,14 +70,13 @@ class LiftoffCompileEnvironment {
std::vector<int> breakpoints = {}) {
auto test_func = AddFunction(return_types, param_types, raw_function_bytes);
- CompilationEnv env = module_builder_.CreateCompilationEnv(
- breakpoints.empty() ? TestingModuleBuilder::kNoDebug
- : TestingModuleBuilder::kDebug);
+ CompilationEnv env = module_builder_.CreateCompilationEnv();
WasmFeatures detected;
std::unique_ptr<DebugSideTable> debug_side_table_via_compilation;
- ExecuteLiftoffCompilation(
- CcTest::i_isolate()->allocator(), &env, test_func.body, 0, nullptr,
- &detected, VectorOf(breakpoints), &debug_side_table_via_compilation);
+ ExecuteLiftoffCompilation(CcTest::i_isolate()->allocator(), &env,
+ test_func.body, 0, kForDebugging, nullptr,
+ &detected, VectorOf(breakpoints),
+ &debug_side_table_via_compilation);
// If there are no breakpoints, then {ExecuteLiftoffCompilation} should
// provide the same debug side table.
@@ -306,6 +307,8 @@ TEST(Liftoff_debug_side_table_simple) {
{WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))});
CheckDebugSideTable(
{
+ // function entry, locals in registers.
+ {Register(kWasmI32), Register(kWasmI32)},
// OOL stack check, locals spilled, stack empty.
{Stack(kWasmI32), Stack(kWasmI32)},
},
@@ -320,6 +323,8 @@ TEST(Liftoff_debug_side_table_call) {
WASM_GET_LOCAL(0))});
CheckDebugSideTable(
{
+ // function entry, local in register.
+ {Register(kWasmI32)},
// call, local spilled, stack empty.
{Stack(kWasmI32)},
// OOL stack check, local spilled, stack empty.
@@ -338,6 +343,8 @@ TEST(Liftoff_debug_side_table_call_const) {
WASM_GET_LOCAL(0))});
CheckDebugSideTable(
{
+ // function entry, local in register.
+ {Register(kWasmI32)},
// call, local is kConst.
{Constant(kWasmI32, kConst)},
// OOL stack check, local spilled.
@@ -355,6 +362,8 @@ TEST(Liftoff_debug_side_table_indirect_call) {
WASM_GET_LOCAL(0))});
CheckDebugSideTable(
{
+ // function entry, local in register.
+ {Register(kWasmI32)},
// indirect call, local spilled, stack empty.
{Stack(kWasmI32)},
// OOL stack check, local spilled, stack empty.
@@ -375,6 +384,8 @@ TEST(Liftoff_debug_side_table_loop) {
{WASM_I32V_1(kConst), WASM_LOOP(WASM_BR_IF(0, WASM_GET_LOCAL(0)))});
CheckDebugSideTable(
{
+ // function entry, local in register.
+ {Register(kWasmI32)},
// OOL stack check, local spilled, stack empty.
{Stack(kWasmI32)},
// OOL loop stack check, local spilled, stack has {kConst}.
@@ -390,6 +401,8 @@ TEST(Liftoff_debug_side_table_trap) {
{WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))});
CheckDebugSideTable(
{
+ // function entry, locals in registers.
+ {Register(kWasmI32), Register(kWasmI32)},
// OOL stack check, local spilled, stack empty.
{Stack(kWasmI32), Stack(kWasmI32)},
// OOL trap (div by zero), locals spilled, stack empty.
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 178f86bac9..16700cac8c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -569,7 +569,7 @@ WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1), MachineRepresentation::kWord16)));
- uint64_t initial = 4444333322221111, local = 0x9999888877776666;
+ uint64_t initial = 0x4444333322221111, local = 0x9999888877776666;
r.builder().WriteMemory(&memory[0], initial);
CHECK_EQ(static_cast<uint16_t>(initial), r.Call(initial, local));
CHECK_EQ(static_cast<uint16_t>(CompareExchange(initial, initial, local)),
@@ -782,6 +782,35 @@ WASM_EXEC_TEST(I64AtomicCompareExchangeUseOnlyHighWord) {
CHECK_EQ(0x12345678, r.Call());
}
+WASM_EXEC_TEST(I64AtomicExchangeUseOnlyLowWord) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ memory[1] = 0x1234567890abcdeful;
+ r.builder().SetHasSharedMemory();
+  // Test that we can use just the low word of an I64AtomicExchange.
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_BINOP(
+ kExprI64AtomicExchange, WASM_I32V(8), WASM_I64V(1),
+ MachineRepresentation::kWord64)));
+ CHECK_EQ(0x90abcdef, r.Call());
+}
+
+WASM_EXEC_TEST(I64AtomicExchangeUseOnlyHighWord) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ uint64_t* memory =
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ memory[1] = 0x1234567890abcdeful;
+ r.builder().SetHasSharedMemory();
+  // Test that we can use just the high word of an I64AtomicExchange.
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_ROR(
+ WASM_ATOMICS_BINOP(kExprI64AtomicExchange, WASM_I32V(8),
+ WASM_I64V(1), MachineRepresentation::kWord64),
+ WASM_I64V(32))));
+ CHECK_EQ(0x12345678, r.Call());
+}
+
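Both new exchange tests read back the old 64-bit value 0x1234567890abcdef (the exchange stores 1 and returns what was in memory): wrapping it to i32 yields the low word 0x90abcdef, and rotating right by 32 before wrapping yields the high word 0x12345678. The arithmetic can be checked independently of the Wasm machinery:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t initial = 0x1234567890abcdefull;
      // i32.wrap_i64 keeps the low 32 bits.
      assert(static_cast<uint32_t>(initial) == 0x90abcdefu);
      // i64.rotr by 32 swaps the two halves, so wrapping afterwards yields the
      // high word of the original value.
      const uint64_t rotated = (initial >> 32) | (initial << 32);
      assert(static_cast<uint32_t>(rotated) == 0x12345678u);
      return 0;
    }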
WASM_EXEC_TEST(I64AtomicCompareExchange32UZeroExtended) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint32_t> r(execution_tier);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index ab826df7d7..5afe0ea22f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -367,21 +367,21 @@ void TestTableCopyInbounds(ExecutionTier execution_tier, int table_dst,
}
}
-WASM_EXEC_TEST(TableCopyInboundsFrom0To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom0To0) {
TestTableCopyInbounds(execution_tier, 0, 0);
}
-WASM_EXEC_TEST(TableCopyInboundsFrom3To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom3To0) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyInbounds(execution_tier, 3, 0);
}
-WASM_EXEC_TEST(TableCopyInboundsFrom5To9) {
+WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom5To9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyInbounds(execution_tier, 5, 9);
}
-WASM_EXEC_TEST(TableCopyInboundsFrom6To6) {
+WASM_COMPILED_EXEC_TEST(TableCopyInboundsFrom6To6) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyInbounds(execution_tier, 6, 6);
}
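The conversions in this file (and in the exception and JS-interop tests further below) move from WASM_EXEC_TEST, which also schedules each test body on the interpreter tier, to WASM_COMPILED_EXEC_TEST, which restricts it to the compiled tiers. The real macros live in the cctest wasm run utilities; the sketch below only illustrates the general one-body, one-wrapper-per-tier pattern, with every name invented for the example:

    #include <cstdio>

    enum class ExecutionTier { kLiftoff, kTurbofan };

    // Illustrative stand-in for the tier-splitting test macros.
    #define COMPILED_EXEC_TEST(name)                              \
      void RunWasm_##name##_Impl(ExecutionTier execution_tier);   \
      void RunWasm_##name##_turbofan() {                          \
        RunWasm_##name##_Impl(ExecutionTier::kTurbofan);          \
      }                                                           \
      void RunWasm_##name##_liftoff() {                           \
        RunWasm_##name##_Impl(ExecutionTier::kLiftoff);           \
      }                                                           \
      void RunWasm_##name##_Impl(ExecutionTier execution_tier)

    COMPILED_EXEC_TEST(Example) {
      std::printf("running on tier %d\n", static_cast<int>(execution_tier));
    }

    int main() {
      RunWasm_Example_turbofan();
      RunWasm_Example_liftoff();
      return 0;
    }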
@@ -476,12 +476,14 @@ void TestTableInitElems(ExecutionTier execution_tier, int table_index) {
CheckTableCall(isolate, table, &r, call_index, 0, 1, 2, 3, 4);
}
-WASM_EXEC_TEST(TableInitElems0) { TestTableInitElems(execution_tier, 0); }
-WASM_EXEC_TEST(TableInitElems7) {
+WASM_COMPILED_EXEC_TEST(TableInitElems0) {
+ TestTableInitElems(execution_tier, 0);
+}
+WASM_COMPILED_EXEC_TEST(TableInitElems7) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableInitElems(execution_tier, 7);
}
-WASM_EXEC_TEST(TableInitElems9) {
+WASM_COMPILED_EXEC_TEST(TableInitElems9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableInitElems(execution_tier, 9);
}
@@ -555,12 +557,12 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
r.CheckCallViaJS(0xDEADBEEF, 0, 10, 1);
}
-WASM_EXEC_TEST(TableInitOob0) { TestTableInitOob(execution_tier, 0); }
-WASM_EXEC_TEST(TableInitOob7) {
+WASM_COMPILED_EXEC_TEST(TableInitOob0) { TestTableInitOob(execution_tier, 0); }
+WASM_COMPILED_EXEC_TEST(TableInitOob7) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableInitOob(execution_tier, 7);
}
-WASM_EXEC_TEST(TableInitOob9) {
+WASM_COMPILED_EXEC_TEST(TableInitOob9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableInitOob(execution_tier, 9);
}
@@ -628,21 +630,21 @@ void TestTableCopyElems(ExecutionTier execution_tier, int table_dst,
}
}
-WASM_EXEC_TEST(TableCopyElemsFrom0To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom0To0) {
TestTableCopyElems(execution_tier, 0, 0);
}
-WASM_EXEC_TEST(TableCopyElemsFrom3To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom3To0) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyElems(execution_tier, 3, 0);
}
-WASM_EXEC_TEST(TableCopyElemsFrom5To9) {
+WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom5To9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyElems(execution_tier, 5, 9);
}
-WASM_EXEC_TEST(TableCopyElemsFrom6To6) {
+WASM_COMPILED_EXEC_TEST(TableCopyElemsFrom6To6) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyElems(execution_tier, 6, 6);
}
@@ -703,21 +705,21 @@ void TestTableCopyCalls(ExecutionTier execution_tier, int table_dst,
}
}
-WASM_EXEC_TEST(TableCopyCallsFrom0To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyCallsFrom0To0) {
TestTableCopyCalls(execution_tier, 0, 0);
}
-WASM_EXEC_TEST(TableCopyCallsFrom3To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyCallsFrom3To0) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyCalls(execution_tier, 3, 0);
}
-WASM_EXEC_TEST(TableCopyCallsFrom5To9) {
+WASM_COMPILED_EXEC_TEST(TableCopyCallsFrom5To9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyCalls(execution_tier, 5, 9);
}
-WASM_EXEC_TEST(TableCopyCallsFrom6To6) {
+WASM_COMPILED_EXEC_TEST(TableCopyCallsFrom6To6) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyCalls(execution_tier, 6, 6);
}
@@ -779,21 +781,21 @@ void TestTableCopyOobWrites(ExecutionTier execution_tier, int table_dst,
CheckTable(isolate, table, f0, f1, f2, f3, f4);
}
-WASM_EXEC_TEST(TableCopyOobWritesFrom0To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom0To0) {
TestTableCopyOobWrites(execution_tier, 0, 0);
}
-WASM_EXEC_TEST(TableCopyOobWritesFrom3To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom3To0) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOobWrites(execution_tier, 3, 0);
}
-WASM_EXEC_TEST(TableCopyOobWritesFrom5To9) {
+WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom5To9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOobWrites(execution_tier, 5, 9);
}
-WASM_EXEC_TEST(TableCopyOobWritesFrom6To6) {
+WASM_COMPILED_EXEC_TEST(TableCopyOobWritesFrom6To6) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOobWrites(execution_tier, 6, 6);
}
@@ -838,26 +840,26 @@ void TestTableCopyOob1(ExecutionTier execution_tier, int table_dst,
}
}
-WASM_EXEC_TEST(TableCopyOob1From0To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyOob1From0To0) {
TestTableCopyOob1(execution_tier, 0, 0);
}
-WASM_EXEC_TEST(TableCopyOob1From3To0) {
+WASM_COMPILED_EXEC_TEST(TableCopyOob1From3To0) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOob1(execution_tier, 3, 0);
}
-WASM_EXEC_TEST(TableCopyOob1From5To9) {
+WASM_COMPILED_EXEC_TEST(TableCopyOob1From5To9) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOob1(execution_tier, 5, 9);
}
-WASM_EXEC_TEST(TableCopyOob1From6To6) {
+WASM_COMPILED_EXEC_TEST(TableCopyOob1From6To6) {
EXPERIMENTAL_FLAG_SCOPE(anyref);
TestTableCopyOob1(execution_tier, 6, 6);
}
-WASM_EXEC_TEST(ElemDropTwice) {
+WASM_COMPILED_EXEC_TEST(ElemDropTwice) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddIndirectFunctionTable(nullptr, 1);
@@ -868,7 +870,7 @@ WASM_EXEC_TEST(ElemDropTwice) {
r.CheckCallViaJS(0);
}
-WASM_EXEC_TEST(ElemDropThenTableInit) {
+WASM_COMPILED_EXEC_TEST(ElemDropThenTableInit) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().AddIndirectFunctionTable(nullptr, 1);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index cddc6f7468..0b238e468d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -12,7 +12,7 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_exceptions {
-WASM_EXEC_TEST(TryCatchThrow) {
+WASM_COMPILED_EXEC_TEST(TryCatchThrow) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -32,7 +32,7 @@ WASM_EXEC_TEST(TryCatchThrow) {
r.CheckCallViaJS(kResult1, 1);
}
-WASM_EXEC_TEST(TryCatchCallDirect) {
+WASM_COMPILED_EXEC_TEST(TryCatchCallDirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -60,7 +60,7 @@ WASM_EXEC_TEST(TryCatchCallDirect) {
r.CheckCallViaJS(kResult1, 1);
}
-WASM_EXEC_TEST(TryCatchCallIndirect) {
+WASM_COMPILED_EXEC_TEST(TryCatchCallIndirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -97,7 +97,7 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
r.CheckCallViaJS(kResult1, 1);
}
-WASM_EXEC_TEST(TryCatchCallExternal) {
+WASM_COMPILED_EXEC_TEST(TryCatchCallExternal) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
HandleScope scope(CcTest::InitIsolateOnce());
@@ -127,7 +127,7 @@ WASM_EXEC_TEST(TryCatchCallExternal) {
r.CheckCallViaJS(kResult1, 1);
}
-WASM_EXEC_TEST(TryCatchTrapTypeError) {
+WASM_COMPILED_EXEC_TEST(TryCatchTrapTypeError) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
HandleScope scope(CcTest::InitIsolateOnce());
@@ -189,22 +189,22 @@ void TestTrapNotCaught(byte* code, size_t code_size,
} // namespace
-WASM_EXEC_TEST(TryCatchTrapUnreachable) {
+WASM_COMPILED_EXEC_TEST(TryCatchTrapUnreachable) {
byte code[] = {WASM_UNREACHABLE};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
+WASM_COMPILED_EXEC_TEST(TryCatchTrapMemOutOfBounds) {
byte code[] = {WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-1))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_EXEC_TEST(TryCatchTrapDivByZero) {
+WASM_COMPILED_EXEC_TEST(TryCatchTrapDivByZero) {
byte code[] = {WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
-WASM_EXEC_TEST(TryCatchTrapRemByZero) {
+WASM_COMPILED_EXEC_TEST(TryCatchTrapRemByZero) {
byte code[] = {WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(0))};
TestTrapNotCaught(code, arraysize(code), execution_tier);
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 0db42c69b4..e7fec1a464 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -71,7 +71,7 @@ ManuallyImportedJSFunction CreateJSSelector(FunctionSig* sig, int which) {
}
} // namespace
-WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
+WASM_COMPILED_EXEC_TEST(Run_Int32Sub_jswrapped) {
WasmRunner<int, int, int> r(execution_tier);
BUILD(r, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -79,7 +79,7 @@ WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
r.CheckCallViaJS(-8723487, -8000000, 723487);
}
-WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
+WASM_COMPILED_EXEC_TEST(Run_Float32Div_jswrapped) {
WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -87,7 +87,7 @@ WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
r.CheckCallViaJS(64, -16, -0.25);
}
-WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
+WASM_COMPILED_EXEC_TEST(Run_Float64Add_jswrapped) {
WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -95,7 +95,7 @@ WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
r.CheckCallViaJS(-5.5, -5.25, -0.25);
}
-WASM_EXEC_TEST(Run_I32Popcount_jswrapped) {
+WASM_COMPILED_EXEC_TEST(Run_I32Popcount_jswrapped) {
WasmRunner<int, int> r(execution_tier);
BUILD(r, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
@@ -104,7 +104,7 @@ WASM_EXEC_TEST(Run_I32Popcount_jswrapped) {
r.CheckCallViaJS(6, 0x3F);
}
-WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
+WASM_COMPILED_EXEC_TEST(Run_CallJS_Add_jswrapped) {
TestSignatures sigs;
HandleScope scope(CcTest::InitIsolateOnce());
const char* source = "(function(a) { return a + 99; })";
@@ -121,7 +121,7 @@ WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
r.CheckCallViaJS(-666666801, -666666900);
}
-WASM_EXEC_TEST(Run_IndirectCallJSFunction) {
+WASM_COMPILED_EXEC_TEST(Run_IndirectCallJSFunction) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
@@ -192,42 +192,42 @@ void RunJSSelectTest(ExecutionTier tier, int which) {
}
}
-WASM_EXEC_TEST(Run_JSSelect_0) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_0) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 0);
}
-WASM_EXEC_TEST(Run_JSSelect_1) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_1) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 1);
}
-WASM_EXEC_TEST(Run_JSSelect_2) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_2) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 2);
}
-WASM_EXEC_TEST(Run_JSSelect_3) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_3) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 3);
}
-WASM_EXEC_TEST(Run_JSSelect_4) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_4) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 4);
}
-WASM_EXEC_TEST(Run_JSSelect_5) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_5) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 5);
}
-WASM_EXEC_TEST(Run_JSSelect_6) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_6) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 6);
}
-WASM_EXEC_TEST(Run_JSSelect_7) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelect_7) {
CcTest::InitializeVM();
RunJSSelectTest(execution_tier, 7);
}
@@ -262,42 +262,42 @@ void RunWASMSelectTest(ExecutionTier tier, int which) {
}
}
-WASM_EXEC_TEST(Run_WASMSelect_0) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_0) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 0);
}
-WASM_EXEC_TEST(Run_WASMSelect_1) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_1) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 1);
}
-WASM_EXEC_TEST(Run_WASMSelect_2) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_2) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 2);
}
-WASM_EXEC_TEST(Run_WASMSelect_3) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_3) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 3);
}
-WASM_EXEC_TEST(Run_WASMSelect_4) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_4) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 4);
}
-WASM_EXEC_TEST(Run_WASMSelect_5) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_5) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 5);
}
-WASM_EXEC_TEST(Run_WASMSelect_6) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_6) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 6);
}
-WASM_EXEC_TEST(Run_WASMSelect_7) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelect_7) {
CcTest::InitializeVM();
RunWASMSelectTest(execution_tier, 7);
}
@@ -334,44 +334,44 @@ void RunWASMSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
}
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_0) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_0) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 0, 1);
RunWASMSelectAlignTest(execution_tier, 0, 2);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_1) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_1) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 1, 2);
RunWASMSelectAlignTest(execution_tier, 1, 3);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_2) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_2) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 2, 3);
RunWASMSelectAlignTest(execution_tier, 2, 4);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_3) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_3) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 3, 3);
RunWASMSelectAlignTest(execution_tier, 3, 4);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_4) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_4) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 4, 3);
RunWASMSelectAlignTest(execution_tier, 4, 4);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_7) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_7) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 7, 5);
RunWASMSelectAlignTest(execution_tier, 7, 6);
RunWASMSelectAlignTest(execution_tier, 7, 7);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_8) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_8) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 8, 5);
RunWASMSelectAlignTest(execution_tier, 8, 6);
@@ -379,7 +379,7 @@ WASM_EXEC_TEST(Run_WASMSelectAlign_8) {
RunWASMSelectAlignTest(execution_tier, 8, 8);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_9) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_9) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 9, 6);
RunWASMSelectAlignTest(execution_tier, 9, 7);
@@ -387,7 +387,7 @@ WASM_EXEC_TEST(Run_WASMSelectAlign_9) {
RunWASMSelectAlignTest(execution_tier, 9, 9);
}
-WASM_EXEC_TEST(Run_WASMSelectAlign_10) {
+WASM_COMPILED_EXEC_TEST(Run_WASMSelectAlign_10) {
CcTest::InitializeVM();
RunWASMSelectAlignTest(execution_tier, 10, 7);
RunWASMSelectAlignTest(execution_tier, 10, 8);
@@ -449,37 +449,37 @@ void RunJSSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
}
}
-WASM_EXEC_TEST(Run_JSSelectAlign_0) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_0) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 0, 1);
RunJSSelectAlignTest(execution_tier, 0, 2);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_1) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_1) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 1, 2);
RunJSSelectAlignTest(execution_tier, 1, 3);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_2) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_2) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 2, 3);
RunJSSelectAlignTest(execution_tier, 2, 4);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_3) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_3) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 3, 3);
RunJSSelectAlignTest(execution_tier, 3, 4);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_4) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_4) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 4, 3);
RunJSSelectAlignTest(execution_tier, 4, 4);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_7) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_7) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 7, 3);
RunJSSelectAlignTest(execution_tier, 7, 4);
@@ -487,7 +487,7 @@ WASM_EXEC_TEST(Run_JSSelectAlign_7) {
RunJSSelectAlignTest(execution_tier, 7, 4);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_8) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_8) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 8, 5);
RunJSSelectAlignTest(execution_tier, 8, 6);
@@ -495,7 +495,7 @@ WASM_EXEC_TEST(Run_JSSelectAlign_8) {
RunJSSelectAlignTest(execution_tier, 8, 8);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_9) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_9) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 9, 6);
RunJSSelectAlignTest(execution_tier, 9, 7);
@@ -503,7 +503,7 @@ WASM_EXEC_TEST(Run_JSSelectAlign_9) {
RunJSSelectAlignTest(execution_tier, 9, 9);
}
-WASM_EXEC_TEST(Run_JSSelectAlign_10) {
+WASM_COMPILED_EXEC_TEST(Run_JSSelectAlign_10) {
CcTest::InitializeVM();
RunJSSelectAlignTest(execution_tier, 10, 7);
RunJSSelectAlignTest(execution_tier, 10, 8);
@@ -560,11 +560,11 @@ void RunPickerTest(ExecutionTier tier, bool indirect) {
r.CheckCallApplyViaJS(right, rc_fn.function_index(), args_right, 1);
}
-WASM_EXEC_TEST(Run_ReturnCallImportedFunction) {
+WASM_COMPILED_EXEC_TEST(Run_ReturnCallImportedFunction) {
RunPickerTest(execution_tier, false);
}
-WASM_EXEC_TEST(Run_ReturnCallIndirectImportedFunction) {
+WASM_COMPILED_EXEC_TEST(Run_ReturnCallIndirectImportedFunction) {
RunPickerTest(execution_tier, true);
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
index 24a5978950..56c6dca248 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-liftoff.cc
@@ -84,6 +84,41 @@ WASM_SIMD_LIFTOFF_TEST(S128Return) {
CHECK_EQ(1, r.Call());
}
+// Exercise Liftoff's logic for zero-initializing stack slots. We were using an
+// incorrect instruction for storing zeroes into the slot when the slot offset
+// was too large to fit in the instruction as an immediate.
+WASM_SIMD_LIFTOFF_TEST(FillStackSlotsWithZero_CheckStartOffset) {
+ WasmRunner<int64_t> r(ExecutionTier::kLiftoff, kNoLowerSimd);
+ // Function that takes in 32 i64 arguments, returns i64. This gets us a large
+ // enough starting offset from which we spill locals.
+ // start = 32 * 8 + 16 (instance) = 272 (cannot fit in signed int9).
+ FunctionSig* sig =
+ r.CreateSig<int64_t, int64_t, int64_t, int64_t, int64_t, int64_t, int64_t,
+ int64_t, int64_t, int64_t, int64_t, int64_t, int64_t, int64_t,
+ int64_t, int64_t, int64_t, int64_t, int64_t, int64_t, int64_t,
+ int64_t, int64_t, int64_t, int64_t, int64_t, int64_t, int64_t,
+ int64_t, int64_t, int64_t, int64_t, int64_t>();
+ WasmFunctionCompiler& simd_func = r.NewFunction(sig);
+
+  // We zero 16 bytes at a time using stp, so allocate locals such that we are
+  // left with a remainder (8 bytes here) and hit the case where we use str.
+ simd_func.AllocateLocal(kWasmS128);
+ simd_func.AllocateLocal(kWasmI64);
+ BUILD(simd_func, WASM_I64V_1(1));
+
+ BUILD(r, WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1), WASM_I64V_1(1),
+ WASM_CALL_FUNCTION0(simd_func.function_index()));
+
+ CHECK_EQ(1, r.Call());
+}
+
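The comment at the top of this test describes the arm64 path Liftoff takes when zero-filling spill slots: pairs of 8-byte slots are cleared 16 bytes at a time (stp), and a trailing 8-byte remainder falls back to a single store (str), which is where an offset such as 272 has to be encodable. The plain C++ sketch below mirrors that fill pattern, with the instruction selection reduced to comments; it is an illustration of the shape of the logic, not the Liftoff implementation:

    #include <cstdint>
    #include <cstring>

    // Zero `size` bytes starting at `offset` inside a fake frame, 16 bytes at a
    // time with a single smaller store for the remainder.
    void FillSlotsWithZero(uint8_t* frame, size_t offset, size_t size) {
      size_t pos = offset;
      const size_t end = offset + size;
      while (end - pos >= 16) {
        std::memset(frame + pos, 0, 16);  // stp xzr, xzr, [frame, #pos]
        pos += 16;
      }
      if (end - pos >= 8) {
        // str xzr, [frame, #pos]; the offset (e.g. 272) must still be encodable,
        // which is what the regression test above exercises.
        std::memset(frame + pos, 0, 8);
      }
    }

    int main() {
      uint8_t frame[512] = {};
      FillSlotsWithZero(frame, 272, 24);  // one stp-sized chunk plus an 8-byte tail
      return 0;
    }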
#undef WASM_SIMD_LIFTOFF_TEST
} // namespace test_run_wasm_simd_liftoff
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
new file mode 100644
index 0000000000..517f8438a4
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/wasm-tier.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_simd {
+
+#define WASM_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ ExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kTurbofan); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
+
+WASM_SIMD_TEST(I8x16ToF32x4) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ byte param1 = 0;
+ BUILD(r,
+ WASM_SET_GLOBAL(
+ 0, WASM_SIMD_UNOP(kExprF32x4Sqrt,
+ WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param1)))),
+ WASM_ONE);
+
+ // Arbitrary pattern that doesn't end up creating a NaN.
+ r.Call(0x5b);
+ float f = bit_cast<float>(0x5b5b5b5b);
+ float actual = ReadLittleEndianValue<float>(&g[0]);
+ float expected = std::sqrt(f);
+ CHECK_EQ(expected, actual);
+}
+
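The expected value in this lowering test is computed on the scalar side: splatting the byte 0x5b gives every lane the bit pattern 0x5b5b5b5b, which reinterpreted as a float is a finite positive number, so its square root is well defined and comparable lane by lane. A standalone check of that reasoning, using memcpy in place of V8's bit_cast:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t bits = 0x5b5b5b5bu;
      float f;
      std::memcpy(&f, &bits, sizeof f);    // stand-in for bit_cast<float>(bits)
      assert(!std::isnan(f) && f > 0.0f);  // exponent field is neither 0xff nor 0
      const float root = std::sqrt(f);
      assert(!std::isnan(root) && root * root > 0.0f);
      return 0;
    }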
+} // namespace test_run_wasm_simd
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 23ab076e4a..977d21e875 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -99,12 +99,18 @@ T Div(T a, T b) {
template <typename T>
T Minimum(T a, T b) {
- return a <= b ? a : b;
+  // Follow one of the possible implementations given in
+ // https://en.cppreference.com/w/cpp/algorithm/min so that it works the same
+ // way for floats (when given NaNs/Infs).
+ return (b < a) ? b : a;
}
template <typename T>
T Maximum(T a, T b) {
- return a >= b ? a : b;
+ // Follow one of the possible implementation given in
+  // Follow one of the possible implementations given in
+ // way for floats (when given NaNs/Infs).
+ return (a < b) ? b : a;
}
template <typename T>
@@ -408,85 +414,6 @@ bool ExpectFused(ExecutionTier tier) {
lane_index, WASM_GET_LOCAL(value))), \
WASM_RETURN1(WASM_ZERO))
-#define TO_BYTE(val) static_cast<byte>(val)
-// TODO(v8:10258): We have support for emitting multi-byte opcodes now, so this
-// can change to simply, op, once the decoder is fixed to decode multi byte
-// opcodes.
-#define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
-#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
-#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
-#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
-#define WASM_SIMD_SHIFT_OP(op, x, y) x, y, WASM_SIMD_OP(op)
-#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
- x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
-#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
-
-#define WASM_SIMD_F64x2_SPLAT(x) WASM_SIMD_SPLAT(F64x2, x)
-#define WASM_SIMD_F64x2_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprF64x2ExtractLane), TO_BYTE(lane)
-#define WASM_SIMD_F64x2_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprF64x2ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_F32x4_SPLAT(x) WASM_SIMD_SPLAT(F32x4, x)
-#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
-#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_I64x2_SPLAT(x) WASM_SIMD_SPLAT(I64x2, x)
-#define WASM_SIMD_I64x2_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI64x2ExtractLane), TO_BYTE(lane)
-#define WASM_SIMD_I64x2_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprI64x2ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_I32x4_SPLAT(x) WASM_SIMD_SPLAT(I32x4, x)
-#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
-#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_I16x8_SPLAT(x) WASM_SIMD_SPLAT(I16x8, x)
-#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI16x8ExtractLaneS), TO_BYTE(lane)
-#define WASM_SIMD_I16x8_EXTRACT_LANE_U(lane, x) \
- x, WASM_SIMD_OP(kExprI16x8ExtractLaneU), TO_BYTE(lane)
-#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_I8x16_SPLAT(x) WASM_SIMD_SPLAT(I8x16, x)
-#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
- x, WASM_SIMD_OP(kExprI8x16ExtractLaneS), TO_BYTE(lane)
-#define WASM_SIMD_I8x16_EXTRACT_LANE_U(lane, x) \
- x, WASM_SIMD_OP(kExprI8x16ExtractLaneU), TO_BYTE(lane)
-#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
- x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)
-
-#define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y) \
- x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
- TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]), \
- TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]), \
- TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]), \
- TO_BYTE(m[15])
-
-#define WASM_SIMD_LOAD_MEM(index) \
- index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
-#define WASM_SIMD_LOAD_MEM_OFFSET(offset, index) \
- index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, offset
-#define WASM_SIMD_STORE_MEM(index, val) \
- index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET
-#define WASM_SIMD_STORE_MEM_OFFSET(offset, index, val) \
- index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, offset
-
-#define WASM_SIMD_F64x2_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfma)
-#define WASM_SIMD_F64x2_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfms)
-#define WASM_SIMD_F32x4_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfma)
-#define WASM_SIMD_F32x4_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfms)
-
-#define WASM_SIMD_LOAD_SPLAT(opcode, index) \
- index, WASM_SIMD_OP(opcode), ZERO_ALIGNMENT, ZERO_OFFSET
-#define WASM_SIMD_LOAD_EXTEND(opcode, index) \
- index, WASM_SIMD_OP(opcode), ZERO_ALIGNMENT, ZERO_OFFSET
-
// Runs tests of compiled code, using the interpreter as a reference.
#define WASM_SIMD_COMPILED_TEST(name) \
void RunWasm_##name##_Impl(LowerSimd lower_simd, \
@@ -548,7 +475,7 @@ WASM_SIMD_TEST(S128Globals) {
// Set up a global to hold input and output vectors.
int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
- BUILD_V(r, WASM_SET_GLOBAL(1, WASM_GET_GLOBAL(0)), WASM_ONE);
+ BUILD(r, WASM_SET_GLOBAL(1, WASM_GET_GLOBAL(0)), WASM_ONE);
FOR_INT32_INPUTS(x) {
for (int i = 0; i < 4; i++) {
@@ -829,6 +756,18 @@ WASM_SIMD_TEST(F32x4Max) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+WASM_SIMD_TEST_NO_LOWERING(F32x4Pmin) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmin, Minimum);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F32x4Pmax) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+
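The Pmin/Pmax tests above exercise the SIMD proposal's pseudo-minimum and pseudo-maximum operations. As a minimal sketch of the lane-wise semantics the Minimum/Maximum reference helpers are assumed to model (the helpers themselves are defined elsewhere in this test file, not in this hunk): pmin picks the second operand only when it compares strictly smaller, so NaN handling differs from an IEEE-style min.

#include <cassert>
#include <cmath>

// Sketch only: pseudo-min/max as in the SIMD proposal, not V8's helpers.
float PseudoMin(float a, float b) { return b < a ? b : a; }
float PseudoMax(float a, float b) { return a < b ? b : a; }

int main() {
  assert(PseudoMin(1.0f, 2.0f) == 1.0f);
  // A comparison with NaN is false, so the first operand wins either way.
  assert(PseudoMin(1.0f, std::nanf("")) == 1.0f);
  assert(std::isnan(PseudoMin(std::nanf(""), 1.0f)));
  return 0;
}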
void RunF32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, FloatCompareOp expected_op) {
WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
@@ -940,8 +879,8 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
// Set up a global to hold output vector.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
byte param1 = 0;
- BUILD_V(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
- WASM_ONE);
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+ WASM_ONE);
FOR_INT64_INPUTS(x) {
r.Call(x);
@@ -1206,7 +1145,8 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
CHECK_EQ(1., ReadLittleEndianValue<double>(&g1[1]));
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
WasmRunner<int64_t> r(execution_tier, lower_simd);
BUILD(r, WASM_IF_ELSE_L(
@@ -1226,7 +1166,8 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
WASM_I64V(1), WASM_I64V(0)));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
+ // V8_TARGET_ARCH_MIPS64
bool IsExtreme(double x) {
double abs_x = std::fabs(x);
@@ -1417,6 +1358,18 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Div) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, Div);
}
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+WASM_SIMD_TEST_NO_LOWERING(F64x2Pmin) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmin, Minimum);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Pmax) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+
void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, DoubleCompareOp expected_op) {
WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
@@ -1661,7 +1614,8 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
}
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_IA32 || \
+ V8_TARGET_ARCH_X64
WASM_SIMD_TEST_NO_LOWERING(I8x16BitMask) {
FLAG_SCOPE(wasm_simd_post_mvp);
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1721,7 +1675,8 @@ WASM_SIMD_TEST_NO_LOWERING(I32x4BitMask) {
CHECK_EQ(actual, expected);
}
}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_IA32 ||
+ // V8_TARGET_ARCH_X64
WASM_SIMD_TEST(I8x16Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -2059,17 +2014,17 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4Shl) {
+WASM_SIMD_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ShrS) {
+WASM_SIMD_TEST(I32x4ShrS) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ShrU) {
+WASM_SIMD_TEST(I32x4ShrU) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
LogicalShiftRight);
}
@@ -2328,17 +2283,17 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8Shl) {
+WASM_SIMD_TEST(I16x8Shl) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ShrS) {
+WASM_SIMD_TEST(I16x8ShrS) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ShrU) {
+WASM_SIMD_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
LogicalShiftRight);
}
@@ -2520,6 +2475,7 @@ WASM_SIMD_TEST(I8x16LeU) {
}
WASM_SIMD_TEST(I8x16Mul) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
base::MulWithWraparound);
}
@@ -2562,17 +2518,17 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16Shl) {
+WASM_SIMD_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16ShrS) {
+WASM_SIMD_TEST(I8x16ShrS) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16ShrU) {
+WASM_SIMD_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
LogicalShiftRight);
}
@@ -3039,7 +2995,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
@@ -3195,8 +3151,9 @@ WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
+ BUILD(r,
+ WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(76)))),
WASM_SET_LOCAL(1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
WASM_SIMD_I32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
CHECK_EQ(76, r.Call());
@@ -3407,6 +3364,29 @@ WASM_SIMD_TEST(SimdLoadStoreLoadMemargOffset) {
}
}
+// Test a multi-byte opcode with offset values that encode into valid opcodes.
+// This is to exercise decoding logic and make sure we get the lengths right.
+WASM_SIMD_TEST(S8x16LoadSplatOffset) {
+ // This offset encodes to the LEB128 bytes [0x82, 0x22], both of which are valid opcode bytes.
+ constexpr int offset = 4354;
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ int8_t* memory = r.builder().AddMemoryElems<int8_t>(kWasmPageSize);
+ int8_t* global = r.builder().AddGlobal<int8_t>(kWasmS128);
+ BUILD(r,
+ WASM_SET_GLOBAL(
+ 0, WASM_SIMD_LOAD_SPLAT_OFFSET(kExprS8x16LoadSplat, WASM_I32V(0),
+ U32V_2(offset))),
+ WASM_ONE);
+
+ // We don't really care about all valid values, so just test a single one.
+ int8_t x = 7;
+ r.builder().WriteMemory(&memory[offset], x);
+ r.Call();
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(x, ReadLittleEndianValue<int8_t>(&global[i]));
+ }
+}
+
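The offset 4354 used above is emitted as an unsigned LEB128 immediate; a small, self-contained encoder (a sketch for illustration, not V8's LEBHelper) shows why it becomes the byte pair 0x82 0x22 that the test comment refers to.

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal unsigned LEB128 encoder, for illustration only.
std::vector<uint8_t> EncodeU32Leb(uint32_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t b = value & 0x7f;
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit
    out.push_back(b);
  } while (value != 0);
  return out;
}

int main() {
  for (uint8_t b : EncodeU32Leb(4354)) std::printf("%#04x ", b);  // 0x82 0x22
  return 0;
}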
template <typename T>
void RunLoadSplatTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode op) {
@@ -3450,20 +3430,25 @@ void RunLoadExtendTest(ExecutionTier execution_tier, LowerSimd lower_simd,
constexpr int lanes_s = 16 / sizeof(S);
constexpr int lanes_t = 16 / sizeof(T);
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
- T* global = r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_EXTEND(op, WASM_I32V(mem_index))),
- WASM_ONE);
+ // Load extends always load 64 bits, so alignment values can be from 0 to 3.
+ for (byte alignment = 0; alignment <= 3; alignment++) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
+ T* global = r.builder().AddGlobal<T>(kWasmS128);
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_EXTEND_ALIGNMENT(
+ op, WASM_I32V(mem_index), alignment)),
+ WASM_ONE);
- for (S x : compiler::ValueHelper::GetVector<S>()) {
- for (int i = 0; i < lanes_s; i++) {
- // 16-th byte in memory is lanes-th element (size T) of memory.
- r.builder().WriteMemory(&memory[lanes_s + i], x);
- }
- r.Call();
- for (int i = 0; i < lanes_t; i++) {
- CHECK_EQ(static_cast<T>(x), ReadLittleEndianValue<T>(&global[i]));
+ for (S x : compiler::ValueHelper::GetVector<S>()) {
+ for (int i = 0; i < lanes_s; i++) {
+ // 16-th byte in memory is lanes-th element (size T) of memory.
+ r.builder().WriteMemory(&memory[lanes_s + i], x);
+ }
+ r.Call();
+ for (int i = 0; i < lanes_t; i++) {
+ CHECK_EQ(static_cast<T>(x), ReadLittleEndianValue<T>(&global[i]));
+ }
}
}
}
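The alignment loop above relies on the memarg encoding rule that the alignment immediate is the base-2 logarithm of the claimed alignment and may not exceed the natural alignment of the access; load-extend instructions read 64 bits (8 bytes), hence the 0..3 range. A tiny check of that arithmetic, assuming only the log2 rule:

#include <cstdint>

// log2 of the largest power of two not exceeding the access size, i.e. the
// maximum legal alignment immediate for that access.
constexpr int MaxAlignmentImmediate(int access_bytes) {
  int log2 = 0;
  while ((1 << (log2 + 1)) <= access_bytes) ++log2;
  return log2;
}

static_assert(MaxAlignmentImmediate(8) == 3,
              "a 64-bit load-extend accepts alignment immediates 0..3");

int main() { return 0; }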
@@ -3497,11 +3482,9 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2Load32x2S) {
kExprI64x2Load32x2S);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -3522,7 +3505,7 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AllTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
+ FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -3540,8 +3523,6 @@ WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST(BitSelect) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 1aebac3c76..436071ed09 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -3752,9 +3752,9 @@ TEST(Liftoff_tier_up) {
memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
desc.buffer = buffer.get();
desc.instr_size = static_cast<int>(sub_size);
- std::unique_ptr<WasmCode> new_code =
- native_module->AddCode(add.function_index(), desc, 0, 0, {}, {},
- WasmCode::kFunction, ExecutionTier::kTurbofan);
+ std::unique_ptr<WasmCode> new_code = native_module->AddCode(
+ add.function_index(), desc, 0, 0, {}, {}, WasmCode::kFunction,
+ ExecutionTier::kTurbofan, kNoDebugging);
native_module->PublishCode(std::move(new_code));
// Second run should now execute {sub}.
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 9f011ecf38..a1d97fc622 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -7,8 +7,8 @@
#include "src/execution/frames-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/utils/utils.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-objects-inl.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -116,7 +116,7 @@ class BreakHandler : public debug::DebugDelegate {
// Check the current position.
StackTraceFrameIterator frame_it(isolate_);
- auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
+ auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasm();
CHECK_EQ(expected_breaks_[count_].position, summ.byte_offset());
expected_breaks_[count_].pre_action();
@@ -152,14 +152,6 @@ Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
runner->main_isolate()->factory()->NewBreakPoint(
break_index++, runner->main_isolate()->factory()->empty_string());
CHECK(WasmScript::SetBreakPoint(script, &code_offset, break_point));
- int set_byte_offset = code_offset - func_offset;
- CHECK_EQ(expected_set_byte_offset, set_byte_offset);
- // Also set breakpoint on the debug info of the instance directly, since the
- // instance chain is not setup properly in tests.
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::SetBreakpoint(debug_info, function_index, set_byte_offset);
-
return break_point;
}
@@ -172,11 +164,6 @@ void ClearBreakpoint(WasmRunnerBase* runner, int function_index,
Handle<Script> script(instance->module_object().script(),
runner->main_isolate());
CHECK(WasmScript::ClearBreakPoint(script, code_offset, break_point));
- // Also clear breakpoint on the debug info of the instance directly, since the
- // instance chain is not setup properly in tests.
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::ClearBreakpoint(debug_info, function_index, byte_offset);
}
// Wrapper with operator<<.
@@ -243,21 +230,23 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
HandleScope handles(isolate_);
StackTraceFrameIterator frame_it(isolate_);
- auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
- Handle<WasmInstanceObject> instance = summ.wasm_instance();
-
- auto frame =
- instance->debug_info().GetInterpretedFrame(frame_it.frame()->fp(), 0);
- CHECK_EQ(expected.locals.size(), frame->GetLocalCount());
- for (int i = 0; i < frame->GetLocalCount(); ++i) {
- CHECK_EQ(WasmValWrapper{expected.locals[i]},
- WasmValWrapper{frame->GetLocalValue(i)});
+ WasmFrame* frame = WasmFrame::cast(frame_it.frame());
+ DebugInfo* debug_info = frame->native_module()->GetDebugInfo();
+
+ int num_locals = debug_info->GetNumLocals(isolate_, frame->pc());
+ CHECK_EQ(expected.locals.size(), num_locals);
+ for (int i = 0; i < num_locals; ++i) {
+ WasmValue local_value = debug_info->GetLocalValue(
+ i, isolate_, frame->pc(), frame->fp(), frame->callee_fp());
+ CHECK_EQ(WasmValWrapper{expected.locals[i]}, WasmValWrapper{local_value});
}
- CHECK_EQ(expected.stack.size(), frame->GetStackHeight());
- for (int i = 0; i < frame->GetStackHeight(); ++i) {
- CHECK_EQ(WasmValWrapper{expected.stack[i]},
- WasmValWrapper{frame->GetStackValue(i)});
+ int stack_depth = debug_info->GetStackDepth(isolate_, frame->pc());
+ CHECK_EQ(expected.stack.size(), stack_depth);
+ for (int i = 0; i < stack_depth; ++i) {
+ WasmValue stack_value = debug_info->GetStackValue(
+ i, isolate_, frame->pc(), frame->fp(), frame->callee_fp());
+ CHECK_EQ(WasmValWrapper{expected.stack[i]}, WasmValWrapper{stack_value});
}
isolate_->debug()->PrepareStep(StepAction::StepIn);
@@ -379,6 +368,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
WasmRunner<int, int> runner(execution_tier);
+ runner.TierDown();
WasmFunctionCompiler& f2 = runner.NewFunction<void>();
f2.AllocateLocal(kWasmI32);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
index 5a7b2bc201..c62aef3192 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
@@ -43,14 +43,20 @@ namespace {
template <typename... FunctionArgsT>
class TestCode {
public:
- TestCode(WasmRunnerBase* runner, std::initializer_list<byte> code)
- : compiler_(&runner->NewFunction<FunctionArgsT...>()), code_(code) {
+ TestCode(WasmRunnerBase* runner, std::initializer_list<byte> code,
+ std::initializer_list<ValueType::Kind> locals = {})
+ : compiler_(&runner->NewFunction<FunctionArgsT...>()),
+ code_(code),
+ locals_(static_cast<uint32_t>(locals.size())) {
+ for (ValueType::Kind T : locals) {
+ compiler_->AllocateLocal(ValueType(T));
+ }
compiler_->Build(code.begin(), code.end());
}
Handle<BreakPoint> BreakOnReturn(WasmRunnerBase* runner) {
runner->TierDown();
- uint32_t return_offset_in_function = FindReturn();
+ uint32_t return_offset_in_function = locals_ + FindReturn();
int function_index = compiler_->function_index();
int function_offset =
@@ -66,14 +72,6 @@ class TestCode {
break_index++, runner->main_isolate()->factory()->empty_string());
CHECK(WasmScript::SetBreakPoint(script, &return_offset_in_module,
break_point));
- int set_breakpoint_offset = return_offset_in_module - function_offset;
- // Also set breakpoint on the debug info of the instance directly, since
- // the instance chain is not set up properly in tests.
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- WasmDebugInfo::SetBreakpoint(debug_info, function_index,
- set_breakpoint_offset);
-
return break_point;
}
@@ -98,6 +96,7 @@ class TestCode {
WasmFunctionCompiler* compiler_;
std::vector<byte> code_;
+ uint32_t locals_;
};
class WasmEvaluatorBuilder {
@@ -108,6 +107,9 @@ class WasmEvaluatorBuilder {
: zone_(&allocator_, ZONE_NAME), builder_(&zone_) {
get_memory_function_index = AddImport<void, uint32_t, uint32_t, uint32_t>(
CStrVector("__getMemory"));
+ get_local_function_index =
+ AddImport<void, uint32_t, uint32_t>(CStrVector("__getLocal"));
+ sbrk_function_index = AddImport<uint32_t, uint32_t>(CStrVector("__sbrk"));
wasm_format_function =
builder_.AddFunction(WasmRunnerBase::CreateSig<uint32_t>(&zone_));
wasm_format_function->SetName(CStrVector("wasm_format"));
@@ -119,7 +121,8 @@ class WasmEvaluatorBuilder {
template <typename ReturnT, typename... ArgTs>
uint32_t AddImport(Vector<const char> name) {
return builder_.AddImport(
- name, WasmRunnerBase::CreateSig<ReturnT, ArgTs...>(&zone_));
+ name, WasmRunnerBase::CreateSig<ReturnT, ArgTs...>(&zone_),
+ CStrVector("env"));
}
void push_back(std::initializer_list<byte> code) {
@@ -127,9 +130,19 @@ class WasmEvaluatorBuilder {
static_cast<uint32_t>(code.size()));
}
+ void CallSbrk(std::initializer_list<byte> args) {
+ push_back(args);
+ push_back({WASM_CALL_FUNCTION0(sbrk_function_index)});
+ }
+
+ void CallGetLocal(std::initializer_list<byte> args) {
+ push_back(args);
+ push_back({WASM_CALL_FUNCTION0(get_local_function_index)});
+ }
+
void CallGetMemory(std::initializer_list<byte> args) {
push_back(args);
- push_back({WASM_CALL_FUNCTION0(wasm_format_function->func_index())});
+ push_back({WASM_CALL_FUNCTION0(get_memory_function_index)});
}
ZoneBuffer bytes() {
@@ -143,6 +156,8 @@ class WasmEvaluatorBuilder {
Zone zone_;
WasmModuleBuilder builder_;
uint32_t get_memory_function_index = 0;
+ uint32_t get_local_function_index = 0;
+ uint32_t sbrk_function_index = 0;
WasmFunctionBuilder* wasm_format_function = nullptr;
};
@@ -189,15 +204,12 @@ class WasmBreakHandler : public debug::DebugDelegate {
// Check the current position.
StackTraceFrameIterator frame_it(isolate_);
- FrameSummary::WasmInterpretedFrameSummary summary =
- FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
- Handle<WasmInstanceObject> instance = summary.wasm_instance();
- WasmInterpreter::FramePtr frame =
- instance->debug_info().GetInterpretedFrame(frame_it.frame()->fp(), 0);
+ WasmFrame* frame = WasmFrame::cast(frame_it.frame());
+ Handle<WasmInstanceObject> instance{frame->wasm_instance(), isolate_};
MaybeHandle<String> result_handle = v8::internal::wasm::DebugEvaluate(
{evaluator_bytes_.begin(), evaluator_bytes_.size()}, instance,
- std::move(frame));
+ frame_it.frame());
Maybe<std::string> error_message = GetPendingExceptionAsString();
Maybe<std::string> result_message =
@@ -218,7 +230,7 @@ WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_CompileFailed) {
code.BreakOnReturn(&runner);
WasmEvaluatorBuilder evaluator(execution_tier);
- // Create a module that doesn't compile by missing the END bytecode
+ // Create a module that doesn't compile by missing the END bytecode.
evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33))});
Isolate* isolate = runner.main_isolate();
@@ -262,11 +274,12 @@ WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_ExecuteFailed_SEGV) {
TestCode<int> code(&runner, {WASM_RETURN1(WASM_I32V_1(32))});
- // Create a module that doesn't compile by missing the END bytecode
- WasmEvaluatorBuilder evaluator(execution_tier);
+ // Use a max memory size of 2 here to verify the precondition for the
+ // GrowMemory test below.
+ WasmEvaluatorBuilder evaluator(execution_tier, 1, 2);
code.BreakOnReturn(&runner);
- // Load 1 byte from an address that's too high
+ // Load 1 byte from an address that's too high.
evaluator.CallGetMemory(
{WASM_I32V_1(32), WASM_I32V_1(1), WASM_I32V_3((1 << 16) + 1)});
evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33)), WASM_END});
@@ -283,6 +296,34 @@ WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_ExecuteFailed_SEGV) {
std::string::npos);
}
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_GrowMemory) {
+ WasmRunner<int> runner(execution_tier);
+ runner.builder().AddMemoryElems<int32_t>(64);
+
+ TestCode<int> code(
+ &runner,
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_I32V_1(32), WASM_I32V_2('A')),
+ WASM_RETURN1(WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(32)))});
+ code.BreakOnReturn(&runner);
+
+ WasmEvaluatorBuilder evaluator(execution_tier, 1, 2);
+ // Grow the memory.
+ evaluator.CallSbrk({WASM_I32V_1(1)});
+ // Load 1 byte from an address that's too high for the default memory.
+ evaluator.CallGetMemory(
+ {WASM_I32V_1(32), WASM_I32V_1(1), WASM_I32V_3((1 << 16) + 1)});
+ evaluator.push_back({WASM_RETURN1(WASM_I32V_3((1 << 16) + 1)), WASM_END});
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, evaluator.bytes());
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.error.IsNothing());
+ CHECK_EQ(result.result.ToChecked(), "A");
+}
+
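The GrowMemory test above deliberately reads one byte at (1 << 16) + 1: with the standard 64 KiB wasm page size (the same kWasmPageSize these cctests use) that address traps in a one-page memory, but is in bounds once the __sbrk helper has presumably grown the memory to its two-page maximum. A small bounds check of that arithmetic:

#include <cstdint>

constexpr uint64_t kWasmPageSize = 64 * 1024;

constexpr bool InBounds(uint64_t addr, uint64_t bytes, uint64_t pages) {
  return addr + bytes <= pages * kWasmPageSize;
}

static_assert(!InBounds((1 << 16) + 1, 1, 1), "out of bounds with one page");
static_assert(InBounds((1 << 16) + 1, 1, 2),
              "in bounds after growing to two pages");

int main() { return 0; }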
WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_LinearMemory) {
WasmRunner<int> runner(execution_tier);
runner.builder().AddMemoryElems<int32_t>(64);
@@ -309,6 +350,30 @@ WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_LinearMemory) {
CHECK_EQ(result.result.ToChecked(), "A");
}
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_Locals) {
+ WasmRunner<int> runner(execution_tier);
+ runner.builder().AddMemoryElems<int32_t>(64);
+
+ TestCode<int> code(
+ &runner,
+ {WASM_SET_LOCAL(0, WASM_I32V_2('A')), WASM_RETURN1(WASM_GET_LOCAL(0))},
+ {ValueType::kI32});
+ code.BreakOnReturn(&runner);
+
+ WasmEvaluatorBuilder evaluator(execution_tier);
+ evaluator.CallGetLocal({WASM_I32V_1(0), WASM_I32V_1(33)});
+ evaluator.push_back({WASM_RETURN1(WASM_I32V_1(33)), WASM_END});
+
+ Isolate* isolate = runner.main_isolate();
+ WasmBreakHandler break_handler(isolate, evaluator.bytes());
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK(result.error.IsNothing());
+ CHECK_EQ(result.result.ToChecked(), "A");
+}
+
} // namespace
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
deleted file mode 100644
index 75e927fafe..0000000000
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstdint>
-
-#include "src/base/overflowing-math.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-objects.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/value-helper.h"
-#include "test/cctest/wasm/wasm-run-utils.h"
-#include "test/common/wasm/wasm-macro-gen.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-/**
- * We test the interface from Wasm compiled code to the Wasm interpreter by
- * building a module with two functions. The external function is called from
- * this test, and will be compiled code. It takes its arguments and passes them
- * on to the internal function, which will be redirected to the interpreter.
- * If the internal function has an i64 parameter, it has to be replaced by two
- * i32 parameters on the external function.
- * The internal function just converts all its arguments to f64, sums them up
- * and returns the sum.
- */
-namespace {
-
-template <typename T>
-class ArgPassingHelper {
- public:
- ArgPassingHelper(WasmRunnerBase* runner, WasmFunctionCompiler* inner_compiler,
- std::initializer_list<uint8_t> bytes_inner_function,
- std::initializer_list<uint8_t> bytes_outer_function,
- const T& expected_lambda)
- : isolate_(runner->main_isolate()),
- expected_lambda_(expected_lambda),
- debug_info_(WasmInstanceObject::GetOrCreateDebugInfo(
- runner->builder().instance_object())) {
- std::vector<uint8_t> inner_code{bytes_inner_function};
- inner_compiler->Build(inner_code.data(),
- inner_code.data() + inner_code.size());
-
- std::vector<uint8_t> outer_code{bytes_outer_function};
- runner->Build(outer_code.data(), outer_code.data() + outer_code.size());
-
- int funcs_to_redict[] = {
- static_cast<int>(inner_compiler->function_index())};
- runner->builder().SetExecutable();
- WasmDebugInfo::RedirectToInterpreter(debug_info_,
- ArrayVector(funcs_to_redict));
- main_fun_wrapper_ = runner->builder().WrapCode(runner->function_index());
- }
-
- template <typename... Args>
- void CheckCall(Args... args) {
- Handle<Object> arg_objs[] = {isolate_->factory()->NewNumber(args)...};
-
- uint64_t num_interpreted_before = debug_info_->NumInterpretedCalls();
- Handle<Object> global(isolate_->context().global_object(), isolate_);
- MaybeHandle<Object> retval = Execution::Call(
- isolate_, main_fun_wrapper_, global, arraysize(arg_objs), arg_objs);
- uint64_t num_interpreted_after = debug_info_->NumInterpretedCalls();
- // Check that we really went through the interpreter.
- CHECK_EQ(num_interpreted_before + 1, num_interpreted_after);
- // Check the result.
- double result = retval.ToHandleChecked()->Number();
- double expected = expected_lambda_(args...);
- CHECK_DOUBLE_EQ(expected, result);
- }
-
- private:
- Isolate* isolate_;
- T expected_lambda_;
- Handle<WasmDebugInfo> debug_info_;
- Handle<JSFunction> main_fun_wrapper_;
-};
-
-template <typename T>
-static ArgPassingHelper<T> GetHelper(
- WasmRunnerBase* runner, WasmFunctionCompiler* inner_compiler,
- std::initializer_list<uint8_t> bytes_inner_function,
- std::initializer_list<uint8_t> bytes_outer_function,
- const T& expected_lambda) {
- return ArgPassingHelper<T>(runner, inner_compiler, bytes_inner_function,
- bytes_outer_function, expected_lambda);
-}
-
-} // namespace
-
-// Pass int32_t, return int32_t.
-TEST(TestArgumentPassing_int32) {
- WasmRunner<int32_t, int32_t> runner(ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 = runner.NewFunction<int32_t, int32_t>();
-
- auto helper = GetHelper(
- &runner, &f2,
- {// Return 2*<0> + 1.
- WASM_I32_ADD(WASM_I32_MUL(WASM_I32V_1(2), WASM_GET_LOCAL(0)), WASM_ONE)},
- {// Call f2 with param <0>.
- WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
- [](int32_t a) {
- return base::AddWithWraparound(base::MulWithWraparound(2, a), 1);
- });
-
- FOR_INT32_INPUTS(v) { helper.CheckCall(v); }
-}
-
-// Pass int64_t, return double.
-TEST(TestArgumentPassing_double_int64) {
- WasmRunner<double, int32_t, int32_t> runner(ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 = runner.NewFunction<double, int64_t>();
-
- auto helper = GetHelper(
- &runner, &f2,
- {// Return (double)<0>.
- WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0))},
- {// Call f2 with param (<0> | (<1> << 32)).
- WASM_I64_IOR(WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)),
- WASM_I64_SHL(WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(1)),
- WASM_I64V_1(32))),
- WASM_CALL_FUNCTION0(f2.function_index())},
- [](int32_t a, int32_t b) {
- int64_t a64 = static_cast<int64_t>(a) & 0xFFFFFFFF;
- int64_t b64 = static_cast<uint64_t>(static_cast<int64_t>(b)) << 32;
- return static_cast<double>(a64 | b64);
- });
-
- FOR_INT32_INPUTS(v1) {
- FOR_INT32_INPUTS(v2) { helper.CheckCall(v1, v2); }
- }
-
- FOR_INT64_INPUTS(v) {
- int32_t v1 = static_cast<int32_t>(v);
- int32_t v2 = static_cast<int32_t>(v >> 32);
- helper.CheckCall(v1, v2);
- helper.CheckCall(v2, v1);
- }
-}
-
-// Pass double, return int64_t.
-TEST(TestArgumentPassing_int64_double) {
- // Outer function still returns double.
- WasmRunner<double, double> runner(ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 = runner.NewFunction<int64_t, double>();
-
- auto helper = GetHelper(
- &runner, &f2,
- {// Return (int64_t)<0>.
- WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0))},
- {// Call f2 with param <0>, convert returned value back to double.
- WASM_F64_SCONVERT_I64(WASM_SEQ(
- WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())))},
- [](double d) { return d; });
-
- for (int64_t i : compiler::ValueHelper::int64_vector()) {
- helper.CheckCall(i);
- }
-}
-
-// Pass float, return double.
-TEST(TestArgumentPassing_float_double) {
- WasmRunner<double, float> runner(ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 = runner.NewFunction<double, float>();
-
- auto helper = GetHelper(
- &runner, &f2,
- {// Return 2*(double)<0> + 1.
- WASM_F64_ADD(
- WASM_F64_MUL(WASM_F64(2), WASM_F64_CONVERT_F32(WASM_GET_LOCAL(0))),
- WASM_F64(1))},
- {// Call f2 with param <0>.
- WASM_GET_LOCAL(0), WASM_CALL_FUNCTION0(f2.function_index())},
- [](float f) { return 2. * static_cast<double>(f) + 1.; });
-
- FOR_FLOAT32_INPUTS(f) { helper.CheckCall(f); }
-}
-
-// Pass two doubles, return double.
-TEST(TestArgumentPassing_double_double) {
- WasmRunner<double, double, double> runner(ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 = runner.NewFunction<double, double, double>();
-
- auto helper = GetHelper(&runner, &f2,
- {// Return <0> + <1>.
- WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))},
- {// Call f2 with params <0>, <1>.
- WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_CALL_FUNCTION0(f2.function_index())},
- [](double a, double b) { return a + b; });
-
- FOR_FLOAT64_INPUTS(d1) {
- FOR_FLOAT64_INPUTS(d2) { helper.CheckCall(d1, d2); }
- }
-}
-
-// Pass int32_t, int64_t, float and double, return double.
-TEST(TestArgumentPassing_AllTypes) {
- // The second and third argument will be combined to an i64.
- WasmRunner<double, int32_t, int32_t, int32_t, float, double> runner(
- ExecutionTier::kTurbofan);
- WasmFunctionCompiler& f2 =
- runner.NewFunction<double, int32_t, int64_t, float, double>();
-
- auto helper = GetHelper(
- &runner, &f2,
- {
- // Convert all arguments to double, add them and return the sum.
- WASM_F64_ADD( // <0+1+2> + <3>
- WASM_F64_ADD( // <0+1> + <2>
- WASM_F64_ADD( // <0> + <1>
- WASM_F64_SCONVERT_I32(
- WASM_GET_LOCAL(0)), // <0> to double
- WASM_F64_SCONVERT_I64(
- WASM_GET_LOCAL(1))), // <1> to double
- WASM_F64_CONVERT_F32(WASM_GET_LOCAL(2))), // <2> to double
- WASM_GET_LOCAL(3)) // <3>
- },
- {WASM_GET_LOCAL(0), // first arg
- WASM_I64_IOR(WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(1)), // second arg
- WASM_I64_SHL(WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(2)),
- WASM_I64V_1(32))),
- WASM_GET_LOCAL(3), // third arg
- WASM_GET_LOCAL(4), // fourth arg
- WASM_CALL_FUNCTION0(f2.function_index())},
- [](int32_t a, int32_t b, int32_t c, float d, double e) {
- return 0. + a + (static_cast<int64_t>(b) & 0xFFFFFFFF) +
- ((static_cast<int64_t>(c) & 0xFFFFFFFF) << 32) + d + e;
- });
-
- auto CheckCall = [&](int32_t a, int64_t b, float c, double d) {
- int32_t b0 = static_cast<int32_t>(b);
- int32_t b1 = static_cast<int32_t>(b >> 32);
- helper.CheckCall(a, b0, b1, c, d);
- helper.CheckCall(a, b1, b0, c, d);
- };
-
- Vector<const int32_t> test_values_i32 = compiler::ValueHelper::int32_vector();
- Vector<const int64_t> test_values_i64 = compiler::ValueHelper::int64_vector();
- Vector<const float> test_values_f32 = compiler::ValueHelper::float32_vector();
- Vector<const double> test_values_f64 =
- compiler::ValueHelper::float64_vector();
- size_t max_len =
- std::max(std::max(test_values_i32.size(), test_values_i64.size()),
- std::max(test_values_f32.size(), test_values_f64.size()));
- for (size_t i = 0; i < max_len; ++i) {
- int32_t i32 = test_values_i32[i % test_values_i32.size()];
- int64_t i64 = test_values_i64[i % test_values_i64.size()];
- float f32 = test_values_f32[i % test_values_f32.size()];
- double f64 = test_values_f64[i % test_values_f64.size()];
- CheckCall(i32, i64, f32, f64);
- }
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 2a486303e8..75bdff0571 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -69,10 +69,11 @@ class WasmSerializationTest {
base::WriteUnalignedValue<uint32_t>(num_functions_slot, 0);
}
- MaybeHandle<WasmModuleObject> Deserialize() {
+ MaybeHandle<WasmModuleObject> Deserialize(
+ Vector<const char> source_url = {}) {
return DeserializeNativeModule(CcTest::i_isolate(),
VectorOf(serialized_bytes_),
- VectorOf(wire_bytes_), {});
+ VectorOf(wire_bytes_), source_url);
}
void DeserializeAndRun() {
@@ -200,6 +201,19 @@ TEST(DeserializeValidModule) {
test.CollectGarbage();
}
+TEST(DeserializeWithSourceUrl) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(CcTest::i_isolate());
+ const std::string url = "http://example.com/example.wasm";
+ Handle<WasmModuleObject> module_object;
+ CHECK(test.Deserialize(VectorOf(url)).ToHandle(&module_object));
+ String source_url = String::cast(module_object->script().source_url());
+ CHECK_EQ(url, source_url.ToCString().get());
+ }
+ test.CollectGarbage();
+}
+
TEST(DeserializeMismatchingVersion) {
WasmSerializationTest test;
{
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index badeb8e7a0..3639569f89 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -86,7 +86,8 @@ class SharedEngineIsolate {
Handle<WasmInstanceObject> ImportInstance(SharedModule shared_module) {
Handle<WasmModuleObject> module_object =
- isolate()->wasm_engine()->ImportNativeModule(isolate(), shared_module);
+ isolate()->wasm_engine()->ImportNativeModule(isolate(), shared_module,
+ {});
ErrorThrower thrower(isolate(), "ImportInstance");
MaybeHandle<WasmInstanceObject> instance =
isolate()->wasm_engine()->SyncInstantiate(isolate(), &thrower,
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 864b8885a2..7ef79a6350 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -104,7 +104,7 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
} // namespace
// Call from JS to wasm to JS and throw an Error from JS.
-WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
+WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
TestSignatures sigs;
HandleScope scope(CcTest::InitIsolateOnce());
const char* source =
@@ -153,7 +153,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
}
// Trigger a trap in wasm, stack should contain a source url.
-WASM_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
+WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
// Create a WasmRunner with stack checks and traps enabled.
WasmRunner<int> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
@@ -210,7 +210,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
}
// Trigger a trap in wasm, stack should be JS -> wasm -> wasm.
-WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
+WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
for (int pos_shift = 0; pos_shift < 3; ++pos_shift) {
// Test a position with 1, 2 or 3 bytes needed to represent it.
int unreachable_pos = 1 << (8 * pos_shift);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 21808b0557..2ffd72aaaf 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -66,7 +66,7 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
} // namespace
// Trigger a trap for executing unreachable.
-WASM_EXEC_TEST(Unreachable) {
+WASM_COMPILED_EXEC_TEST(Unreachable) {
// Create a WasmRunner with stack checks and traps enabled.
WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
@@ -100,7 +100,7 @@ WASM_EXEC_TEST(Unreachable) {
}
// Trigger a trap for loading from out-of-bounds.
-WASM_EXEC_TEST(IllegalLoad) {
+WASM_COMPILED_EXEC_TEST(IllegalLoad) {
WasmRunner<void> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 96980e6df7..15903bcb3a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -18,35 +18,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-template <>
-void AppendSingle(std::vector<byte>* code, WasmOpcode op) {
- // We do not yet have opcodes that take up more than 2 byte (decoded). But if
- // that changes, this will need to be updated.
- DCHECK_EQ(0, op >> 16);
- byte prefix = (op >> 8) & 0xff;
- byte opcode = op & 0xff;
-
- if (!prefix) {
- code->push_back(opcode);
- return;
- }
-
- // Ensure the prefix is really one of the supported prefixed opcodes.
- DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(prefix)));
- code->push_back(prefix);
-
- // Decoded opcodes fit in a byte (0x00-0xff).
- DCHECK_LE(LEBHelper::sizeof_u32v(opcode), 2);
- // Therefore, the encoding needs max 2 bytes.
- uint8_t encoded[2];
- uint8_t* d = encoded;
- // d is updated to after the last uint8_t written.
- LEBHelper::write_u32v(&d, opcode);
- for (uint8_t* p = encoded; p < d; p++) {
- code->push_back(*p);
- }
-}
-
TestingModuleBuilder::TestingModuleBuilder(
Zone* zone, ManuallyImportedJSFunction* maybe_import, ExecutionTier tier,
RuntimeExceptionSupport exception_support, LowerSimd lower_simd)
@@ -164,22 +135,11 @@ uint32_t TestingModuleBuilder::AddFunction(const FunctionSig* sig,
test_module_->num_declared_functions);
if (name) {
Vector<const byte> name_vec = Vector<const byte>::cast(CStrVector(name));
- test_module_->function_names.AddForTesting(
+ test_module_->lazily_generated_names.AddForTesting(
index, {AddBytes(name_vec), static_cast<uint32_t>(name_vec.length())});
}
if (interpreter_) {
interpreter_->AddFunctionForTesting(&test_module_->functions.back());
- // Patch the jump table to call the interpreter for this function.
- wasm::WasmCompilationResult result = compiler::CompileWasmInterpreterEntry(
- isolate_->wasm_engine(), native_module_->enabled_features(), index,
- sig);
- std::unique_ptr<wasm::WasmCode> code = native_module_->AddCode(
- index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kInterpreterEntry,
- wasm::ExecutionTier::kInterpreter);
- native_module_->PublishCode(std::move(code));
}
DCHECK_LT(index, kMaxFunctions); // limited for testing.
return index;
@@ -195,6 +155,7 @@ void TestingModuleBuilder::FreezeSignatureMapAndInitializeWrapperCache() {
}
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
+ CHECK(!interpreter_);
FreezeSignatureMapAndInitializeWrapperCache();
SetExecutable();
return WasmInstanceObject::GetOrCreateWasmExternalFunction(
@@ -338,18 +299,14 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
return index;
}
-CompilationEnv TestingModuleBuilder::CreateCompilationEnv(
- AssumeDebugging debug) {
+CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
// This is a hack so we don't need to call
// trap_handler::IsTrapHandlerEnabled().
const bool is_trap_handler_enabled =
V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler;
return {test_module_ptr_,
is_trap_handler_enabled ? kUseTrapHandler : kNoTrapHandler,
- runtime_exception_support_,
- enabled_features_,
- lower_simd(),
- debug};
+ runtime_exception_support_, enabled_features_, lower_simd()};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -364,10 +321,6 @@ const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
}
Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
- Handle<Script> script =
- isolate_->factory()->NewScript(isolate_->factory()->empty_string());
- script->set_type(Script::TYPE_WASM);
-
const bool kUsesLiftoff = true;
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(test_module_.get(),
@@ -375,6 +328,8 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
auto native_module = isolate_->wasm_engine()->NewNativeModule(
isolate_, enabled_features_, test_module_, code_size_estimate);
native_module->SetWireBytes(OwnedVector<const uint8_t>());
+ Handle<Script> script =
+ isolate_->wasm_engine()->GetOrCreateScript(isolate_, native_module);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate_, std::move(native_module), script);
@@ -580,13 +535,17 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
func_wire_bytes.begin(), func_wire_bytes.end()};
NativeModule* native_module =
builder_->instance_object()->module_object().native_module();
- WasmCompilationUnit unit(function_->func_index, builder_->execution_tier());
+ ForDebugging for_debugging =
+ native_module->IsTieredDown() ? kForDebugging : kNoDebugging;
+ WasmCompilationUnit unit(function_->func_index, builder_->execution_tier(),
+ for_debugging);
WasmFeatures unused_detected_features;
WasmCompilationResult result = unit.ExecuteCompilation(
isolate()->wasm_engine(), &env,
native_module->compilation_state()->GetWireBytesStorage(),
isolate()->counters(), &unused_detected_features);
- WasmCode* code = native_module->AddCompiledCode(std::move(result));
+ WasmCode* code = native_module->PublishCode(
+ native_module->AddCompiledCode(std::move(result)));
DCHECK_NOT_NULL(code);
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index d1bc9293b6..5d008d882a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -75,42 +75,6 @@ using compiler::Node;
r.Build(code, code + arraysize(code)); \
} while (false)
-template <typename T>
-void AppendSingle(std::vector<byte>* code, T t) {
- static_assert(std::is_integral<T>::value,
- "Special types need specializations");
- code->push_back(t);
-}
-
-// Specialized for WasmOpcode.
-template <>
-void AppendSingle<WasmOpcode>(std::vector<byte>* code, WasmOpcode op);
-
-template <typename... T>
-void Append(std::vector<byte>* code, T... ts) {
- static_assert(sizeof...(ts) == 0, "Base case for appending bytes to code.");
-}
-
-template <typename First, typename... Rest>
-void Append(std::vector<byte>* code, First first, Rest... rest) {
- AppendSingle(code, first);
- Append(code, rest...);
-}
-
-// Like BUILD but pushes code bytes into a std::vector instead of an array
-// initializer. This is useful for opcodes (like SIMD), that are LEB128
-// (variable-sized). We use recursive template instantiations with variadic
-// template arguments, so that the Append calls can handle either bytes or
-// opcodes. AppendSingle is specialized for WasmOpcode, and appends multiple
-// bytes. This allows existing callers to swap out the BUILD macro for BUILD_V
-// macro without changes. Also see https://crbug.com/v8/10258.
-#define BUILD_V(r, ...) \
- do { \
- std::vector<byte> code; \
- Append(&code, __VA_ARGS__); \
- r.Build(code.data(), code.data() + code.size()); \
- } while (false)
-
// For tests that must manually import a JSFunction with source code.
struct ManuallyImportedJSFunction {
const FunctionSig* sig;
@@ -146,12 +110,10 @@ class TestingModuleBuilder {
}
byte AddSignature(const FunctionSig* sig) {
- DCHECK_EQ(test_module_->signatures.size(),
- test_module_->signature_ids.size());
- test_module_->signatures.push_back(sig);
- auto canonical_sig_num = test_module_->signature_map.FindOrInsert(*sig);
- test_module_->signature_ids.push_back(canonical_sig_num);
- size_t size = test_module_->signatures.size();
+ // TODO(7748): This will need updating for struct/array types support.
+ DCHECK_EQ(test_module_->types.size(), test_module_->signature_ids.size());
+ test_module_->add_signature(sig);
+ size_t size = test_module_->types.size();
CHECK_GT(127, size);
return static_cast<byte>(size - 1);
}
@@ -258,10 +220,13 @@ class TestingModuleBuilder {
void SetExecutable() { native_module_->SetExecutable(true); }
- void TierDown() { native_module_->TierDown(isolate_); }
+ void TierDown() {
+ native_module_->SetTieringState(kTieredDown);
+ native_module_->TriggerRecompilation();
+ execution_tier_ = ExecutionTier::kLiftoff;
+ }
- enum AssumeDebugging : bool { kDebug = true, kNoDebug = false };
- CompilationEnv CreateCompilationEnv(AssumeDebugging = kNoDebug);
+ CompilationEnv CreateCompilationEnv();
ExecutionTier execution_tier() const { return execution_tier_; }
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 1da88a406a..e8c819eac4 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -89,10 +89,15 @@
#define WASM_BLOCK_F(...) kExprBlock, kLocalF32, __VA_ARGS__, kExprEnd
#define WASM_BLOCK_D(...) kExprBlock, kLocalF64, __VA_ARGS__, kExprEnd
+#define TYPE_IMM(t) \
+ static_cast<byte>((t).value_type_code()), U32V_1((t).ref_index())
+
#define WASM_BLOCK_T(t, ...) \
kExprBlock, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
-#define WASM_BLOCK_X(index, ...) \
+#define WASM_BLOCK_R(t, ...) kExprBlock, TYPE_IMM(t), __VA_ARGS__, kExprEnd
+
+#define WASM_BLOCK_X(index, ...) \
kExprBlock, static_cast<byte>(index), __VA_ARGS__, kExprEnd
#define WASM_INFINITE_LOOP kExprLoop, kLocalVoid, kExprBr, DEPTH_0, kExprEnd
@@ -106,7 +111,9 @@
#define WASM_LOOP_T(t, ...) \
kExprLoop, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
-#define WASM_LOOP_X(index, ...) \
+#define WASM_LOOP_R(t, ...) kExprLoop, TYPE_IMM(t), __VA_ARGS__, kExprEnd
+
+#define WASM_LOOP_X(index, ...) \
kExprLoop, static_cast<byte>(index), __VA_ARGS__, kExprEnd
#define WASM_IF(cond, ...) cond, kExprIf, kLocalVoid, __VA_ARGS__, kExprEnd
@@ -114,6 +121,9 @@
#define WASM_IF_T(t, cond, ...) \
cond, kExprIf, static_cast<byte>((t).value_type_code()), __VA_ARGS__, kExprEnd
+#define WASM_IF_R(t, cond, ...) \
+ cond, kExprIf, TYPE_IMM(t), __VA_ARGS__, kExprEnd
+
#define WASM_IF_X(index, cond, ...) \
cond, kExprIf, static_cast<byte>(index), __VA_ARGS__, kExprEnd
@@ -133,12 +143,17 @@
cond, kExprIf, static_cast<byte>((t).value_type_code()), tstmt, kExprElse, \
fstmt, kExprEnd
+#define WASM_IF_ELSE_R(t, cond, tstmt, fstmt) \
+ cond, kExprIf, TYPE_IMM(t), tstmt, kExprElse, fstmt, kExprEnd
+
#define WASM_IF_ELSE_X(index, cond, tstmt, fstmt) \
cond, kExprIf, static_cast<byte>(index), tstmt, kExprElse, fstmt, kExprEnd
#define WASM_TRY_CATCH_T(t, trystmt, catchstmt) \
kExprTry, static_cast<byte>((t).value_type_code()), trystmt, kExprCatch, \
catchstmt, kExprEnd
+#define WASM_TRY_CATCH_R(t, trystmt, catchstmt) \
+ kExprTry, TYPE_IMM(t), trystmt, kExprCatch, catchstmt, kExprEnd
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
#define WASM_SELECT_I(tval, fval, cond) \
@@ -358,10 +373,6 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 48), \
static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 56)
-#define WASM_REF_NULL kExprRefNull
-#define WASM_REF_FUNC(val) kExprRefFunc, val
-#define WASM_REF_IS_NULL(val) val, kExprRefIsNull
-
#define WASM_GET_LOCAL(index) kExprLocalGet, static_cast<byte>(index)
#define WASM_SET_LOCAL(index, val) val, kExprLocalSet, static_cast<byte>(index)
#define WASM_TEE_LOCAL(index, val) val, kExprLocalTee, static_cast<byte>(index)
@@ -409,6 +420,35 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define TABLE_ZERO 0
+//------------------------------------------------------------------------------
+// Heap-allocated object operations.
+//------------------------------------------------------------------------------
+#define WASM_GC_OP(op) kGCPrefix, static_cast<byte>(op)
+#define WASM_STRUCT_NEW(index, ...) \
+ __VA_ARGS__, WASM_GC_OP(kExprStructNew), static_cast<byte>(index)
+#define WASM_STRUCT_GET(typeidx, fieldidx, struct_obj) \
+ struct_obj, WASM_GC_OP(kExprStructGet), static_cast<byte>(typeidx), \
+ static_cast<byte>(fieldidx)
+#define WASM_STRUCT_SET(typeidx, fieldidx, struct_obj, value) \
+ struct_obj, value, WASM_GC_OP(kExprStructSet), static_cast<byte>(typeidx), \
+ static_cast<byte>(fieldidx)
+#define WASM_REF_NULL kExprRefNull
+#define WASM_REF_FUNC(val) kExprRefFunc, val
+#define WASM_REF_IS_NULL(val) val, kExprRefIsNull
+#define WASM_REF_AS_NON_NULL(val) val, kExprRefAsNonNull
+#define WASM_REF_EQ(lhs, rhs) lhs, rhs, kExprRefEq
+
+#define WASM_ARRAY_NEW(index, default_value, length) \
+ default_value, length, WASM_GC_OP(kExprArrayNew), static_cast<byte>(index)
+#define WASM_ARRAY_GET(typeidx, array, index) \
+ array, index, WASM_GC_OP(kExprArrayGet), static_cast<byte>(typeidx)
+#define WASM_ARRAY_SET(typeidx, array, index, value) \
+ array, index, value, WASM_GC_OP(kExprArraySet), static_cast<byte>(typeidx)
+#define WASM_ARRAY_LEN(typeidx, array) \
+ array, WASM_GC_OP(kExprArrayLen), static_cast<byte>(typeidx)
+
+#define WASM_BR_ON_NULL(depth, ref_object) \
+ ref_object, kExprBrOnNull, static_cast<byte>(depth)
// Pass: sig_index, ...args, func_index
#define WASM_CALL_INDIRECT(sig_index, ...) \
__VA_ARGS__, kExprCallIndirect, static_cast<byte>(sig_index), TABLE_ZERO
@@ -686,7 +726,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_ATOMICS_FENCE WASM_ATOMICS_OP(kExprAtomicFence), ZERO_OFFSET
//------------------------------------------------------------------------------
-// Sign Externsion Operations.
+// Sign Extension Operations.
//------------------------------------------------------------------------------
#define WASM_I32_SIGN_EXT_I8(x) x, kExprI32SExtendI8
#define WASM_I32_SIGN_EXT_I16(x) x, kExprI32SExtendI16
@@ -695,18 +735,100 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_I64_SIGN_EXT_I32(x) x, kExprI64SExtendI32
//------------------------------------------------------------------------------
+// SIMD Operations.
+//------------------------------------------------------------------------------
+#define TO_BYTE(val) static_cast<byte>(val)
+// Encode all simd ops as a 2-byte LEB.
+#define WASM_SIMD_OP(op) kSimdPrefix, U32V_2(op & 0xff)
+#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
+#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
+#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
+#define WASM_SIMD_SHIFT_OP(op, x, y) x, y, WASM_SIMD_OP(op)
+#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
+ x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
+#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
+
+#define WASM_SIMD_F64x2_SPLAT(x) WASM_SIMD_SPLAT(F64x2, x)
+#define WASM_SIMD_F64x2_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprF64x2ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_F64x2_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprF64x2ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_F32x4_SPLAT(x) WASM_SIMD_SPLAT(F32x4, x)
+#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_I64x2_SPLAT(x) WASM_SIMD_SPLAT(I64x2, x)
+#define WASM_SIMD_I64x2_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprI64x2ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_I64x2_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprI64x2ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_I32x4_SPLAT(x) WASM_SIMD_SPLAT(I32x4, x)
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_I16x8_SPLAT(x) WASM_SIMD_SPLAT(I16x8, x)
+#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprI16x8ExtractLaneS), TO_BYTE(lane)
+#define WASM_SIMD_I16x8_EXTRACT_LANE_U(lane, x) \
+ x, WASM_SIMD_OP(kExprI16x8ExtractLaneU), TO_BYTE(lane)
+#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_I8x16_SPLAT(x) WASM_SIMD_SPLAT(I8x16, x)
+#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprI8x16ExtractLaneS), TO_BYTE(lane)
+#define WASM_SIMD_I8x16_EXTRACT_LANE_U(lane, x) \
+ x, WASM_SIMD_OP(kExprI8x16ExtractLaneU), TO_BYTE(lane)
+#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y) \
+ x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
+ TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]), \
+ TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]), \
+ TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]), \
+ TO_BYTE(m[15])
+
+#define WASM_SIMD_LOAD_MEM(index) \
+ index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_LOAD_MEM_OFFSET(offset, index) \
+ index, WASM_SIMD_OP(kExprS128LoadMem), ZERO_ALIGNMENT, offset
+#define WASM_SIMD_STORE_MEM(index, val) \
+ index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_STORE_MEM_OFFSET(offset, index, val) \
+ index, val, WASM_SIMD_OP(kExprS128StoreMem), ZERO_ALIGNMENT, offset
+
+#define WASM_SIMD_F64x2_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfma)
+#define WASM_SIMD_F64x2_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF64x2Qfms)
+#define WASM_SIMD_F32x4_QFMA(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfma)
+#define WASM_SIMD_F32x4_QFMS(a, b, c) a, b, c, WASM_SIMD_OP(kExprF32x4Qfms)
+
+#define WASM_SIMD_LOAD_SPLAT(opcode, index) \
+ index, WASM_SIMD_OP(opcode), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_LOAD_SPLAT_OFFSET(opcode, index, offset) \
+ index, WASM_SIMD_OP(opcode), ZERO_ALIGNMENT, offset
+#define WASM_SIMD_LOAD_EXTEND(opcode, index) \
+ index, WASM_SIMD_OP(opcode), ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_SIMD_LOAD_EXTEND_ALIGNMENT(opcode, index, alignment) \
+ index, WASM_SIMD_OP(opcode), alignment, ZERO_OFFSET
+
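WASM_SIMD_OP above forces every SIMD opcode to a fixed two-byte LEB immediate via U32V_2 (defined earlier in this header, outside this hunk). Assuming U32V_2 emits the straightforward padded form, this sketch shows that encoding: the first byte always carries the continuation bit, and a decoder that accepts non-minimal LEBs reads back the original opcode byte.

#include <cstdint>
#include <utility>

// Illustration only: fixed-width two-byte unsigned LEB128 of a value < 2^14.
// For example, EncodeU32Leb2(0x0c) yields {0x8c, 0x00}.
std::pair<uint8_t, uint8_t> EncodeU32Leb2(uint32_t value) {
  return {static_cast<uint8_t>((value & 0x7f) | 0x80),
          static_cast<uint8_t>((value >> 7) & 0x7f)};
}

int main() { return 0; }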
+//------------------------------------------------------------------------------
// Compilation Hints.
//------------------------------------------------------------------------------
#define COMPILE_STRATEGY_DEFAULT (0x00)
#define COMPILE_STRATEGY_LAZY (0x01)
#define COMPILE_STRATEGY_EAGER (0x02)
#define BASELINE_TIER_DEFAULT (0x00 << 2)
-#define BASELINE_TIER_INTERPRETER (0x01 << 2)
-#define BASELINE_TIER_BASELINE (0x02 << 2)
-#define BASELINE_TIER_OPTIMIZED (0x03 << 2)
+#define BASELINE_TIER_BASELINE (0x01 << 2)
+#define BASELINE_TIER_OPTIMIZED (0x02 << 2)
#define TOP_TIER_DEFAULT (0x00 << 4)
-#define TOP_TIER_INTERPRETER (0x01 << 4)
-#define TOP_TIER_BASELINE (0x02 << 4)
-#define TOP_TIER_OPTIMIZED (0x03 << 4)
+#define TOP_TIER_BASELINE (0x01 << 4)
+#define TOP_TIER_OPTIMIZED (0x02 << 4)
#endif // V8_WASM_MACRO_GEN_H_
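The compilation-hint constants above are pre-shifted bit fields (strategy in bits 0-1, baseline tier in bits 2-3, top tier in bits 4-5); dropping the interpreter tier renumbers the remaining tiers. A hypothetical composition of one hint byte from these constants, assuming they are simply OR-ed together (the consumer of the hint section is outside this diff):

#include <cstdint>

// Mirrors the macro values above; the names here are illustrative only.
constexpr uint8_t kStrategyLazy = 0x01;               // COMPILE_STRATEGY_LAZY
constexpr uint8_t kBaselineTierBaseline = 0x01 << 2;  // BASELINE_TIER_BASELINE
constexpr uint8_t kTopTierOptimized = 0x02 << 4;      // TOP_TIER_OPTIMIZED

constexpr uint8_t MakeHint(uint8_t strategy, uint8_t baseline, uint8_t top) {
  return static_cast<uint8_t>(strategy | baseline | top);
}

static_assert(MakeHint(kStrategyLazy, kBaselineTierBaseline,
                       kTopTierOptimized) == 0x25,
              "lazy strategy, baseline first tier, optimized top tier");

int main() { return 0; }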
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 067188bba0..83e2f3824b 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -106,6 +106,9 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
case ValueType::kFuncRef:
case ValueType::kNullRef:
case ValueType::kExnRef:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kEqRef:
arguments[i] =
WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
break;
diff --git a/deps/v8/test/debugger/debug/debug-liveedit-patch-positions-replace.js b/deps/v8/test/debugger/debug/debug-liveedit-patch-positions-replace.js
index 21d2d36f6a..edb086c805 100644
--- a/deps/v8/test/debugger/debug/debug-liveedit-patch-positions-replace.js
+++ b/deps/v8/test/debugger/debug/debug-liveedit-patch-positions-replace.js
@@ -65,7 +65,7 @@ function CallM(changer) {
}
// These several iterations should cause a call IC for the BeingReplaced call. This IC
-// will keep reference to code object of BeingRepalced function. This reference
+// will keep reference to code object of BeingReplaced function. This reference
// should also be patched. Unfortunately, this is a manually checked fact (from
// debugger or debug print) and doesn't work as an automatic test.
CallM(NoOp);
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-caught.js b/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-caught.js
new file mode 100644
index 0000000000..a5df680d68
--- /dev/null
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-caught.js
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-promise-any
+
+// Test debug events when we only listen to uncaught exceptions and a
+// Promise p3 created by Promise.any has a catch handler, and is rejected
+// because the Promise p2 passed to Promise.any is rejected. We
+// expect no Exception debug event to be triggered, since p3 and by
+// extension p2 have a catch handler.
+
+let Debug = debug.Debug;
+
+let expected_events = 2;
+
+let p1 = Promise.resolve();
+p1.name = "p1";
+
+let p2 = p1.then(function() {
+ throw new Error("caught");
+});
+p2.name = "p2";
+
+let p3 = Promise.any([p2]);
+p3.name = "p3";
+
+p3.catch(function(e) {});
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ assertTrue(event != Debug.DebugEvent.Exception)
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-uncaught.js b/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-uncaught.js
new file mode 100644
index 0000000000..a7bb0ad70a
--- /dev/null
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/promise-any-uncaught.js
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-promise-any
+
+// Test debug events when we only listen to uncaught exceptions and a
+// Promise p3 created by Promise.any has no catch handler, and is rejected
+// because the Promise p2 passed to Promise.any is rejected.
+// We expect one event for p2; the system recognizes the rejection of p3
+// as redundant because it is based on the rejection of p2, and does not
+// trigger an additional event.
+
+let Debug = debug.Debug;
+
+let expected_events = 1;
+let log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ expected_events--;
+ assertTrue(expected_events >= 0);
+ assertEquals("uncaught", event_data.exception().message);
+ // Assert that the debug event is triggered at the throw site.
+ assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+ assertTrue(event_data.uncaught());
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+let p1 = Promise.resolve();
+p1.name = "p1";
+
+let p2 = p1.then(function() {
+ log.push("throw");
+ throw new Error("uncaught"); // event
+});
+p2.name = "p2";
+
+let p3 = Promise.any([p2]);
+p3.name = "p3";
+
+log.push("end main");
+
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected_events === 0) {
+ assertEquals(["end main", "throw"], log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ %EnqueueMicrotask(checkResult);
+}
+
+testDone(0);
diff --git a/deps/v8/test/debugger/debug/regress/regress-10319.js b/deps/v8/test/debugger/debug/regress/regress-10319.js
new file mode 100644
index 0000000000..7ab901958d
--- /dev/null
+++ b/deps/v8/test/debugger/debug/regress/regress-10319.js
@@ -0,0 +1,46 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var Debug = debug.Debug;
+
+var frame;
+
+Debug.setListener(function (event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ frame = exec_state.frame(0);
+
+ // Try changing the value, which hasn't yet been initialized.
+ assertEquals(3, frame.evaluate("result = 3").value());
+ assertEquals(3, frame.evaluate("result").value());
+ }
+});
+
+function makeCounter() {
+ // If the variable `result` were stack-allocated, it would be 3 at this point
+ // due to the debugging activity during function entry. However, since `result`
+ // is heap-allocated, the debugger evaluated `result = 3` in a temporary scope
+ // instead, which had no effect on this variable.
+ assertEquals(undefined, result);
+
+ var result = 0;
+
+ // Regardless of how `result` is allocated, it should now be initialized.
+ assertEquals(0, result);
+
+ // Close over `result` to cause it to be heap-allocated.
+ return () => ++result;
+}
+
+// Break on entry to a function which includes heap-allocated variables.
+%ScheduleBreak();
+makeCounter();
+
+// Check the frame state which was collected during the breakpoint.
+assertEquals(1, frame.localCount());
+assertEquals('result', frame.localName(0));
+assertEquals(undefined, frame.localValue(0).value());
+assertEquals(3, frame.scopeCount());
+assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
+assertEquals(debug.ScopeType.Script, frame.scope(1).scopeType());
+assertEquals(debug.ScopeType.Global, frame.scope(2).scopeType());
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
index e7ba10349c..686aea16b1 100644
--- a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff.js
@@ -4,7 +4,10 @@
// Flags: --experimental-wasm-anyref
-load("test/mjsunit/wasm/wasm-module-builder.js");
+// Test that tiering up and tiering down work even if functions cannot be
+// compiled with Liftoff.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
// Create a simple Wasm module.
function create_builder(i) {
diff --git a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
index 20dc1e1c5e..d2d1c5480f 100644
--- a/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
+++ b/deps/v8/test/debugger/debug/wasm/debug-enabled-tier-down-wasm.js
@@ -23,7 +23,7 @@ function checkTieredDown(instance) {
}
}
-function checkTieredUp(instance) {
+function waitForTieredUp(instance) {
// Busy waiting until all functions are tiered up.
let num_liftoff_functions = 0;
while (true) {
@@ -37,6 +37,8 @@ function checkTieredUp(instance) {
}
}
+// In the 'isolates' test, this test runs in parallel to itself on two isolates.
+// All checks below should still hold.
const instance = create_builder().instantiate();
const Debug = new DebugWrapper();
Debug.enable();
@@ -44,8 +46,9 @@ checkTieredDown(instance);
const newInstance = create_builder(num_functions*2).instantiate();
checkTieredDown(newInstance);
Debug.disable();
-checkTieredUp(instance);
-checkTieredUp(newInstance);
+// Eventually the instances will be completely tiered up again.
+waitForTieredUp(instance);
+waitForTieredUp(newInstance);
// Async.
async function testTierDownToLiftoffAsync() {
@@ -55,8 +58,8 @@ async function testTierDownToLiftoffAsync() {
const newAsyncInstance = await create_builder(num_functions*3).asyncInstantiate();
checkTieredDown(newAsyncInstance);
Debug.disable();
- checkTieredUp(asyncInstance);
- checkTieredUp(newAsyncInstance);
+ waitForTieredUp(asyncInstance);
+ waitForTieredUp(newAsyncInstance);
}
assertPromiseResult(testTierDownToLiftoffAsync());
diff --git a/deps/v8/test/debugger/debug/wasm/debug-step-into-wasm.js b/deps/v8/test/debugger/debug/wasm/debug-step-into-wasm.js
index 71ae2a61e7..8454c24668 100644
--- a/deps/v8/test/debugger/debug/wasm/debug-step-into-wasm.js
+++ b/deps/v8/test/debugger/debug/wasm/debug-step-into-wasm.js
@@ -85,8 +85,8 @@ Debug.setListener(listener2);
f();
Debug.setListener(null);
-assertEquals(break_count, 3);
-assertEquals(js_break_line, 3);
-assertEquals(wasm_break_count, 4);
-assertEquals(break_count2, 2);
+assertEquals(3, break_count);
+assertEquals(3, js_break_line);
+assertEquals(4, wasm_break_count);
+assertEquals(2, break_count2);
assertNull(exception);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index a10c503d74..6c2f06538c 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -42,6 +42,10 @@
# Very slow in stress mode.
'regress/regress-2318': [SKIP],
+
+ # Currently fails because breakpoint information is inconsistent after
+ # disabling and re-enabling the debugger (https://crbug.com/v8/10403).
+ 'debug/wasm/debug-step-into-wasm': [SKIP],
}], # variant == stress
##############################################################################
@@ -51,12 +55,6 @@
}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
##############################################################################
-['variant == stress_incremental_marking', {
- # BUG(chromium:772010).
- 'debug/debug-*': [PASS, ['system == windows', SKIP]],
-}], # variant == stress_incremental_marking
-
-##############################################################################
['gc_stress == True', {
# Skip tests not suitable for GC stress.
# Tests taking too long
@@ -88,6 +86,11 @@
# https://crbug.com/v8/8147
'debug/debug-liveedit-*': [SKIP],
'debug/debug-set-variable-value': [SKIP],
+
+ # Rely on (blocking) concurrent compilation.
+ 'debug/regress/regress-opt-after-debug-deopt': [SKIP],
+ 'debug/regress/regress-prepare-break-while-recompile': [SKIP],
+ 'regress/regress-7421': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -149,15 +152,9 @@
'debug/wasm/frame-inspection': [SKIP],
}],
-##############################################################################
-['isolates', {
- # WebAssembly debugging does not work reliably when multiple isolates are
- # involved (https://crbug.com/v8/10359).
- # (this list might need to be extended by more debugging tests as they
- # start flaking)
- 'debug/wasm/debug-enabled-tier-down-wasm': [SKIP],
- 'debug/wasm/debug-enabled-tier-down-wasm-unsupported-liftoff': [SKIP],
- 'regress/regress-crbug-1032042': [SKIP],
-}], # 'isolates'
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
]
diff --git a/deps/v8/test/debugging/debugging.status b/deps/v8/test/debugging/debugging.status
index b5ebc84474..7cbaca0e9b 100644
--- a/deps/v8/test/debugging/debugging.status
+++ b/deps/v8/test/debugging/debugging.status
@@ -2,4 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-[]
+[
+
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
+]
diff --git a/deps/v8/test/debugging/testcfg.py b/deps/v8/test/debugging/testcfg.py
index 6f7fedb25b..1a212de969 100644
--- a/deps/v8/test/debugging/testcfg.py
+++ b/deps/v8/test/debugging/testcfg.py
@@ -79,7 +79,8 @@ class PYTestLoader(testsuite.GenericTestLoader):
@property
def excluded_files(self):
- return {'gdb_rsp.py', 'testcfg.py', '__init__.py'}
+ return {'gdb_rsp.py', 'testcfg.py', '__init__.py', 'test_basic.py',
+ 'test_float.py', 'test_memory.py', 'test_trap.py'}
@property
def extensions(self):
diff --git a/deps/v8/test/debugging/wasm/gdb-server/OWNERS b/deps/v8/test/debugging/wasm/gdb-server/OWNERS
new file mode 100644
index 0000000000..4b8c1919e8
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/OWNERS
@@ -0,0 +1,3 @@
+paolosev@microsoft.com
+
+# COMPONENT: Blink>JavaScript>WebAssembly \ No newline at end of file
diff --git a/deps/v8/test/debugging/wasm/gdb-server/breakpoints.py b/deps/v8/test/debugging/wasm/gdb-server/breakpoints.py
new file mode 100644
index 0000000000..98fb192075
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/breakpoints.py
@@ -0,0 +1,58 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_basic.js
+
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_basic as test_basic
+
+class Tests(unittest.TestCase):
+ def test_initial_breakpoint(self):
+ # Testing that the debuggee suspends when the debugger attaches.
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ reply = connection.RspRequest('?')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ def test_setting_removing_breakpoint(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ func_addr = module_load_addr + test_basic.BREAK_ADDRESS_1
+
+ # Set a breakpoint.
+ reply = connection.RspRequest('Z0,%x,1' % func_addr)
+ self.assertEqual(reply, 'OK')
+
+ # When we run the program, we should hit the breakpoint.
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+ gdb_rsp.CheckInstructionPtr(connection, func_addr)
+
+ # Check that we can remove the breakpoint.
+ reply = connection.RspRequest('z0,%x,0' % func_addr)
+ self.assertEqual(reply, 'OK')
+ # Requesting removal of a breakpoint at an address that does not
+ # have one should return an error.
+ reply = connection.RspRequest('z0,%x,0' % func_addr)
+ self.assertEqual(reply, 'E03')
+
+ def test_setting_breakpoint_on_invalid_address(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ # Requesting a breakpoint on an invalid address should give an error.
+ reply = connection.RspRequest('Z0,%x,1' % (1 << 32))
+ self.assertEqual(reply, 'E03')
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
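For context, the 'Z0'/'z0' requests above are raw GDB Remote Serial Protocol packets; on the wire each payload is framed as $payload#checksum by RspSendOnly in gdb_rsp.py (shown further below). A small illustrative sketch, assuming a module-relative breakpoint offset of 0x23:

# Illustration only: framing of the set-breakpoint request used above.
def rsp_checksum(data):
    # Modulo-256 sum of the payload characters, as in gdb_rsp.RspChecksum.
    return sum(ord(c) for c in data) & 0xff

payload = 'Z0,%x,1' % 0x23
packet = '$%s#%02x' % (payload, rsp_checksum(payload))
print(packet)  # -> $Z0,23,1#78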
diff --git a/deps/v8/test/debugging/wasm/gdb-server/connect.py b/deps/v8/test/debugging/wasm/gdb-server/connect.py
index f2bac29425..9fd9628b5a 100644
--- a/deps/v8/test/debugging/wasm/gdb-server/connect.py
+++ b/deps/v8/test/debugging/wasm/gdb-server/connect.py
@@ -1,24 +1,27 @@
-# Copyright 2019 the V8 project authors. All rights reserved.
+# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# Flags: -expose-wasm --wasm_gdb_remote --wasm-pause-waiting-for-debugger --wasm-interpret-all test/debugging/wasm/gdb-server/test_files/test.js
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_basic.js
+from ctypes import *
import os
import subprocess
import unittest
import sys
+
import gdb_rsp
+import test_files.test_basic as test_basic
# These are set up by Main().
COMMAND = None
-
class Tests(unittest.TestCase):
+
def test_disconnect(self):
process = gdb_rsp.PopenDebugStub(COMMAND)
try:
- # Connect and record the instruction pointer.
+ # Connect.
connection = gdb_rsp.GdbRspConnection()
connection.Close()
# Reconnect 3 times.
@@ -28,6 +31,29 @@ class Tests(unittest.TestCase):
finally:
gdb_rsp.KillProcess(process)
+ def test_kill(self):
+ process = gdb_rsp.PopenDebugStub(COMMAND)
+ try:
+ connection = gdb_rsp.GdbRspConnection()
+ # Request killing the target.
+ reply = connection.RspRequest('k')
+ self.assertEqual(reply, 'OK')
+ signal = c_byte(process.wait()).value
+ self.assertEqual(signal, gdb_rsp.RETURNCODE_KILL)
+ finally:
+ gdb_rsp.KillProcess(process)
+
+ def test_detach(self):
+ process = gdb_rsp.PopenDebugStub(COMMAND)
+ try:
+ connection = gdb_rsp.GdbRspConnection()
+ # Request detaching from the target.
+ # This resumes execution, so we get the normal exit() status.
+ reply = connection.RspRequest('D')
+ self.assertEqual(reply, 'OK')
+ finally:
+ gdb_rsp.KillProcess(process)
+
def Main():
index = sys.argv.index('--')
diff --git a/deps/v8/test/debugging/wasm/gdb-server/float.py b/deps/v8/test/debugging/wasm/gdb-server/float.py
new file mode 100644
index 0000000000..fd732e32a5
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/float.py
@@ -0,0 +1,71 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_float.js
+
+import os
+import re
+import struct
+import subprocess
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_float as test_float
+
+# These are set up by Main().
+COMMAND = None
+
+class Tests(unittest.TestCase):
+
+ def RunToWasm(self, connection):
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_float.FUNC_START_ADDR
+
+ # Set a breakpoint.
+ reply = connection.RspRequest('Z0,%x,1' % breakpoint_addr)
+ self.assertEqual(reply, 'OK')
+
+ # When we run the program, we should hit the breakpoint.
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ # Remove the breakpoint.
+ reply = connection.RspRequest('z0,%x,1' % breakpoint_addr)
+ self.assertEqual(reply, 'OK')
+
+ def test_loaded_modules(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ modules = gdb_rsp.GetLoadedModules(connection)
+ connection.Close()
+ assert(len(modules) > 0)
+
+ def test_wasm_local_float(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ self.RunToWasm(connection)
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+
+ reply = connection.RspRequest('qWasmLocal:0;0')
+ value = struct.unpack('f', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_float.ARG_0, value)
+
+ reply = connection.RspRequest('qWasmLocal:0;1')
+ value = struct.unpack('f', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_float.ARG_1, value)
+
+ # invalid local
+ reply = connection.RspRequest('qWasmLocal:0;9')
+ self.assertEqual("E03", reply)
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
diff --git a/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py b/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py
index 131725f83d..f1981ed9a8 100644
--- a/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py
+++ b/deps/v8/test/debugging/wasm/gdb-server/gdb_rsp.py
@@ -1,13 +1,25 @@
-# Copyright 2019 the V8 project authors. All rights reserved.
+# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import re
import socket
+import struct
import subprocess
import time
+import xml.etree.ElementTree
SOCKET_ADDR = ('localhost', 8765)
+SIGTRAP = 5
+SIGSEGV = 11
+RETURNCODE_KILL = -9
+
+ARCH = 'wasm32'
+REG_DEFS = {
+ ARCH: [('pc', 'Q'), ],
+}
+
def EnsurePortIsAvailable(addr=SOCKET_ADDR):
# As a sanity check, check that the TCP port is available by binding to it
@@ -21,6 +33,11 @@ def EnsurePortIsAvailable(addr=SOCKET_ADDR):
sock.bind(addr)
sock.close()
+def RspChecksum(data):
+ checksum = 0
+ for char in data:
+ checksum = (checksum + ord(char)) & 0xff
+ return checksum
class GdbRspConnection(object):
@@ -48,6 +65,40 @@ class GdbRspConnection(object):
raise Exception('Could not connect to the debug stub in %i seconds'
% timeout_in_seconds)
+ def _GetReply(self):
+ reply = ''
+ message_finished = re.compile('#[0-9a-fA-F]{2}')
+ while True:
+ data = self._socket.recv(1024)
+ if len(data) == 0:
+ raise AssertionError('EOF on socket reached with '
+ 'incomplete reply message: %r' % reply)
+ reply += data
+ if message_finished.match(reply[-3:]):
+ break
+ match = re.match('\+?\$([^#]*)#([0-9a-fA-F]{2})$', reply)
+ if match is None:
+ raise AssertionError('Unexpected reply message: %r' % reply)
+ reply_body = match.group(1)
+ checksum = match.group(2)
+ expected_checksum = '%02x' % RspChecksum(reply_body)
+ if checksum != expected_checksum:
+ raise AssertionError('Bad RSP checksum: %r != %r' %
+ (checksum, expected_checksum))
+ # Send acknowledgement.
+ self._socket.send('+')
+ return reply_body
+
+ # Send an RSP message, but don't wait for or expect a reply.
+ def RspSendOnly(self, data):
+ msg = '$%s#%02x' % (data, RspChecksum(data))
+ return self._socket.send(msg)
+
+ def RspRequest(self, data):
+ self.RspSendOnly(data)
+ reply = self._GetReply()
+ return reply
+
def Close(self):
self._socket.close()
@@ -71,3 +122,87 @@ def KillProcess(process):
else:
raise
process.wait()
+
+
+class LaunchDebugStub(object):
+ def __init__(self, command):
+ self._proc = PopenDebugStub(command)
+
+ def __enter__(self):
+ try:
+ return GdbRspConnection()
+ except:
+ KillProcess(self._proc)
+ raise
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ KillProcess(self._proc)
+
+
+def AssertEquals(x, y):
+ if x != y:
+ raise AssertionError('%r != %r' % (x, y))
+
+def DecodeHex(data):
+ assert len(data) % 2 == 0, data
+ return bytes(bytearray([int(data[index * 2 : (index + 1) * 2], 16) for index in xrange(len(data) // 2)]))
+
+def EncodeHex(data):
+ return ''.join('%02x' % ord(byte) for byte in data)
+
+def DecodeUInt64Array(data):
+ assert len(data) % 16 == 0, data
+ result = []
+ for index in xrange(len(data) // 16):
+ value = 0
+ for digit in xrange(7, -1, -1):
+ value = value * 256 + int(data[index * 16 + digit * 2 : index * 16 + (digit + 1) * 2], 16)
+ result.append(value)
+ return result
+
+def AssertReplySignal(reply, signal):
+ AssertEquals(ParseThreadStopReply(reply)['signal'], signal)
+
+def ParseThreadStopReply(reply):
+ match = re.match('T([0-9a-f]{2})thread-pcs:([0-9a-f]+);thread:([0-9a-f]+);$', reply)
+ if not match:
+ raise AssertionError('Bad thread stop reply: %r' % reply)
+ return {'signal': int(match.group(1), 16),
+ 'thread_pc': int(match.group(2), 16),
+ 'thread_id': int(match.group(3), 16)}
+
+def CheckInstructionPtr(connection, expected_ip):
+ ip_value = DecodeRegs(connection.RspRequest('g'))['pc']
+ AssertEquals(ip_value, expected_ip)
+
+def DecodeRegs(reply):
+ defs = REG_DEFS[ARCH]
+ names = [reg_name for reg_name, reg_fmt in defs]
+ fmt = ''.join([reg_fmt for reg_name, reg_fmt in defs])
+
+ values = struct.unpack_from(fmt, DecodeHex(reply))
+ return dict(zip(names, values))
+
+def GetLoadedModules(connection):
+ modules = {}
+ reply = connection.RspRequest('qXfer:libraries:read')
+ AssertEquals(reply[0], 'l')
+ library_list = xml.etree.ElementTree.fromstring(reply[1:])
+ AssertEquals(library_list.tag, 'library-list')
+ for library in library_list:
+ AssertEquals(library.tag, 'library')
+ section = library.find('section')
+ address = section.get('address')
+ assert long(address) > 0
+ modules[long(address)] = library.get('name')
+ return modules
+
+def GetLoadedModuleAddress(connection):
+ modules = GetLoadedModules(connection)
+ assert len(modules) > 0
+ return modules.keys()[0]
+
+def ReadCodeMemory(connection, address, size):
+ reply = connection.RspRequest('m%x,%x' % (address, size))
+ assert not reply.startswith('E'), reply
+ return DecodeHex(reply)
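The helpers above assume a fixed shape for stop replies (Tssthread-pcs:PC;thread:TID;) and hex-encoded little-endian register blobs for the single wasm32 'pc' register. A short usage sketch with fabricated reply strings (real replies come from the debug stub):

# Sketch only: decode a stop reply and a 'g' register reply by hand.
import re
import struct

stop_reply = 'T05thread-pcs:23;thread:1;'
m = re.match('T([0-9a-f]{2})thread-pcs:([0-9a-f]+);thread:([0-9a-f]+);$', stop_reply)
signal = int(m.group(1), 16)
assert signal == 5  # SIGTRAP

# A 'g' reply for REG_DEFS['wasm32'] is one 64-bit little-endian value: the pc.
g_reply = '2300000000000000'
(pc,) = struct.unpack('Q', bytes(bytearray.fromhex(g_reply)))
assert pc == 0x23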
diff --git a/deps/v8/test/debugging/wasm/gdb-server/memory.py b/deps/v8/test/debugging/wasm/gdb-server/memory.py
new file mode 100644
index 0000000000..db102a7107
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/memory.py
@@ -0,0 +1,96 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_memory.js
+
+import struct
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_memory as test_memory
+
+# These are set up by Main().
+COMMAND = None
+
+class Tests(unittest.TestCase):
+ # Test that reading from an unreadable address gives a sensible error.
+ def CheckReadMemoryAtInvalidAddr(self, connection):
+ mem_addr = 0xffffffff
+ result = connection.RspRequest('m%x,%x' % (mem_addr, 1))
+ self.assertEquals(result, 'E02')
+
+ def RunToWasm(self, connection, breakpoint_addr):
+ # Set a breakpoint.
+ reply = connection.RspRequest('Z0,%x,1' % breakpoint_addr)
+ self.assertEqual(reply, 'OK')
+
+ # When we run the program, we should hit the breakpoint.
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ # Remove the breakpoint.
+ reply = connection.RspRequest('z0,%x,1' % breakpoint_addr)
+ self.assertEqual(reply, 'OK')
+
+ def test_reading_and_writing_memory(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
+ self.RunToWasm(connection, breakpoint_addr)
+
+ self.CheckReadMemoryAtInvalidAddr(connection)
+
+ # Check reading code memory space.
+ expected_data = b'\0asm'
+ result = gdb_rsp.ReadCodeMemory(connection, module_load_addr, len(expected_data))
+ self.assertEqual(result, expected_data)
+
+ # Check reading instance memory at a valid range.
+ reply = connection.RspRequest('qWasmMem:0;%x;%x' % (32, 4))
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEquals(int(value), 0)
+
+ # Check reading instance memory at an invalid range.
+ reply = connection.RspRequest('qWasmMem:0;%x;%x' % (0xf0000000, 4))
+ self.assertEqual(reply, 'E03')
+
+ def test_wasm_global(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
+ self.RunToWasm(connection, breakpoint_addr)
+
+ # Check reading valid global.
+ reply = connection.RspRequest('qWasmGlobal:0;0')
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(0, value)
+
+ # Check reading invalid global.
+ reply = connection.RspRequest('qWasmGlobal:0;9')
+ self.assertEqual("E03", reply)
+
+ def test_wasm_call_stack(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_memory.FUNC0_START_ADDR
+ self.RunToWasm(connection, breakpoint_addr)
+
+ reply = connection.RspRequest('qWasmCallStack')
+ stack = gdb_rsp.DecodeUInt64Array(reply)
+ assert(len(stack) > 2) # At least two Wasm frames, plus one or more JS frames.
+ self.assertEqual(stack[0], module_load_addr + test_memory.FUNC0_START_ADDR)
+ self.assertEqual(stack[1], module_load_addr + test_memory.FUNC1_RETURN_ADDR)
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
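The qWasmMem/qWasmGlobal replies checked above are hex-encoded little-endian byte strings; DecodeHex plus struct.unpack recovers the integer. A tiny sketch with a fabricated reply value:

# Sketch: decode a 4-byte qWasmMem/qWasmGlobal reply (fabricated value).
import struct

reply = '2a000000'  # hex for the little-endian 32-bit value 42
value = struct.unpack('I', bytes(bytearray.fromhex(reply)))[0]
assert value == 42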
diff --git a/deps/v8/test/debugging/wasm/gdb-server/status.py b/deps/v8/test/debugging/wasm/gdb-server/status.py
new file mode 100644
index 0000000000..0b333dce05
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/status.py
@@ -0,0 +1,109 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_basic.js
+
+import re
+import struct
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_basic as test_basic
+
+# These are set up by Main().
+COMMAND = None
+
+class Tests(unittest.TestCase):
+
+ def test_loaded_modules(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ modules = gdb_rsp.GetLoadedModules(connection)
+ connection.Close()
+ assert(len(modules) > 0)
+
+ def test_checking_thread_state(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ # Query wasm thread id
+ reply = connection.RspRequest('qfThreadInfo')
+ match = re.match('m([0-9])$', reply)
+ if match is None:
+ raise AssertionError('Bad active thread list reply: %r' % reply)
+ thread_id = int(match.group(1), 10)
+ # There should not be other threads.
+ reply = connection.RspRequest('qsThreadInfo')
+ self.assertEqual("l", reply)
+ # Test that a valid thread is reported as alive.
+ reply = connection.RspRequest('T%d' % (thread_id))
+ self.assertEqual("OK", reply)
+ # Test invalid thread id.
+ reply = connection.RspRequest('T42')
+ self.assertEqual("E02", reply)
+
+ def test_wasm_local(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_basic.BREAK_ADDRESS_2
+
+ reply = connection.RspRequest('Z0,%x,1' % breakpoint_addr)
+ self.assertEqual("OK", reply)
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ reply = connection.RspRequest('qWasmLocal:0;0')
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_basic.ARG_0, value)
+
+ reply = connection.RspRequest('qWasmLocal:0;1')
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_basic.ARG_1, value)
+
+ # invalid local
+ reply = connection.RspRequest('qWasmLocal:0;9')
+ self.assertEqual("E03", reply)
+
+ def test_wasm_stack_value(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_basic.BREAK_ADDRESS_2
+
+ reply = connection.RspRequest('Z0,%x,1' % breakpoint_addr)
+ self.assertEqual("OK", reply)
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ reply = connection.RspRequest('qWasmStackValue:0;0')
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_basic.ARG_0, value)
+
+ reply = connection.RspRequest('qWasmStackValue:0;1')
+ value = struct.unpack('I', gdb_rsp.DecodeHex(reply))[0]
+ self.assertEqual(test_basic.ARG_1, value)
+
+ # invalid index
+ reply = connection.RspRequest('qWasmStackValue:0;2')
+ self.assertEqual("E03", reply)
+
+ def test_modifying_code_is_disallowed(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ # Pick an arbitrary address in the code segment.
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ breakpoint_addr = module_load_addr + test_basic.BREAK_ADDRESS_1
+ # Writing to the code area should be disallowed.
+ data = '\x00'
+ write_command = 'M%x,%x:%s' % (breakpoint_addr, len(data), gdb_rsp.EncodeHex(data))
+ reply = connection.RspRequest(write_command)
+ self.assertEquals(reply, 'E03')
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
diff --git a/deps/v8/test/debugging/wasm/gdb-server/stepping.py b/deps/v8/test/debugging/wasm/gdb-server/stepping.py
new file mode 100644
index 0000000000..bc227ef2b0
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/stepping.py
@@ -0,0 +1,56 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_basic.js
+
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_basic as test_basic
+
+# These are set up by Main().
+COMMAND = None
+
+class Tests(unittest.TestCase):
+ def test_single_step(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ bp_addr = module_load_addr + test_basic.BREAK_ADDRESS_0
+ reply = connection.RspRequest('Z0,%x,1' % bp_addr)
+ self.assertEqual("OK", reply)
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+ # We expect 's' to stop at the next instruction.
+ reply = connection.RspRequest('s')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+ tid = gdb_rsp.ParseThreadStopReply(reply)['thread_id']
+ self.assertEqual(tid, 1)
+ regs = gdb_rsp.DecodeRegs(connection.RspRequest('g'))
+ self.assertEqual(regs['pc'], module_load_addr + test_basic.BREAK_ADDRESS_1)
+
+ # Again.
+ reply = connection.RspRequest('s')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+ tid = gdb_rsp.ParseThreadStopReply(reply)['thread_id']
+ self.assertEqual(tid, 1)
+ regs = gdb_rsp.DecodeRegs(connection.RspRequest('g'))
+ self.assertEqual(regs['pc'], module_load_addr + test_basic.BREAK_ADDRESS_2)
+
+ # Check that we can continue after single-stepping.
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGTRAP)
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/__init__.py b/deps/v8/test/debugging/wasm/gdb-server/test_files/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/__init__.py
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js
deleted file mode 100644
index 0959edca30..0000000000
--- a/deps/v8/test/debugging/wasm/gdb-server/test_files/test.js
+++ /dev/null
@@ -1,33 +0,0 @@
-
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-var builder = new WasmModuleBuilder();
-builder.addFunction('mul', kSig_i_ii)
-// input is 2 args of type int and output is int
-.addBody([
- kExprLocalGet, 0, // local.get i0
- kExprLocalGet, 1, // local.get i1
- kExprI32Mul]) // i32.sub i0 i1
-.exportFunc();
-
-const instance = builder.instantiate();
-const wasm_f = instance.exports.mul;
-
-function f() {
- var result = wasm_f(21, 2);
- return result;
-}
-
-try {
- let val = 0;
- while (true) {
- val += f();
- }
-}
-catch (e) {
- print('*exception:* ' + e);
-}
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.js
new file mode 100644
index 0000000000..0c80446e41
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.js
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder
+ .addFunction('mul', kSig_i_ii)
+ // input is 2 args of type int and output is int
+ .addBody([
+ kExprLocalGet, 0, // local.get i0
+ kExprLocalGet, 1, // local.get i1
+ kExprI32Mul
+ ]) // i32.mul i0 i1
+ .exportFunc();
+
+const instance = builder.instantiate();
+const wasm_f = instance.exports.mul;
+
+function f() {
+ var result = wasm_f(21, 2);
+ return result;
+}
+
+try {
+ let val = f();
+ f();
+} catch (e) {
+ print('*exception:* ' + e);
+}
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.py b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.py
new file mode 100644
index 0000000000..7c3f520421
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_basic.py
@@ -0,0 +1,23 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# 0x00 (module
+# 0x08 [type]
+# (type $type0 (func (param i32 i32) (result i32)))
+# 0x11 [function]
+# 0x15 (export "mul" (func $func0))
+# 0x1e [code]
+# 0x20 (func $func0 (param $var0 i32) (param $var1 i32) (result i32)
+# 0x23 get_local $var0
+# 0x25 get_local $var1
+# 0x27 i32.mul
+# )
+# )
+# 0x29 [name]
+
+BREAK_ADDRESS_0 = 0x0023
+BREAK_ADDRESS_1 = 0x0025
+BREAK_ADDRESS_2 = 0x0027
+ARG_0 = 21
+ARG_1 = 2
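These constants are byte offsets into the serialized module, taken from the annotated layout above; the tests turn them into absolute breakpoint addresses by adding the load address reported by the stub. Illustrative only, with a made-up load address:

# Illustration: module-relative offsets become absolute code addresses.
BREAK_ADDRESS_0 = 0x0023
module_load_addr = 0x10000000  # example value; reported by the stub at runtime
breakpoint_addr = module_load_addr + BREAK_ADDRESS_0
assert breakpoint_addr == 0x10000023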
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.js
new file mode 100644
index 0000000000..e1f45e5aa6
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.js
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder
+ .addFunction('mul', kSig_f_ff)
+ // input is 2 args of type float and output is float
+ .addBody([
+ kExprLocalGet, 0, // local.get f0
+ kExprLocalGet, 1, // local.get f1
+ kExprF32Mul, // f32.mul f0 f1
+ ])
+ .exportFunc();
+
+const instance = builder.instantiate();
+const wasm_f = instance.exports.mul;
+
+function f() {
+ var result = wasm_f(12.0, 3.5);
+ return result;
+}
+
+try {
+ let val = f();
+ print('float result: ' + val);
+} catch (e) {
+ print('*exception:* ' + e);
+}
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.py b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.py
new file mode 100644
index 0000000000..6cf59eb380
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_float.py
@@ -0,0 +1,21 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# 0x00 (module
+# 0x08 [type]
+# (type $type0 (func (param f32 f32) (result f32)))
+# 0x11 [function]
+# 0x15 (export "mul" (func $func0))
+# 0x1e [code]
+# 0x20 (func $func0 (param $var0 f32) (param $var1 f32) (result f32)
+# 0x23 get_local $var0
+# 0x25 get_local $var1
+# 0x27 f32.mul
+# )
+# )
+# 0x29 [name]
+
+ARG_0 = 12.0
+ARG_1 = 3.5
+FUNC_START_ADDR = 0x23
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
new file mode 100644
index 0000000000..c955f126fa
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+builder.addGlobal(kWasmI32).exportAs('g_n');
+
+builder.addMemory(32, 128).exportMemoryAs('mem')
+
+var func_a_idx =
+ builder.addFunction('wasm_A', kSig_v_i).addBody([kExprNop, kExprNop]).index;
+
+// wasm_B calls wasm_A <param0> times.
+builder.addFunction('wasm_B', kSig_v_i)
+ .addBody([
+ kExprLoop,
+ kWasmStmt, // while
+ kExprLocalGet,
+ 0, // -
+ kExprIf,
+ kWasmStmt, // if <param0> != 0
+ kExprLocalGet,
+ 0, // -
+ kExprI32Const,
+ 1, // -
+ kExprI32Sub, // -
+ kExprLocalSet,
+ 0, // decrease <param0>
+ ...wasmI32Const(1024), // some longer i32 const (2 byte imm)
+ kExprCallFunction,
+ func_a_idx, // -
+ kExprBr,
+ 1, // continue
+ kExprEnd, // -
+ kExprEnd, // break
+ ])
+ .exportAs('main');
+
+const instance = builder.instantiate();
+const wasm_main = instance.exports.main;
+
+function f() {
+ wasm_main(42);
+}
+f();
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.py b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.py
new file mode 100644
index 0000000000..5502bcc6e6
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.py
@@ -0,0 +1,43 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# 0x00 (module
+# 0x08 [type]
+# 0x0a (type $type0 (func (param i32)))
+# 0x0f (type $type1 (func (param i32)))
+# 0x13 [function]
+# 0x18 [memory]
+# (memory (;0;) 32 128)
+# 0x1f [global]
+# 0x27 (global $global0 i32 (i32.const 0))
+# 0x29 (export "g_n" (global $global0))
+# 0x30 (export "mem" (memory 0))
+# 0x36 (export "main" (func $func1))
+# 0x3d [code]
+# 0x3f (func $func0 (param $var0 i32)
+# 0x42 nop
+# 0x43 nop
+# )
+# 0x45 (func $func1 (param $var0 i32)
+# 0x47 loop $label0
+# 0x49 get_local $var0
+# 0x4b if
+# 0x4d get_local $var0
+# 0x4f i32.const 1
+# 0x51 i32.sub
+# 0x52 set_local $var0
+# 0x54 i32.const 1024
+# 0x57 call $func0
+# 0x59 br $label0
+# 0x5b end
+# 0x5c end $label0
+# )
+# )
+# 0x5e [name]
+
+MEM_MIN = 32
+MEM_MAX = 128
+FUNC0_START_ADDR = 0x42
+FUNC1_RETURN_ADDR = 0x59
+FUNC1_START_ADDR = 0x47
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.js
new file mode 100644
index 0000000000..2f4a9fb52c
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.js
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+builder.addMemory(32, 128).exportMemoryAs('mem')
+
+var func_a_idx =
+ builder.addFunction('wasm_A', kSig_v_v).addBody([
+ kExprI32Const, 0, // i32.const 0
+ kExprI32Const, 42, // i32.const 42
+ kExprI32StoreMem, 0, 0xff, 0xff, 0xff, 0xff, 0x0f, // i32.store offset = -1
+ ]).index;
+
+builder.addFunction('main', kSig_i_v).addBody([
+ kExprCallFunction, func_a_idx, // call $wasm_A
+ kExprI32Const, 0 // i32.const 0
+ ])
+ .exportFunc();
+
+const instance = builder.instantiate();
+const main_f = instance.exports.main;
+
+function f() {
+ var result = main_f();
+ return result;
+}
+f();
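The five bytes 0xff 0xff 0xff 0xff 0x0f following the store opcode above are the unsigned-LEB128 encoding of the memory offset 0xffffffff, which is what pushes the store out of bounds and makes it trap. A small decoder sketch (not part of the patch):

# Sketch: decode the unsigned-LEB128 offset immediate used in test_trap.js.
def decode_uleb128(data):
    result, shift = 0, 0
    for byte in data:
        result |= (byte & 0x7f) << shift
        if not byte & 0x80:  # high bit clear: last byte
            break
        shift += 7
    return result

assert decode_uleb128([0xff, 0xff, 0xff, 0xff, 0x0f]) == 0xffffffff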
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.py b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.py
new file mode 100644
index 0000000000..a85fc4b640
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_trap.py
@@ -0,0 +1,28 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# 0x00 (module
+# 0x08 [type]
+# 0x0a (type $type0 (func))
+# 0x0e (type $type1 (func (result i32)))
+# 0x12 [function]
+# 0x17 [memory]
+# 0x19 (memory $memory0 32 128)
+# 0x1e [export]
+# 0x20 (export "mem" (memory $memory0))
+# 0x27 (export "main" (func $func1))
+# 0x2e [code]
+# 0x30 (func $func0
+# 0x33 i32.const 0
+# 0x35 i32.const 42
+# 0x37 i32.store offset=-1 align=1
+# 0x3e )
+# 0x3f (func $func1 (result i32)
+# 0x41 call $func0
+# 0x43 i32.const 0
+# 0x45 )
+# 0x46 ...
+# )
+
+TRAP_ADDRESS = 0x0037
diff --git a/deps/v8/test/debugging/wasm/gdb-server/trap.py b/deps/v8/test/debugging/wasm/gdb-server/trap.py
new file mode 100644
index 0000000000..7a6fea13d2
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/trap.py
@@ -0,0 +1,37 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Flags: -expose-wasm --wasm-gdb-remote --wasm-pause-waiting-for-debugger test/debugging/wasm/gdb-server/test_files/test_trap.js
+
+import sys
+import unittest
+
+import gdb_rsp
+import test_files.test_trap as test_trap
+
+# These are set up by Main().
+COMMAND = None
+
+class Tests(unittest.TestCase):
+ def test_trap(self):
+ with gdb_rsp.LaunchDebugStub(COMMAND) as connection:
+ module_load_addr = gdb_rsp.GetLoadedModuleAddress(connection)
+ reply = connection.RspRequest('c')
+ gdb_rsp.AssertReplySignal(reply, gdb_rsp.SIGSEGV)
+ tid = gdb_rsp.ParseThreadStopReply(reply)['thread_id']
+ self.assertEqual(tid, 1)
+ regs = gdb_rsp.DecodeRegs(connection.RspRequest('g'))
+ self.assertEqual(regs['pc'], module_load_addr + test_trap.TRAP_ADDRESS)
+
+
+def Main():
+ index = sys.argv.index('--')
+ args = sys.argv[index + 1:]
+ # The remaining arguments go to unittest.main().
+ global COMMAND
+ COMMAND = args
+ unittest.main(argv=sys.argv[:index])
+
+if __name__ == '__main__':
+ Main()
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index 65e51c238b..0a08dfa2e9 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -13,4 +13,9 @@
'wasm_compile/*': [SKIP],
}], # lite_mode or variant == jitless
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index c411d933d6..691fcf67e6 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -80,7 +80,11 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::internal::Handle<v8::internal::Script> script =
factory->NewScript(source.ToHandleChecked());
- v8::internal::ParseInfo info(i_isolate, *script);
+ v8::internal::UnoptimizedCompileState state(i_isolate);
+ v8::internal::UnoptimizedCompileFlags flags =
+ v8::internal::UnoptimizedCompileFlags::ForScriptCompile(i_isolate,
+ *script);
+ v8::internal::ParseInfo info(i_isolate, flags, &state);
if (!v8::internal::parsing::ParseProgram(&info, script, i_isolate)) {
i_isolate->OptionalRescheduleException(true);
}
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 1241061a7b..a7ab963b92 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -79,6 +79,17 @@ class DataRange {
DISALLOW_COPY_AND_ASSIGN(DataRange);
};
+template <>
+bool DataRange::get<bool>() {
+ // Specialization for bool.
+ // Assigning a boolean via memcpy from randomized bytes is undefined behavior,
+ // so an -O3 release build can observe a different result from a debug build
+ // when invoking get<bool>. That in turn makes the generated code output differ
+ // and can raise various unrecoverable errors at runtime. Derive the bool from a
+ // full byte instead.
+ return get<uint8_t>() % 2 == 0;
+}
+
ValueType GetValueType(DataRange* data) {
// TODO(v8:8460): We do not add kWasmS128 here yet because this method is used
// to generate globals, and since we do not have v128.const yet, there is no
@@ -194,6 +205,13 @@ class WasmGenerator {
case kExprI64AtomicXor:
case kExprI64AtomicExchange:
case kExprI64AtomicCompareExchange:
+ case kExprI16x8Load8x8S:
+ case kExprI16x8Load8x8U:
+ case kExprI32x4Load16x4S:
+ case kExprI32x4Load16x4U:
+ case kExprI64x2Load32x2S:
+ case kExprI64x2Load32x2U:
+ case kExprS64x2LoadSplat:
return 3;
case kExprI32LoadMem:
case kExprI64LoadMem32S:
@@ -220,6 +238,7 @@ class WasmGenerator {
case kExprI64AtomicXor32U:
case kExprI64AtomicExchange32U:
case kExprI64AtomicCompareExchange32U:
+ case kExprS32x4LoadSplat:
return 2;
case kExprI32LoadMem16S:
case kExprI32LoadMem16U:
@@ -245,6 +264,7 @@ class WasmGenerator {
case kExprI64AtomicXor16U:
case kExprI64AtomicExchange16U:
case kExprI64AtomicCompareExchange16U:
+ case kExprS16x8LoadSplat:
return 1;
case kExprI32LoadMem8S:
case kExprI32LoadMem8U:
@@ -270,6 +290,7 @@ class WasmGenerator {
case kExprI64AtomicXor8U:
case kExprI64AtomicExchange8U:
case kExprI64AtomicCompareExchange8U:
+ case kExprS8x16LoadSplat:
return 0;
default:
return 0;
@@ -312,6 +333,14 @@ class WasmGenerator {
builder_->EmitWithPrefix(Op);
}
+ void simd_shuffle(DataRange* data) {
+ Generate<ValueType::kS128, ValueType::kS128>(data);
+ builder_->EmitWithPrefix(kExprS8x16Shuffle);
+ for (int i = 0; i < kSimd128Size; i++) {
+ builder_->EmitByte(static_cast<uint8_t>(data->get<byte>() % 32));
+ }
+ }
+
void drop(DataRange* data) {
Generate(GetValueType(data), data);
builder_->Emit(kExprDrop);
@@ -1151,21 +1180,125 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
ValueType::kS128>,
&WasmGenerator::simd_op<kExprI32x4GeU, ValueType::kS128,
ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4Shl, ValueType::kS128, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI32x4ShrS, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI32x4ShrU, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI32x4Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4Mul, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4MinS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4MinU, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4MaxS, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4MaxU, ValueType::kS128,
+ ValueType::kS128>,
&WasmGenerator::simd_op<kExprI64x2Splat, ValueType::kI64>,
+ &WasmGenerator::simd_op<kExprI64x2Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI64x2Shl, ValueType::kS128, ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI64x2ShrS, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI64x2ShrU, ValueType::kS128,
+ ValueType::kI32>,
+ &WasmGenerator::simd_op<kExprI64x2Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI64x2Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI64x2Mul, ValueType::kS128,
+ ValueType::kS128>,
+
&WasmGenerator::simd_op<kExprF32x4Splat, ValueType::kF32>,
+ &WasmGenerator::simd_op<kExprF32x4Eq, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Ne, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Lt, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Gt, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Le, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Ge, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Abs, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Sqrt, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Mul, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Div, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Min, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4Max, ValueType::kS128,
+ ValueType::kS128>,
+
&WasmGenerator::simd_op<kExprF64x2Splat, ValueType::kF64>,
+ &WasmGenerator::simd_op<kExprF64x2Eq, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Ne, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Lt, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Gt, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Le, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Ge, ValueType::kS128, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Abs, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Neg, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Sqrt, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Add, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Sub, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Mul, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Div, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Min, ValueType::kS128,
+ ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF64x2Max, ValueType::kS128,
+ ValueType::kS128>,
- &WasmGenerator::simd_op<kExprI32x4Add, ValueType::kS128,
+ &WasmGenerator::simd_op<kExprI32x4SConvertF32x4, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4UConvertF32x4, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4SConvertI32x4, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprF32x4UConvertI32x4, ValueType::kS128>,
+
+ &WasmGenerator::simd_op<kExprI8x16SConvertI16x8, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::simd_op<kExprI64x2Add, ValueType::kS128,
+ &WasmGenerator::simd_op<kExprI8x16UConvertI16x8, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::simd_op<kExprF32x4Add, ValueType::kS128,
+ &WasmGenerator::simd_op<kExprI16x8SConvertI32x4, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::simd_op<kExprF64x2Add, ValueType::kS128,
+ &WasmGenerator::simd_op<kExprI16x8UConvertI32x4, ValueType::kS128,
+ ValueType::kS128>,
+
+ &WasmGenerator::simd_op<kExprI16x8SConvertI8x16Low, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8SConvertI8x16High, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8UConvertI8x16Low, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI16x8UConvertI8x16High, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4SConvertI16x8Low, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4SConvertI16x8High, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4UConvertI16x8Low, ValueType::kS128>,
+ &WasmGenerator::simd_op<kExprI32x4UConvertI16x8High, ValueType::kS128>,
+
+ &WasmGenerator::simd_shuffle,
+ &WasmGenerator::simd_op<kExprS8x16Swizzle, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::memop<kExprS128LoadMem>};
+ &WasmGenerator::memop<kExprS128LoadMem>,
+ &WasmGenerator::memop<kExprI16x8Load8x8S>,
+ &WasmGenerator::memop<kExprI16x8Load8x8U>,
+ &WasmGenerator::memop<kExprI32x4Load16x4S>,
+ &WasmGenerator::memop<kExprI32x4Load16x4U>,
+ &WasmGenerator::memop<kExprI64x2Load32x2S>,
+ &WasmGenerator::memop<kExprI64x2Load32x2U>,
+ &WasmGenerator::memop<kExprS8x16LoadSplat>,
+ &WasmGenerator::memop<kExprS16x8LoadSplat>,
+ &WasmGenerator::memop<kExprS32x4LoadSplat>,
+ &WasmGenerator::memop<kExprS64x2LoadSplat>};
GenerateOneOf(alternatives, data);
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 590bc1bc17..1c8bafcea2 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -172,7 +172,14 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
<< glob.mutability << ");\n";
}
- for (const FunctionSig* sig : module->signatures) {
+ // TODO(7748): Support array/struct types.
+#if DEBUG
+ for (uint8_t kind : module->type_kinds) {
+ DCHECK_EQ(kWasmFunctionTypeCode, kind);
+ }
+#endif
+ for (TypeDefinition type : module->types) {
+ const FunctionSig* sig = type.function_sig;
os << "builder.addType(makeSig(" << PrintParameters(sig) << ", "
<< PrintReturns(sig) << "));\n";
}
@@ -191,6 +198,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
for (const WasmElemSegment& elem_segment : module->elem_segments) {
os << "builder.addElementSegment(";
+ os << elem_segment.table_index << ", ";
switch (elem_segment.offset.kind) {
case WasmInitExpr::kGlobalIndex:
os << elem_segment.offset.val.global_index << ", true";
@@ -251,7 +259,6 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << "assertThrows(function() { builder.instantiate(); }, "
"WebAssembly.CompileError);\n";
}
- os << "\n";
}
void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
index c5a8d155c4..a71c5d0ec5 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
@@ -5,7 +5,7 @@ Running test: enableDebugger
Running test: addScript
Script nr 1 parsed!
First script; assuming testFunction.
-Flooding script with breakpoints for the lines 3 to 21...
+Flooding script with breakpoints for the lines 3 to 19...
Setting breakpoint on line 3
error: undefined
Setting breakpoint on line 4
@@ -38,36 +38,26 @@ Setting breakpoint on line 17
error: undefined
Setting breakpoint on line 18
error: undefined
-Setting breakpoint on line 19
-error: undefined
-Setting breakpoint on line 20
-error: undefined
Running test: runTestFunction
Script nr 2 parsed!
Paused #1
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":17,"columnNumber":2}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":17,"columnNumber":12}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #2
- [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #3
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":12}
- - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
-Paused #4
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
- - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
-Paused #5
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
-Paused #6
+Paused #4
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Running test: finished
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
index 2b4c8343d5..32182b5807 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
@@ -25,8 +25,6 @@ function testFunction() {
debugger;
}
- %PrepareFunctionForOptimization(generateAsmJs);
- %OptimizeFunctionOnNextCall(generateAsmJs);
var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
fun();
}
@@ -106,6 +104,13 @@ function handleScriptParsed(messageObject)
var startLine = messageObject.params.startLine + 3;
var endLine = messageObject.params.endLine;
+ if (startLine > endLine) {
+ InspectorTest.log(
+ `Terminating early: start line ${startLine} is after end line ${endLine}.`
+ );
+ return;
+ }
+
InspectorTest.log('First script; assuming testFunction.');
InspectorTest.log(
'Flooding script with breakpoints for the lines ' + startLine + ' to ' +
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
index 18b61dcf4d..ac4cfa1485 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
@@ -11,10 +11,10 @@ Paused #1
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
First time paused, setting breakpoints!
-Flooding script with breakpoints for all lines (0 - 24)...
+Flooding script with breakpoints for all lines (0 - 22)...
Setting breakpoint on line 0
error: undefined
Setting breakpoint on line 1
@@ -59,27 +59,23 @@ Setting breakpoint on line 20
error: undefined
Setting breakpoint on line 21
error: undefined
-Setting breakpoint on line 22
-error: undefined
-Setting breakpoint on line 23
-error: undefined
Script nr 3 parsed!
Resuming...
Paused #2
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 4 parsed!
Resuming...
Paused #3
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":22,"columnNumber":17}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":17}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 5 parsed!
Resuming...
Paused #4
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":23,"columnNumber":2}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":21,"columnNumber":2}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 6 parsed!
Resuming...
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
index 5a5d1fcf69..e10c25513a 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
@@ -25,8 +25,6 @@ function testFunction() {
debugger;
}
- %PrepareFunctionForOptimization(generateAsmJs);
- %OptimizeFunctionOnNextCall(generateAsmJs);
var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
fun();
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index d21ebc783e..bdeb37bccf 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -42,7 +42,7 @@ expression: Promise.resolve(42)
{
name : [[PromiseStatus]]
type : string
- value : resolved
+ value : fulfilled
}
{
name : [[PromiseValue]]
@@ -251,7 +251,7 @@ expression: Promise.resolve(42)
{
name : [[PromiseStatus]]
type : string
- value : resolved
+ value : fulfilled
}
{
name : [[PromiseValue]]
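The expectation change above reflects the promise-status label being aligned with the spec term "fulfilled"; a minimal illustration of what the inspector now reports for the same expression:

  const p = Promise.resolve(42);
  // Internal properties shown when previewing p via Runtime.getProperties:
  //   [[PromiseStatus]] : "fulfilled"   (previously reported as "resolved")
  //   [[PromiseValue]]  : 42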
diff --git a/deps/v8/test/inspector/debugger/wasm-anyref-global.js b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
index 1c47cfc8d8..bf64649afb 100644
--- a/deps/v8/test/inspector/debugger/wasm-anyref-global.js
+++ b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
@@ -57,16 +57,17 @@ let {session, contextGroup, Protocol} =
InspectorTest.log('Paused in debugger.');
let scopeChain = callFrames[0].scopeChain;
for (let scope of scopeChain) {
- if (scope.type != 'global') continue;
+ if (scope.type != 'module') continue;
- let globalObjectProps = (await Protocol.Runtime.getProperties({
+ let moduleObjectProps = (await Protocol.Runtime.getProperties({
'objectId': scope.object.objectId
})).result.result;
- for (let prop of globalObjectProps) {
+ for (let prop of moduleObjectProps) {
+ if (prop.name != 'globals') continue;
let subProps = (await Protocol.Runtime.getProperties({
- objectId: prop.value.objectId
- })).result.result;
+ objectId: prop.value.objectId
+ })).result.result;
let values =
subProps.map((value) => `"${value.name}": ${value.value.value}`)
.join(', ');
diff --git a/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt
new file mode 100644
index 0000000000..20d0e846bd
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt
@@ -0,0 +1,11 @@
+Test that breakpoints do not survive a restart of the debugger.
+Instantiating.
+Waiting for wasm script (ignoring first non-wasm script).
+Setting breakpoint.
+Calling func.
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
+func returned.
+Restarting debugger.
+Calling func.
+func returned.
+Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart.js b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart.js
new file mode 100644
index 0000000000..116f048dc3
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart.js
@@ -0,0 +1,63 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test that breakpoints do not survive a restart of the debugger.');
+session.setupScriptMap();
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+
+const func =
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+
+const module_bytes = JSON.stringify(builder.toArray());
+
+function instantiate(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ let module = new WebAssembly.Module(buffer);
+ return new WebAssembly.Instance(module);
+}
+contextGroup.addScript(instantiate.toString());
+
+Protocol.Debugger.onPaused(async msg => {
+ await session.logSourceLocation(msg.params.callFrames[0].location);
+ Protocol.Debugger.resume();
+});
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating.');
+ // Spawn asynchronously:
+ Protocol.Runtime.evaluate(
+ {'expression': `const instance = instantiate(${module_bytes});`});
+ InspectorTest.log(
+ 'Waiting for wasm script (ignoring first non-wasm script).');
+ const [, {params: wasm_script}] = await Protocol.Debugger.onceScriptParsed(2);
+ InspectorTest.log('Setting breakpoint.');
+ await Protocol.Debugger.setBreakpoint({
+ 'location': {
+ 'scriptId': wasm_script.scriptId,
+ 'lineNumber': 0,
+ 'columnNumber': func.body_offset
+ }
+ });
+ for (let run of [0, 1]) {
+ InspectorTest.log('Calling func.');
+ await Protocol.Runtime.evaluate({'expression': 'instance.exports.func()'});
+ InspectorTest.log('func returned.');
+ if (run == 1) continue;
+ InspectorTest.log('Restarting debugger.');
+ await Protocol.Debugger.disable();
+ await Protocol.Debugger.enable();
+ }
+ InspectorTest.log('Finished.');
+ InspectorTest.completeTest();
+})();
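The instantiate helper above rebuilds the module from the JSON-serialized builder bytes; an equivalent, more compact form (a sketch, assuming bytes is the parsed array produced by builder.toArray()) would be:

  function instantiateCompact(bytes) {
    // Uint8Array.from copies the byte values; .buffer is the backing ArrayBuffer.
    return new WebAssembly.Instance(
        new WebAssembly.Module(Uint8Array.from(bytes).buffer));
  }

The test then relies on Debugger.disable/Debugger.enable dropping all breakpoints, which is why only the first call to func pauses in the expected output.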
diff --git a/deps/v8/test/inspector/debugger/wasm-debug-evaluate-expected.txt b/deps/v8/test/inspector/debugger/wasm-debug-evaluate-expected.txt
new file mode 100644
index 0000000000..de08ecbf02
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-debug-evaluate-expected.txt
@@ -0,0 +1,5 @@
+Tests wasm debug-evaluate
+Test: TestGetMemory
+Result: 2
+Expected: 2
+Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-debug-evaluate.js b/deps/v8/test/inspector/debugger/wasm-debug-evaluate.js
new file mode 100644
index 0000000000..e9776ce81e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-debug-evaluate.js
@@ -0,0 +1,155 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-expose-debug-eval
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests wasm debug-evaluate');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function instantiate(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ let module = new WebAssembly.Module(buffer);
+ return new WebAssembly.Instance(module);
+}
+contextGroup.addScript(instantiate.toString());
+
+function printFailure(message) {
+ if (!message.result) {
+ InspectorTest.logMessage(message);
+ }
+ return message;
+}
+
+async function getWasmScript() {
+ while (true) {
+ const script = await Protocol.Debugger.onceScriptParsed();
+ if (script.params.url.startsWith('wasm://')) return script.params;
+ }
+}
+
+async function handleDebuggerPaused(data, messageObject) {
+ const topFrameId = messageObject.params.callFrames[0].callFrameId;
+ const params = {callFrameId: topFrameId, evaluator: data};
+ try {
+ const evalResult = await Protocol.Debugger.executeWasmEvaluator(params);
+ InspectorTest.log('Result: ' + evalResult.result.result.value);
+ } catch (err) {
+ InspectorTest.log(
+ 'Eval failed: ' + err + '\nGot: ' + JSON.stringify(evalResult));
+ }
+ await Protocol.Debugger.resume();
+}
+
+async function runTest(testName, breakLine, debuggeeBytes, snippetBytes) {
+ try {
+ await Protocol.Debugger.onPaused(
+ handleDebuggerPaused.bind(null, snippetBytes));
+ InspectorTest.log('Test: ' + testName);
+ const scriptListener = getWasmScript();
+ const module = JSON.stringify(debuggeeBytes);
+ await Protocol.Runtime.evaluate(
+ {'expression': `const instance = instantiate(${module})`});
+ const script = await scriptListener;
+ const msg = await Protocol.Debugger.setBreakpoint({
+ 'location': {
+ 'scriptId': script.scriptId,
+ 'lineNumber': 0,
+ 'columnNumber': breakLine
+ }
+ });
+ printFailure(msg);
+ const eval = await Protocol.Runtime.evaluate(
+ {'expression': 'instance.exports.main()'});
+ InspectorTest.log(
+ 'Expected: ' + String.fromCharCode(eval.result.result.value));
+ InspectorTest.log('Finished!');
+ } catch (err) {
+ InspectorTest.log(err.message);
+ }
+}
+
+// copied from v8
+function encode64(data) {
+ const BASE =
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+ const PAD = '=';
+ var ret = '';
+ var leftchar = 0;
+ var leftbits = 0;
+ for (var i = 0; i < data.length; i++) {
+ leftchar = (leftchar << 8) | data[i];
+ leftbits += 8;
+ while (leftbits >= 6) {
+ const curr = (leftchar >> (leftbits - 6)) & 0x3f;
+ leftbits -= 6;
+ ret += BASE[curr];
+ }
+ }
+ if (leftbits == 2) {
+ ret += BASE[(leftchar & 3) << 4];
+ ret += PAD + PAD;
+ } else if (leftbits == 4) {
+ ret += BASE[(leftchar & 0xf) << 2];
+ ret += PAD;
+ }
+ return ret;
+}
+
+(async () => {
+ try {
+ await Protocol.Debugger.enable();
+
+ await (async function TestGetMemory() {
+ const debuggee_builder = new WasmModuleBuilder();
+ debuggee_builder.addMemory(256, 256);
+ const mainFunc =
+ debuggee_builder.addFunction('main', kSig_i_v)
+ .addBody([
+ // clang-format off
+ kExprI32Const, 32,
+ kExprI32Const, 50,
+ kExprI32StoreMem, 0, 0,
+ kExprI32Const, 32,
+ kExprI32LoadMem, 0, 0,
+ kExprReturn
+ // clang-format on
+ ])
+ .exportAs('main');
+
+ const snippet_builder = new WasmModuleBuilder();
+ snippet_builder.addMemory(1, 1);
+ const getMemoryIdx = snippet_builder.addImport(
+ 'env', '__getMemory', makeSig([kWasmI32, kWasmI32, kWasmI32], []));
+ const heapBase = 32; // Just pick some position in memory
+ snippet_builder.addFunction('wasm_format', kSig_i_v)
+ .addBody([
+ // clang-format off
+ // __getMemory(32, 4, heapBase)
+ kExprI32Const, 32, kExprI32Const, 4, kExprI32Const, heapBase,
+ kExprCallFunction, getMemoryIdx,
+ // return heapBase
+ kExprI32Const, heapBase,
+ kExprReturn
+ // clang-format on
+ ])
+ .exportAs('wasm_format');
+
+ const debuggeeModule = debuggee_builder.toArray();
+ await runTest(
+ 'TestGetMemory', mainFunc.body_offset + 9, debuggeeModule,
+ encode64(snippet_builder.toArray()));
+ })();
+
+ } catch (err) {
+ InspectorTest.log(err)
+ }
+ InspectorTest.completeTest();
+})();
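The encode64 helper above is a plain base64 encoder over a byte array (used here instead of btoa, which may not be available in the inspector test shell); a quick sanity check of its output:

  // 'h' = 104, 'i' = 105; base64 of "hi" is "aGk=".
  encode64([104, 105]);  // => "aGk=" (matches btoa('hi') where btoa exists)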
diff --git a/deps/v8/test/inspector/debugger/wasm-global-names.js b/deps/v8/test/inspector/debugger/wasm-global-names.js
index 22d0eb30ea..70e6b0f8dd 100644
--- a/deps/v8/test/inspector/debugger/wasm-global-names.js
+++ b/deps/v8/test/inspector/debugger/wasm-global-names.js
@@ -58,16 +58,16 @@ function test(moduleBytes) {
InspectorTest.log('Paused in debugger.');
let scopeChain = callFrames[0].scopeChain;
for (let scope of scopeChain) {
- if (scope.type != 'global') continue;
-
- let globalObjectProps = (await Protocol.Runtime.getProperties({
+ if (scope.type != 'module') continue;
+ let moduleObjectProps = (await Protocol.Runtime.getProperties({
'objectId': scope.object.objectId
})).result.result;
- for (let prop of globalObjectProps) {
+ for (let prop of moduleObjectProps) {
+ if (prop.name != 'globals') continue;
let subProps = (await Protocol.Runtime.getProperties({
- objectId: prop.value.objectId
- })).result.result;
+ objectId: prop.value.objectId
+ })).result.result;
let values = subProps.map((value) => `"${value.name}"`).join(', ');
InspectorTest.log(` ${prop.name}: {${values}}`);
}
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
index 04c48c92f6..ed457e29c4 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers-expected.txt
@@ -4,100 +4,100 @@ Testing i32.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
-Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
-Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
-Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
-Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
-Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
-Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
-Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
-Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
-Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
-Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
-Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
-Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
-Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
-Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
-Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
-Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
-Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
-Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+Paused at offset 48; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: []
+Paused at offset 50; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0]
+Paused at offset 52; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1]
+Paused at offset 54; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2]
+Paused at offset 56; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3]
+Paused at offset 58; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4]
+Paused at offset 60; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 39]
+Paused at offset 74; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 42]
+Paused at offset 75; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 44]
+Paused at offset 76; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 45]
+Paused at offset 77; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [45]
main returned.
Testing i64.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
-Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
-Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
-Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
-Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
-Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
-Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
-Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
-Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
-Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
-Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
-Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
-Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
-Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
-Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
-Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
-Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
-Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
-Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+Paused at offset 48; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: []
+Paused at offset 50; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0]
+Paused at offset 52; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1]
+Paused at offset 54; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2]
+Paused at offset 56; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3]
+Paused at offset 58; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4]
+Paused at offset 60; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 39]
+Paused at offset 74; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 42]
+Paused at offset 75; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 44]
+Paused at offset 76; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 45]
+Paused at offset 77; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [45]
main returned.
Testing f32.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
-Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
-Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
-Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
-Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
-Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
-Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
-Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
-Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
-Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
-Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
-Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
-Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
-Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
-Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
-Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
-Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
-Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
-Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+Paused at offset 48; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: []
+Paused at offset 50; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0]
+Paused at offset 52; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1]
+Paused at offset 54; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2]
+Paused at offset 56; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3]
+Paused at offset 58; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4]
+Paused at offset 60; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 39]
+Paused at offset 74; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 42]
+Paused at offset 75; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 44]
+Paused at offset 76; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 45]
+Paused at offset 77; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [45]
main returned.
Testing f64.
Waiting for wasm script.
Setting 20 breakpoints.
Calling main.
-Paused at offset 48; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : []
-Paused at offset 50; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0]
-Paused at offset 52; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1]
-Paused at offset 54; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2]
-Paused at offset 56; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3]
-Paused at offset 58; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4]
-Paused at offset 60; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5]
-Paused at offset 62; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6]
-Paused at offset 64; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7]
-Paused at offset 66; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8]
-Paused at offset 68; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-Paused at offset 69; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 7, 17]
-Paused at offset 70; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 6, 24]
-Paused at offset 71; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 5, 30]
-Paused at offset 72; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 4, 35]
-Paused at offset 73; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 3, 39]
-Paused at offset 74; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 2, 42]
-Paused at offset 75; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 1, 44]
-Paused at offset 76; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [0, 45]
-Paused at offset 77; locals : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; stack : [45]
+Paused at offset 48; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: []
+Paused at offset 50; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0]
+Paused at offset 52; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1]
+Paused at offset 54; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2]
+Paused at offset 56; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3]
+Paused at offset 58; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4]
+Paused at offset 60; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5]
+Paused at offset 62; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6]
+Paused at offset 64; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7]
+Paused at offset 66; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+Paused at offset 68; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+Paused at offset 69; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 7, 17]
+Paused at offset 70; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 6, 24]
+Paused at offset 71; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 5, 30]
+Paused at offset 72; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 4, 35]
+Paused at offset 73; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 3, 39]
+Paused at offset 74; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 2, 42]
+Paused at offset 75; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 1, 44]
+Paused at offset 76; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [0, 45]
+Paused at offset 77; locals: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; wasm-expression-stack: [45]
main returned.
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
index a8f8b65586..1d5863157d 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --debug-in-liftoff
-
const {session, contextGroup, Protocol} =
InspectorTest.start('Test inspecting register values in Liftoff.');
@@ -37,18 +35,22 @@ Protocol.Debugger.onPaused(async msg => {
// Inspect only the top wasm frame.
var frame = msg.params.callFrames[0];
for (var scope of frame.scopeChain) {
- if (scope.type != 'local') continue;
+ if (scope.type == 'module') continue;
var scope_properties =
await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- for (var value of scope_properties.result.result) {
- let msg = await Protocol.Runtime.getProperties(
+ if (scope.type == 'local') {
+ for (var value of scope_properties.result.result) {
+ let msg = await Protocol.Runtime.getProperties(
{objectId: value.value.objectId});
- let str = msg.result.result.map(elem => elem.value.value).join(', ');
- line.push(`${value.name} : [${str}]`);
+ let str = msg.result.result.map(elem => elem.value.value).join(', ');
+ line.push(`${value.name}: [${str}]`);
+ }
+ } else {
+ let str = scope_properties.result.result.map(elem => elem.value.value).join(', ');
+ line.push(`${scope.type}: [${str}]`);
}
- InspectorTest.log(line.join('; '));
}
-
+ InspectorTest.log(line.join('; '));
Protocol.Debugger.resume();
});
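Because this hunk mixes reindentation with real changes, the resulting scope-printing loop in the onPaused handler, reconstructed from the '+' lines above, reads roughly as follows (line and frame are set up earlier in the handler):

  for (var scope of frame.scopeChain) {
    if (scope.type == 'module') continue;  // the module scope is skipped entirely
    var scope_properties =
        await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
    if (scope.type == 'local') {
      // Expand each property of the local scope into its element values.
      for (var value of scope_properties.result.result) {
        let msg = await Protocol.Runtime.getProperties(
            {objectId: value.value.objectId});
        let str = msg.result.result.map(elem => elem.value.value).join(', ');
        line.push(`${value.name}: [${str}]`);
      }
    } else {
      // Other scopes (e.g. wasm-expression-stack) are printed as a flat list.
      let str = scope_properties.result.result
                    .map(elem => elem.value.value).join(', ');
      line.push(`${scope.type}: [${str}]`);
    }
  }
  InspectorTest.log(line.join('; '));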
diff --git a/deps/v8/test/inspector/debugger/wasm-memory-names-expected.txt b/deps/v8/test/inspector/debugger/wasm-memory-names-expected.txt
new file mode 100644
index 0000000000..e73262b9ab
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-memory-names-expected.txt
@@ -0,0 +1,19 @@
+Test wasm memory names
+Waiting for wasm script to be parsed.
+Setting breakpoint in wasm.
+Running main.
+Paused in debugger.
+name: memory0
+Finished.
+Waiting for wasm script to be parsed.
+Setting breakpoint in wasm.
+Running main.
+Paused in debugger.
+name: exported_memory
+Finished.
+Waiting for wasm script to be parsed.
+Setting breakpoint in wasm.
+Running main.
+Paused in debugger.
+name: module_name.imported_mem
+Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-memory-names.js b/deps/v8/test/inspector/debugger/wasm-memory-names.js
new file mode 100644
index 0000000000..769b0f200c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-memory-names.js
@@ -0,0 +1,117 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test wasm memory names');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let func;
+
+// No name memory.
+function createModuleBytesUnnamedMemory() {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1);
+ func = builder.addFunction('main', kSig_i_i)
+ .addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0])
+ .exportAs('main');
+
+ return JSON.stringify(builder.toArray());
+}
+
+// Exported memory.
+function createModuleBytesExportedMemory() {
+ let builder = new WasmModuleBuilder();
+ var memory = builder.addMemory(1, 1);
+ builder.addExportOfKind('exported_memory', kExternalMemory);
+ func = builder.addFunction('main', kSig_i_i)
+ .addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0])
+ .exportAs('main');
+
+ return JSON.stringify(builder.toArray());
+}
+
+// Imported memory.
+function createModuleBytesImportedMemory() {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory('module_name', 'imported_mem', 0, 1);
+ func = builder.addFunction('main', kSig_i_i)
+ .addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0])
+ .exportAs('main');
+
+ return JSON.stringify(builder.toArray());
+}
+
+function createInstance(moduleBytes) {
+ let module = new WebAssembly.Module((new Uint8Array(moduleBytes)).buffer);
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 1});
+ instance =
+ new WebAssembly.Instance(module, {module_name: {imported_mem: memory}});
+}
+
+async function logMemoryName(msg, Protocol) {
+ let callFrames = msg.params.callFrames;
+ InspectorTest.log('Paused in debugger.');
+
+ let scopeChain = callFrames[0].scopeChain;
+ for (let scope of scopeChain) {
+ if (scope.type != 'module') continue;
+ let moduleObjectProps = (await Protocol.Runtime.getProperties({
+ 'objectId': scope.object.objectId
+ })).result.result;
+
+ for (let prop of moduleObjectProps) {
+ InspectorTest.log(`name: ${prop.name}`);
+ }
+ }
+}
+
+async function check(moduleBytes) {
+ Protocol.Runtime.evaluate({
+ expression: `
+ createInstance(${moduleBytes});`
+ });
+
+ InspectorTest.log('Waiting for wasm script to be parsed.');
+ let scriptId;
+ while (true) {
+ let msg = await Protocol.Debugger.onceScriptParsed();
+ if (msg.params.url.startsWith('wasm://')) {
+ scriptId = msg.params.scriptId;
+ break;
+ }
+ }
+
+ InspectorTest.log('Setting breakpoint in wasm.');
+ await Protocol.Debugger.setBreakpoint(
+ {location: {scriptId, lineNumber: 0, columnNumber: func.body_offset}});
+
+ InspectorTest.log('Running main.');
+ Protocol.Runtime.evaluate({expression: 'instance.exports.main()'});
+
+ let msg = await Protocol.Debugger.oncePaused();
+ await logMemoryName(msg, Protocol);
+ await Protocol.Debugger.resume();
+
+ InspectorTest.log('Finished.');
+}
+
+contextGroup.addScript(`
+ let instance;
+ ${createInstance.toString()}`);
+
+(async function test() {
+ try {
+ Protocol.Debugger.enable();
+
+ await check(createModuleBytesUnnamedMemory());
+ await check(createModuleBytesExportedMemory());
+ await check(createModuleBytesImportedMemory());
+
+ } catch (exc) {
+ InspectorTest.log(`Failed with exception: ${exc}.`);
+ } finally {
+ InspectorTest.completeTest();
+ }
+})();
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 45bff036b7..0b91c1a904 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -12,18 +12,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 69: Wasm opcode 0x41
Scope:
at func (0:69):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 4 (number), "var1": 0 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -33,19 +31,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 71: Wasm opcode 0x21
Scope:
at func (0:71):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 4 (number), "var1": 0 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack: "0": 11 (number)
- scope (wasm-expression-stack):
0: 11 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -55,18 +51,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 73: Wasm opcode 0x41
Scope:
at func (0:73):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 0 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -76,19 +70,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 75: Wasm opcode 0x21
Scope:
at func (0:75):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 0 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack: "0": 47 (number)
- scope (wasm-expression-stack):
0: 47 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -98,18 +90,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 77: Wasm opcode 0x42
Scope:
at func (0:77):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -119,19 +109,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 88: Wasm opcode 0x21
Scope:
at func (0:88):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 0 (number), "unicodeā˜¼f64": 0 (number)
- stack: "0": 9223372036854775807 (string)
- scope (wasm-expression-stack):
0: 9223372036854775807 (string)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -141,18 +129,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 90: Wasm opcode 0x42
Scope:
at func (0:90):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 9223372036854775807 (string), "unicodeā˜¼f64": 0 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -162,19 +148,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 101: Wasm opcode 0x21
Scope:
at func (0:101):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": 9223372036854775807 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": -9223372036854775808 (string)
- scope (wasm-expression-stack):
0: -9223372036854775808 (string)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -184,18 +168,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 103: Wasm opcode 0x41
Scope:
at func (0:103):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -205,19 +187,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 105: Wasm opcode 0xb8
Scope:
at func (0:105):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -227,19 +207,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 106: Wasm opcode 0x41
Scope:
at func (0:106):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -249,20 +227,18 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 108: Wasm opcode 0xb8
Scope:
at func (0:108):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": 1 (number), "1": 7 (number)
- scope (wasm-expression-stack):
0: 1 (number)
1: 7 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -272,20 +248,18 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 109: Wasm opcode 0xa3
Scope:
at func (0:109):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": 1 (number), "1": 7 (number)
- scope (wasm-expression-stack):
0: 1 (number)
1: 7 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -295,19 +269,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 110: Wasm opcode 0x21
Scope:
at func (0:110):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0 (number)
- stack: "0": 0.14285714285714285 (number)
- scope (wasm-expression-stack):
0: 0.14285714285714285 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -317,18 +289,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 112: Wasm opcode 0x41
Scope:
at func (0:112):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0.14285714285714285 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -338,19 +308,17 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 114: Wasm opcode 0x24
Scope:
at func (0:114):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0.14285714285714285 (number)
- stack: "0": 15 (number)
- scope (wasm-expression-stack):
0: 15 (number)
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -360,18 +328,16 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 116: Wasm opcode 0x0b
Scope:
at func (0:116):
- - scope (global):
+ - scope (module):
globals: "global0": 15 (number)
- scope (local):
locals: "0": 0 (number), "i32Arg": 11 (number), "var1": 47 (number), "i64_local": -9223372036854775808 (string), "unicodeā˜¼f64": 0.14285714285714285 (number)
- stack:
- scope (wasm-expression-stack):
at call_func (0:58):
- - scope (global):
+ - scope (module):
globals: "global0": 15 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -381,11 +347,10 @@ Paused:
Script wasm://wasm/d374ef0a byte offset 60: Wasm opcode 0x0b
Scope:
at call_func (0:60):
- - scope (global):
+ - scope (module):
globals: "global0": 15 (number)
- scope (local):
locals: "var0": 4 (number), "var1": 7.199999809265137 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
index 292f0074a3..8f853c8cb5 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
@@ -13,27 +13,24 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 85: Wasm opcode 0x20
Scope:
at C (interpreted) (0:85):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
- stack:
- scope (wasm-expression-stack):
at B (liftoff) (0:76):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -43,28 +40,25 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 87: Wasm opcode 0x24
Scope:
at C (interpreted) (0:87):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
- stack: "0": 42 (number)
- scope (wasm-expression-stack):
0: 42 (number)
at B (liftoff) (0:76):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 0 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -74,27 +68,24 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 89: Wasm opcode 0x41
Scope:
at C (interpreted) (0:89):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
- stack:
- scope (wasm-expression-stack):
at B (liftoff) (0:76):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -104,28 +95,25 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 91: Wasm opcode 0x21
Scope:
at C (interpreted) (0:91):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 0 (number)
- stack: "0": 47 (number)
- scope (wasm-expression-stack):
0: 47 (number)
at B (liftoff) (0:76):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -135,27 +123,24 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 93: Wasm opcode 0x0b
Scope:
at C (interpreted) (0:93):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "i32_arg": 42 (number), "i32_local": 47 (number)
- stack:
- scope (wasm-expression-stack):
at B (liftoff) (0:76):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -165,20 +150,18 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 78: Wasm opcode 0x1a
Scope:
at B (liftoff) (0:78):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number), "1": 3 (number)
- scope (wasm-expression-stack):
0: 42 (number)
1: 3 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -188,19 +171,17 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 79: Wasm opcode 0x1a
Scope:
at B (liftoff) (0:79):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack: "0": 42 (number)
- scope (wasm-expression-stack):
0: 42 (number)
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -210,18 +191,16 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 80: Wasm opcode 0x0b
Scope:
at B (liftoff) (0:80):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "0": 0 (number), "i32_arg": 42 (number), "i32_local": 0 (number), "f32_local": 7.199999809265137 (number), "var5": 0 (number)
- stack:
- scope (wasm-expression-stack):
at A (liftoff) (0:54):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
@@ -231,11 +210,10 @@ Paused:
Script wasm://wasm/c4eb034a byte offset 56: Wasm opcode 0x0b
Scope:
at A (liftoff) (0:56):
- - scope (global):
+ - scope (module):
globals: "global0": 42 (number)
- scope (local):
locals: "var0": 42 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
- scope (global):
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
index ef7ec63b03..df0aa575d3 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --debug-in-liftoff
-
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test retrieving scope information from compiled Liftoff frames');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index 413aa69ce1..69e6f70165 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --debug-in-liftoff
-
let {session, contextGroup, Protocol} = InspectorTest.start(
'Test retrieving scope information when pausing in wasm functions');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
index 7b76eab18b..9169a01a89 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
@@ -1,15 +1,27 @@
Tests how wasm scripts are reported
Check that each inspector gets a wasm script at module creation time.
-Session #1: Script #0 parsed. URL: wasm://wasm/7b04570e. Source map URL: , module begin: 0, module end: 0, code offset: 34
-Session #2: Script #0 parsed. URL: wasm://wasm/7b04570e. Source map URL: , module begin: 0, module end: 0, code offset: 34
-Session #1: Script #1 parsed. URL: wasm://wasm/ba7c35be. Source map URL: wasm://dwarf, module begin: 0, module end: 0, code offset: 34
-Session #2: Script #1 parsed. URL: wasm://wasm/ba7c35be. Source map URL: wasm://dwarf, module begin: 0, module end: 0, code offset: 34
-Session #1: Script #2 parsed. URL: wasm://wasm/1baa71fe. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
-Session #2: Script #2 parsed. URL: wasm://wasm/1baa71fe. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
-Session #1: Script #3 parsed. URL: wasm://wasm/95e97206. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
-Session #2: Script #3 parsed. URL: wasm://wasm/95e97206. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
-Session #1: Script #4 parsed. URL: wasm://wasm/7ab47392. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
-Session #2: Script #4 parsed. URL: wasm://wasm/7ab47392. Source map URL: abc, module begin: 0, module end: 0, code offset: 34
+Session #1: Script #0 parsed. URL: wasm://wasm/7b04570e. Script ID: 0, Source map URL: , debug symbols: None:undefined. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #0 parsed. URL: wasm://wasm/7b04570e. Script ID: 0, Source map URL: , debug symbols: None:undefined. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #1 parsed. URL: wasm://wasm/7b04570e. Script ID: 0, Source map URL: , debug symbols: None:undefined. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #1 parsed. URL: wasm://wasm/7b04570e. Script ID: 0, Source map URL: , debug symbols: None:undefined. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #2 parsed. URL: wasm://wasm/5e8bdc5a. Script ID: 1, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #2 parsed. URL: wasm://wasm/5e8bdc5a. Script ID: 1, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #3 parsed. URL: wasm://wasm/ba7c35be. Script ID: 2, Source map URL: , debug symbols: EmbeddedDWARF:undefined. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #3 parsed. URL: wasm://wasm/ba7c35be. Script ID: 2, Source map URL: , debug symbols: EmbeddedDWARF:undefined. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #4 parsed. URL: wasm://wasm/1baa71fe. Script ID: 3, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #4 parsed. URL: wasm://wasm/1baa71fe. Script ID: 3, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #5 parsed. URL: wasm://wasm/0f5a61aa. Script ID: 4, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #5 parsed. URL: wasm://wasm/0f5a61aa. Script ID: 4, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #6 parsed. URL: wasm://wasm/2b982afe. Script ID: 5, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #6 parsed. URL: wasm://wasm/2b982afe. Script ID: 5, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #7 parsed. URL: wasm://wasm/63e4f84a. Script ID: 6, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #7 parsed. URL: wasm://wasm/63e4f84a. Script ID: 6, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #8 parsed. URL: wasm://wasm/2c194a36. Script ID: 7, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #8 parsed. URL: wasm://wasm/2c194a36. Script ID: 7, Source map URL: , debug symbols: ExternalDWARF:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #9 parsed. URL: wasm://wasm/95e97206. Script ID: 8, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #9 parsed. URL: wasm://wasm/95e97206. Script ID: 8, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #1: Script #10 parsed. URL: wasm://wasm/7ab47392. Script ID: 9, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
+Session #2: Script #10 parsed. URL: wasm://wasm/7ab47392. Script ID: 9, Source map URL: abc, debug symbols: SourceMap:abc. module begin: 0, module end: 0, code offset: 34
Session #1: Source for wasm://wasm/7b04570e:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
@@ -18,6 +30,22 @@ Session #2: Source for wasm://wasm/7b04570e:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
+Session #1: Source for wasm://wasm/7b04570e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/7b04570e:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/5e8bdc5a:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/5e8bdc5a:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
Session #1: Source for wasm://wasm/ba7c35be:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
@@ -34,6 +62,38 @@ Session #2: Source for wasm://wasm/1baa71fe:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
Exports: [main: function]
+Session #1: Source for wasm://wasm/0f5a61aa:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/0f5a61aa:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/2b982afe:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/2b982afe:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/63e4f84a:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/63e4f84a:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #1: Source for wasm://wasm/2c194a36:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
+Session #2: Source for wasm://wasm/2c194a36:
+Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 19 14 2e 65 78 74 65 72 6e 61 6c 5f 64 65 62 75 67 5f 69 6e 66 6f 03 61 62 63 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
+Imports: []
+Exports: [main: function]
Session #1: Source for wasm://wasm/95e97206:
Raw: 00 61 73 6d 01 00 00 00 01 07 02 60 00 00 60 00 00 03 03 02 00 01 07 08 01 04 6d 61 69 6e 00 01 0a 0e 02 03 00 01 0b 08 00 02 40 41 02 1a 0b 0b 00 11 0b 2e 64 65 62 75 67 5f 69 6e 66 6f 01 02 03 04 05 00 15 10 73 6f 75 72 63 65 4d 61 70 70 69 6e 67 55 52 4c 03 61 62 63 00 1b 04 6e 61 6d 65 01 14 02 00 0b 6e 6f 70 46 75 6e 63 74 69 6f 6e 01 04 6d 61 69 6e
Imports: []
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 5ad5ff09de..e7e158e9d5 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -35,6 +35,15 @@ function testFunction(bytes) {
new WebAssembly.Module(new Uint8Array(bytes));
}
+// Generate stable IDs.
+let scriptIds = {};
+function nextStableId(id) {
+ if (!(id in scriptIds)) {
+ scriptIds[id] = Object.keys(scriptIds).length;
+ }
+ return scriptIds[id];
+}
+
contextGroup.addScript(testFunction.toString(), 0, 0, 'v8://test/testFunction');
InspectorTest.log(
@@ -43,36 +52,69 @@ InspectorTest.log(
// Sample .debug_info section.
// Content doesn't matter, as we don't try to parse it in V8,
// but should be non-empty to check that we're skipping it correctly.
-const dwarfSection = { name: '.debug_info', value: [1, 2, 3, 4, 5] };
+const embeddedDWARFSection = {
+ name: '.debug_info',
+ value: [1, 2, 3, 4, 5]
+};
+
+// Sample external_debug_info section set to "abc".
+const externalDWARFSection = {
+ name: '.external_debug_info',
+ value: [3, 97, 98, 99]
+};
// Sample sourceMappingURL section set to "abc".
-const sourceMapSection = { name: 'sourceMappingURL', value: [3, 97, 98, 99] };
+const sourceMapSection = {
+ name: 'sourceMappingURL',
+ value: [3, 97, 98, 99]
+};
-sessions[0].Protocol.Runtime
+sessions[0]
+ .Protocol.Runtime
.evaluate({
'expression': `//# sourceURL=v8://test/runTestRunction
// no debug info
testFunction([${createModule()}]);
- // DWARF
- testFunction([${createModule(dwarfSection)}]);
+ // shared script for identical modules
+ testFunction([${createModule()}]);
+
+ // External DWARF
+ testFunction([${createModule(externalDWARFSection)}]);
+
+ // Embedded DWARF
+ testFunction([${createModule(embeddedDWARFSection)}]);
// Source map
testFunction([${createModule(sourceMapSection)}]);
- // DWARF + source map
- testFunction([${createModule(dwarfSection, sourceMapSection)}]);
+ // SourceMap + External DWARF
+ testFunction([${createModule(sourceMapSection, externalDWARFSection)}]);
+
+ // External DWARF + SourceMap (different order)
+ testFunction([${createModule(externalDWARFSection, sourceMapSection)}]);
+
+ // Embedded DWARF + External DWARF
+ testFunction([${
+ createModule(embeddedDWARFSection, externalDWARFSection)}]);
+
+ // External + Embedded DWARF (different order)
+ testFunction([${
+ createModule(externalDWARFSection, embeddedDWARFSection)}]);
+
+ // Embedded DWARF + source map
+ testFunction([${createModule(embeddedDWARFSection, sourceMapSection)}]);
- // Source map + DWARF (different order)
- testFunction([${createModule(sourceMapSection, dwarfSection)}]);
+ // Source map + Embedded DWARF (different order)
+ testFunction([${createModule(sourceMapSection, embeddedDWARFSection)}]);
`
})
- .then(() => (
- // At this point all scripts were parsed.
- // Stop tracking and wait for script sources in each session.
- Promise.all(sessions.map(session => session.getScripts()))
- ))
+ .then(
+ () => (
+ // At this point all scripts were parsed.
+ // Stop tracking and wait for script sources in each session.
+ Promise.all(sessions.map(session => session.getScripts()))))
.catch(err => {
InspectorTest.log(err.stack);
})
@@ -85,11 +127,21 @@ function trackScripts(debuggerParams) {
Protocol.Debugger.enable(debuggerParams);
Protocol.Debugger.onScriptParsed(handleScriptParsed);
- async function loadScript(
- {url, scriptId, sourceMapURL, startColumn, endColumn, codeOffset}) {
+ async function loadScript({
+ url,
+ scriptId,
+ sourceMapURL,
+ startColumn,
+ endColumn,
+ codeOffset,
+ debugSymbols
+ }) {
+ let stableId = nextStableId(scriptId);
InspectorTest.log(`Session #${sessionId}: Script #${
- scripts.length} parsed. URL: ${url}. Source map URL: ${
- sourceMapURL}, module begin: ${startColumn}, module end: ${endColumn}, code offset: ${codeOffset}`);
+ scripts.length} parsed. URL: ${url}. Script ID: ${
+ stableId}, Source map URL: ${sourceMapURL}, debug symbols: ${
+ debugSymbols.type}:${debugSymbols.externalURL}. module begin: ${
+ startColumn}, module end: ${endColumn}, code offset: ${codeOffset}`);
let {result: {scriptSource, bytecode}} =
await Protocol.Debugger.getScriptSource({scriptId});
if (bytecode) {
@@ -101,10 +153,17 @@ function trackScripts(debuggerParams) {
bytecode = InspectorTest.decodeBase64(bytecode);
// Check that it can be parsed back to a WebAssembly module.
let module = new WebAssembly.Module(bytecode);
- scriptSource = `
+ scriptSource =
+ `
Raw: ${Array.from(bytecode, b => ('0' + b.toString(16)).slice(-2)).join(' ')}
-Imports: [${WebAssembly.Module.imports(module).map(i => `${i.name}: ${i.kind} from "${i.module}"`).join(', ')}]
-Exports: [${WebAssembly.Module.exports(module).map(e => `${e.name}: ${e.kind}`).join(', ')}]
+Imports: [${
+ WebAssembly.Module.imports(module)
+ .map(i => `${i.name}: ${i.kind} from "${i.module}"`)
+ .join(', ')}]
+Exports: [${
+ WebAssembly.Module.exports(module)
+ .map(e => `${e.name}: ${e.kind}`)
+ .join(', ')}]
`.trim();
}
InspectorTest.log(`Session #${sessionId}: Source for ${url}:`);
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
index 43827fef3b..c85aeeab2e 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
@@ -7,15 +7,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
Scope:
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -32,15 +30,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
Scope:
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -48,10 +44,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
Scope:
at wasm_B (0:45):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -59,10 +54,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
Scope:
at wasm_B (0:47):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack: "0": 3 (number)
- scope (wasm-expression-stack):
0: 3 (number)
at (anonymous) (0:17):
@@ -71,10 +65,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
Scope:
at wasm_B (0:49):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -82,10 +75,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
Scope:
at wasm_B (0:51):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack: "0": 3 (number)
- scope (wasm-expression-stack):
0: 3 (number)
at (anonymous) (0:17):
@@ -94,10 +86,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
Scope:
at wasm_B (0:53):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack: "0": 3 (number), "1": 1 (number)
- scope (wasm-expression-stack):
0: 3 (number)
1: 1 (number)
@@ -107,10 +98,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
Scope:
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 3 (number)
- stack: "0": 2 (number)
- scope (wasm-expression-stack):
0: 2 (number)
at (anonymous) (0:17):
@@ -119,15 +109,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
Scope:
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -135,15 +123,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
Scope:
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -151,10 +137,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
Scope:
at wasm_B (0:45):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -162,10 +147,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
Scope:
at wasm_B (0:47):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack: "0": 2 (number)
- scope (wasm-expression-stack):
0: 2 (number)
at (anonymous) (0:17):
@@ -174,10 +158,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
Scope:
at wasm_B (0:49):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -185,10 +168,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
Scope:
at wasm_B (0:51):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack: "0": 2 (number)
- scope (wasm-expression-stack):
0: 2 (number)
at (anonymous) (0:17):
@@ -197,10 +179,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
Scope:
at wasm_B (0:53):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack: "0": 2 (number), "1": 1 (number)
- scope (wasm-expression-stack):
0: 2 (number)
1: 1 (number)
@@ -210,10 +191,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
Scope:
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 2 (number)
- stack: "0": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
at (anonymous) (0:17):
@@ -222,15 +202,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
Scope:
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -238,15 +216,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
Scope:
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -254,10 +230,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
Scope:
at wasm_B (0:45):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -265,10 +240,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
Scope:
at wasm_B (0:47):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack: "0": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
at (anonymous) (0:17):
@@ -277,10 +251,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
Scope:
at wasm_B (0:49):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -288,10 +261,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
Scope:
at wasm_B (0:51):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack: "0": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
at (anonymous) (0:17):
@@ -300,10 +272,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
Scope:
at wasm_B (0:53):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack: "0": 1 (number), "1": 1 (number)
- scope (wasm-expression-stack):
0: 1 (number)
1: 1 (number)
@@ -313,10 +284,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
Scope:
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 1 (number)
- stack: "0": 0 (number)
- scope (wasm-expression-stack):
0: 0 (number)
at (anonymous) (0:17):
@@ -325,15 +295,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
Scope:
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 0 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -341,15 +309,13 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
Scope:
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
- scope (local):
- stack:
- scope (wasm-expression-stack):
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 0 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -357,10 +323,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
Scope:
at wasm_B (0:45):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 0 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
@@ -368,10 +333,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
Scope:
at wasm_B (0:47):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 0 (number)
- stack: "0": 0 (number)
- scope (wasm-expression-stack):
0: 0 (number)
at (anonymous) (0:17):
@@ -380,10 +344,9 @@ Paused:
Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b
Scope:
at wasm_B (0:61):
- - scope (global):
+ - scope (module):
- scope (local):
locals: "var0": 0 (number)
- stack:
- scope (wasm-expression-stack):
at (anonymous) (0:17):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
index 3e8d783af6..8de53c192c 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --debug-in-liftoff
-
const {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts.');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
new file mode 100644
index 0000000000..8b0861de10
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
@@ -0,0 +1,56 @@
+Test scope inspection and stepping after a trap.
+Instantiating.
+Calling div function.
+Paused at:
+--- 0 ---
+Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d
+scope at div (0:46):
+ locals: "a": 1, "b": 0, "unused": 4711, "local_zero": 0, "local_const_11": 11
+--- 1 ---
+ try {
+ instance.exports.#div(1, 0, 4711); // traps (div by zero)
+ } catch (e) {
+
+--- 2 ---
+#call_div()
+
+-------------
+-> stepInto
+Paused at:
+--- 0 ---
+ } catch (e) {
+ #e.stack; // step target of first pause
+ }
+
+--- 1 ---
+#call_div()
+
+-------------
+-> resume
+Paused at:
+--- 0 ---
+Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d
+scope at div (0:46):
+ locals: "a": -2147483648, "b": -1, "unused": 4711, "local_zero": 0, "local_const_11": 11
+--- 1 ---
+ try {
+ instance.exports.#div(0x80000000, -1, 4711); // traps (unrepresentable)
+ } catch (e) {
+
+--- 2 ---
+#call_div()
+
+-------------
+-> stepInto
+Paused at:
+--- 0 ---
+ } catch (e) {
+ #e.stack; // step target of second pause
+ }
+
+--- 1 ---
+#call_div()
+
+-------------
+-> resume
+Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap.js b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
new file mode 100644
index 0000000000..c3f5af6dcf
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap.js
@@ -0,0 +1,102 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Test scope inspection and stepping after a trap.');
+session.setupScriptMap();
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+
+// Create a function which computes the div of the first two arguments.
+builder.addFunction('div', kSig_i_iii)
+ .addLocals(
+ {i32_count: 2}, ['a', 'b', 'unused', 'local_zero', 'local_const_11'])
+ .addBody([
+ kExprI32Const, 11, // const 11
+ kExprLocalSet, 4, // set local #4 ('local_const_11')
+ kExprLocalGet, 0, // param 0
+ kExprLocalGet, 1, // param 1
+ kExprI32DivS // div
+ ])
+ .exportFunc();
+
+const module_bytes = JSON.stringify(builder.toArray());
+
+function instantiate(bytes) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes[i] | 0;
+ }
+
+ let module = new WebAssembly.Module(buffer);
+ return new WebAssembly.Instance(module);
+}
+
+function getShortLocationString(location) {
+ return `${location.lineNumber}:${location.columnNumber}`;
+}
+
+let actions =
+ ['stepInto', 'resume', 'stepInto', 'resume', 'stepInto', 'resume'];
+Protocol.Debugger.onPaused(async msg => {
+ InspectorTest.log('Paused at:');
+ for (let [nr, frame] of msg.params.callFrames.entries()) {
+ InspectorTest.log(`--- ${nr} ---`);
+ await session.logSourceLocation(frame.location);
+ if (/^wasm/.test(frame.url)) await printLocalScope(frame);
+ }
+ InspectorTest.log('-------------');
+ let action = actions.shift();
+ InspectorTest.log(`-> ${action}`);
+ Protocol.Debugger[action]();
+});
+
+function call_div() {
+ instance.exports.div(0, 1, 4711); // does not trap
+ try {
+ instance.exports.div(1, 0, 4711); // traps (div by zero)
+ } catch (e) {
+ e.stack; // step target of first pause
+ }
+ try {
+ instance.exports.div(0x80000000, -1, 4711); // traps (unrepresentable)
+ } catch (e) {
+ e.stack; // step target of second pause
+ }
+}
+
+contextGroup.addScript(instantiate.toString());
+contextGroup.addScript(call_div.toString());
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ await Protocol.Debugger.setPauseOnExceptions({state: 'all'});
+ InspectorTest.log('Instantiating.');
+ await Protocol.Runtime.evaluate(
+ {'expression': `const instance = instantiate(${module_bytes});`});
+ InspectorTest.log('Calling div function.');
+ await Protocol.Runtime.evaluate({'expression': 'call_div()'});
+ InspectorTest.log('Finished.');
+ InspectorTest.completeTest();
+})();
+
+async function printLocalScope(frame) {
+ InspectorTest.log(`scope at ${frame.functionName} (${
+ frame.location.lineNumber}:${frame.location.columnNumber}):`);
+ for (let scope of frame.scopeChain) {
+ if (scope.type != 'local') continue;
+ let properties = await Protocol.Runtime.getProperties(
+ {'objectId': scope.object.objectId});
+ for (let value of properties.result.result) {
+ let msg = await Protocol.Runtime.getProperties(
+ {objectId: value.value.objectId});
+ let prop_str = p => `"${p.name}": ${p.value.value}`;
+ let value_str = msg.result.result.map(prop_str).join(', ');
+ InspectorTest.log(` ${value.name}: ${value_str}`);
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
index 54e567e65b..569699ea24 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
@@ -9,11 +9,10 @@ Setting breakpoint on offset 59 (should be propagated to 60, the offset of the c
}
Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {"0":1024}
- scope (wasm-expression-stack):
{"0":1024}
at (anonymous) (0:17):
@@ -22,19 +21,17 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -43,19 +40,17 @@ at (anonymous) (0:17):
Debugger.stepOver called
Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
at wasm_A (0:40):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -64,11 +59,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -77,11 +71,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {"0":1024}
- scope (wasm-expression-stack):
{"0":1024}
at (anonymous) (0:17):
@@ -90,11 +83,10 @@ at (anonymous) (0:17):
Debugger.stepOver called
Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -103,11 +95,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
at wasm_B (0:46):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -116,11 +107,10 @@ at (anonymous) (0:17):
Debugger.resume called
Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1024}
- scope (wasm-expression-stack):
{"0":1024}
at (anonymous) (0:17):
@@ -129,19 +119,17 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -150,11 +138,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -163,11 +150,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
at wasm_B (0:46):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -176,11 +162,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
at wasm_B (0:48):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1}
- scope (wasm-expression-stack):
{"0":1}
at (anonymous) (0:17):
@@ -189,11 +174,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
at wasm_B (0:50):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -202,11 +186,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
at wasm_B (0:52):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1}
- scope (wasm-expression-stack):
{"0":1}
at (anonymous) (0:17):
@@ -215,11 +198,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1,"1":1}
- scope (wasm-expression-stack):
{"0":1,"1":1}
at (anonymous) (0:17):
@@ -228,11 +210,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
at wasm_B (0:55):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":0}
- scope (wasm-expression-stack):
{"0":0}
at (anonymous) (0:17):
@@ -241,11 +222,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
at wasm_B (0:57):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -254,11 +234,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {"0":1024}
- scope (wasm-expression-stack):
{"0":1024}
at (anonymous) (0:17):
@@ -267,19 +246,17 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -288,19 +265,17 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
at wasm_A (0:40):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -309,19 +284,17 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
at wasm_A (0:41):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1024}
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:60):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -330,11 +303,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
at wasm_B (0:62):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
index d3c1932973..70f73d6cac 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
@@ -104,7 +104,7 @@ async function waitForPauseAndStep(stepAction) {
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
for (var scope of frame.scopeChain) {
InspectorTest.logObject(' - scope (' + scope.type + '):');
- if (scope.type === 'global') {
+ if (scope.type === 'module' || scope.type === 'global') {
InspectorTest.logObject(' -- skipped');
} else {
const {result: {result: {value}}} =
@@ -114,9 +114,7 @@ async function waitForPauseAndStep(stepAction) {
returnByValue: true
});
if (scope.type === 'local') {
- if (value.locals)
- InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
- InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
+ InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
} else {
InspectorTest.log(` ${JSON.stringify(value)}`);
}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
index 78c65e01f1..7ff4f58d87 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --debug-in-liftoff
-
let {session, contextGroup, Protocol} =
InspectorTest.start('Tests stepping through wasm scripts by byte offsets');
session.setupScriptMap();
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
index 6e4f8ffc6a..543e424214 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -12,11 +12,10 @@ Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wa
}
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":4}
- stack: {"0":3}
- scope (wasm-expression-stack):
{"0":3}
at (anonymous) (0:17):
@@ -25,11 +24,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -38,18 +36,16 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -58,18 +54,16 @@ at (anonymous) (0:17):
Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -78,11 +72,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -91,11 +84,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":3}
- stack: {"0":2}
- scope (wasm-expression-stack):
{"0":2}
at (anonymous) (0:17):
@@ -104,11 +96,10 @@ at (anonymous) (0:17):
Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -117,11 +108,10 @@ at (anonymous) (0:17):
Debugger.stepOver called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -130,11 +120,10 @@ at (anonymous) (0:17):
Debugger.resume called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":2}
- stack: {"0":1}
- scope (wasm-expression-stack):
{"0":1}
at (anonymous) (0:17):
@@ -143,11 +132,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -156,18 +144,16 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -176,11 +162,10 @@ at (anonymous) (0:17):
Debugger.stepOut called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -189,11 +174,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 45: Wasm opcode 0x20
at wasm_B (0:45):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -202,11 +186,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 47: Wasm opcode 0x04
at wasm_B (0:47):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1}
- scope (wasm-expression-stack):
{"0":1}
at (anonymous) (0:17):
@@ -215,11 +198,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 49: Wasm opcode 0x20
at wasm_B (0:49):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -228,11 +210,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 51: Wasm opcode 0x41
at wasm_B (0:51):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1}
- scope (wasm-expression-stack):
{"0":1}
at (anonymous) (0:17):
@@ -241,11 +222,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 53: Wasm opcode 0x6b
at wasm_B (0:53):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":1,"1":1}
- scope (wasm-expression-stack):
{"0":1,"1":1}
at (anonymous) (0:17):
@@ -254,11 +234,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
at wasm_B (0:54):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":1}
- stack: {"0":0}
- scope (wasm-expression-stack):
{"0":0}
at (anonymous) (0:17):
@@ -267,11 +246,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -280,18 +258,16 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
at wasm_A (0:38):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -300,18 +276,16 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
at wasm_A (0:39):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -320,18 +294,16 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 40: Wasm opcode 0x0b
at wasm_A (0:40):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
- stack: {}
- scope (wasm-expression-stack):
{}
at wasm_B (0:56):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
@@ -340,11 +312,10 @@ at (anonymous) (0:17):
Debugger.stepInto called
Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
at wasm_B (0:58):
- - scope (global):
+ - scope (module):
-- skipped
- scope (local):
locals: {"var0":0}
- stack: {}
- scope (wasm-expression-stack):
{}
at (anonymous) (0:17):
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 17b1d05153..b33751e7c6 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -102,7 +102,7 @@ async function waitForPauseAndStep(stepAction) {
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
for (var scope of frame.scopeChain) {
InspectorTest.logObject(' - scope (' + scope.type + '):');
- if (scope.type === 'global') {
+ if (scope.type === 'global' || scope.type === 'module') {
InspectorTest.logObject(' -- skipped');
} else {
const {result: {result: {value}}} =
@@ -115,7 +115,6 @@ async function waitForPauseAndStep(stepAction) {
if (scope.type === 'local') {
if (value.locals)
InspectorTest.log(` locals: ${JSON.stringify(value.locals)}`);
- InspectorTest.log(` stack: ${JSON.stringify(value.stack)}`);
} else {
InspectorTest.log(` ${JSON.stringify(value)}`);
}
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 4837f3caea..b92d269c94 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -1070,7 +1070,6 @@ int main(int argc, char* argv[]) {
printf("Embedding script '%s'\n", argv[i]);
startup_data = i::CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
- v8::internal::ReadOnlyHeap::ClearSharedHeapForTest();
argv[i] = nullptr;
}
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index ba06c9df3c..858a76141d 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -14,15 +14,12 @@
# https://crbug.com/v8/9029
'debugger/script-on-after-compile-snapshot': [SKIP],
-}], # ALWAYS
-##############################################################################
-['variant == future', {
# https://crbug.com/v8/10356
# This test worked in the wasm interpreter, but fails when using Liftoff for
# debugging.
'debugger/wasm-anyref-global': [FAIL],
-}],
+}], # ALWAYS
##############################################################################
['system == android', {
@@ -78,11 +75,7 @@
##############################################################################
['arch == ppc or arch == ppc64', {
# Liftoff needs to be enabled before running these tests.
- 'debugger/wasm-scope-info': [SKIP],
- 'debugger/wasm-scope-info-liftoff': [SKIP],
- 'debugger/wasm-set-breakpoint-liftoff': [SKIP],
- 'debugger/wasm-stepping-liftoff': [SKIP],
- 'debugger/wasm-inspect-many-registers': [SKIP],
+ 'debugger/wasm-*': [SKIP],
}], # 'arch == ppc or arch == ppc64'
##############################################################################
@@ -90,13 +83,12 @@
# Stack manipulations in LiveEdit is not implemented for this arch.
'debugger/set-script-source-stack-padding': [SKIP],
# Liftoff needs to be enabled before running these tests.
- 'debugger/wasm-scope-info': [SKIP],
- 'debugger/wasm-scope-info-liftoff': [SKIP],
- 'debugger/wasm-set-breakpoint-liftoff': [SKIP],
- 'debugger/wasm-stepping-liftoff': [SKIP],
- 'debugger/wasm-inspect-many-registers': [SKIP],
+ 'debugger/wasm-*': [SKIP],
}], # 'arch == s390 or arch == s390x'
-##############################################################################
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
]
diff --git a/deps/v8/test/inspector/runtime/internal-properties-expected.txt b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
index 978c104866..29694d2009 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/internal-properties-expected.txt
@@ -163,7 +163,7 @@ expression: Promise.resolve(42)
name : [[PromiseStatus]]
value : {
type : string
- value : resolved
+ value : fulfilled
}
}
[1] : {
diff --git a/deps/v8/test/inspector/runtime/query-objects-expected.txt b/deps/v8/test/inspector/runtime/query-objects-expected.txt
index c11f021101..640da3d20b 100644
--- a/deps/v8/test/inspector/runtime/query-objects-expected.txt
+++ b/deps/v8/test/inspector/runtime/query-objects-expected.txt
@@ -98,6 +98,9 @@ Dump each object constructor name.
Running test: testQueryObjectsWithFeedbackVector
Before/After difference: 1
+Running test: testQueryObjectsWithArrayBuffer
+Test that queryObjects does not crash for on-heap TypedArrays
+
Running test: testWithObjectGroup
Query for Array.prototype 3 times
Results since initial: 0
diff --git a/deps/v8/test/inspector/runtime/query-objects.js b/deps/v8/test/inspector/runtime/query-objects.js
index 63a26deaef..302c7c195c 100644
--- a/deps/v8/test/inspector/runtime/query-objects.js
+++ b/deps/v8/test/inspector/runtime/query-objects.js
@@ -147,6 +147,34 @@ InspectorTest.runAsyncTestSuite([
session.disconnect();
},
+ async function testQueryObjectsWithArrayBuffer() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ await Protocol.Runtime.evaluate({
+ expression: 'new Int8Array(32)',
+ });
+ let {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'ArrayBuffer.prototype'
+ });
+ let {result:{objects}} = await session.Protocol.Runtime.queryObjects({
+ prototypeObjectId: objectId
+ });
+ let {result:{result: properties}} = await session.Protocol.Runtime.getProperties({
+ objectId: objects.objectId,
+ ownProperties: true,
+ generatePreview: true
+ });
+ await session.Protocol.Runtime.getProperties({
+ objectId: properties[0].value.objectId,
+ ownProperties: true,
+ generatePreview: true
+ });
+ InspectorTest.log('Test that queryObjects does not crash for on-heap TypedArrays');
+ session.disconnect();
+ },
+
async function testWithObjectGroup() {
let contextGroup = new InspectorTest.ContextGroup();
let session = contextGroup.connect();
diff --git a/deps/v8/test/inspector/runtime/regress-1075763-expected.txt b/deps/v8/test/inspector/runtime/regress-1075763-expected.txt
new file mode 100644
index 0000000000..30d5a1bbef
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-1075763-expected.txt
@@ -0,0 +1,26 @@
+Tests Runtime.evaluate returns object with undefined property.
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : f
+ description : f
+ objectId : <objectId>
+ preview : {
+ description : f
+ overflow : false
+ properties : [
+ [0] : {
+ name : [[GeneratorStatus]]
+ type : string
+ value : suspended
+ }
+ ]
+ subtype : generator
+ type : object
+ }
+ subtype : generator
+ type : object
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/regress-1075763.js b/deps/v8/test/inspector/runtime/regress-1075763.js
new file mode 100644
index 0000000000..5137ed730c
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-1075763.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests Runtime.evaluate returns object with undefined property.');
+
+(async function test() {
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "(function* f() { yield f;})()",
+ generatePreview: true
+ }));
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/regress-1078205-expected.txt b/deps/v8/test/inspector/runtime/regress-1078205-expected.txt
new file mode 100644
index 0000000000..0f1754b9d7
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-1078205-expected.txt
@@ -0,0 +1,157 @@
+Test malformed sourceURL magic comment.
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 0
+ exception : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 0
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : SyntaxError
+ description : SyntaxError: Unexpected token / in JSON at position 0 at JSON.parse (<anonymous>) at <anonymous>:1:6
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/regress-1078205.js b/deps/v8/test/inspector/runtime/regress-1078205.js
new file mode 100644
index 0000000000..ce17c19051
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-1078205.js
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Test malformed sourceURL magic comment.');
+
+(async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//')",
+ generatePreview: true,
+ }));
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//#')",
+ generatePreview: true,
+ }));
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//# ')",
+ generatePreview: true,
+ }));
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//# sourceURL')",
+ generatePreview: true,
+ }));
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//# sourceURL=')",
+ generatePreview: true,
+ }));
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: "JSON.parse('//# sourceURL=\"')",
+ generatePreview: true,
+ }));
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/regress-986051-expected.txt b/deps/v8/test/inspector/runtime/regress-986051-expected.txt
new file mode 100644
index 0000000000..ad2f3d8209
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-986051-expected.txt
@@ -0,0 +1,76 @@
+Regression test for 986051
+Regression test
+{
+ id : <messageId>
+ result : {
+ result : {
+ description : 1
+ type : number
+ value : 1
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : 1
+ exception : {
+ className : ReferenceError
+ description : ReferenceError: $0 is not defined at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : 1
+ scriptId : <scriptId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 0
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ text : Uncaught
+ }
+ result : {
+ className : ReferenceError
+ description : ReferenceError: $0 is not defined at <anonymous>:1:1
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : global
+ description : global
+ objectId : <objectId>
+ type : object
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : undefined
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/regress-986051.js b/deps/v8/test/inspector/runtime/regress-986051.js
new file mode 100644
index 0000000000..7c6842a36c
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regress-986051.js
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {Protocol} = InspectorTest.start(
+ "Regression test for 986051");
+
+Protocol.Runtime.enable();
+(async function() {
+ InspectorTest.log("Regression test");
+ evaluateRepl('1', true);
+ evaluateRepl('$0', false);
+ evaluateRepl('Object.defineProperty(globalThis, "$0", {configurable: false});', true);
+ evaluateRepl('$0', true);
+ evaluateRepl('$0', false);
+ InspectorTest.completeTest();
+})();
+
+async function evaluateRepl(expression, includeCommandLineAPI) {
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression,
+ includeCommandLineAPI,
+ replMode: true,
+ }));
+}
diff --git a/deps/v8/test/inspector/runtime/remote-object-expected.txt b/deps/v8/test/inspector/runtime/remote-object-expected.txt
index 98fe47beed..4763dd18c7 100644
--- a/deps/v8/test/inspector/runtime/remote-object-expected.txt
+++ b/deps/v8/test/inspector/runtime/remote-object-expected.txt
@@ -1567,7 +1567,7 @@ Running test: testPromise
[0] : {
name : [[PromiseStatus]]
type : string
- value : resolved
+ value : fulfilled
}
[1] : {
name : [[PromiseValue]]
diff --git a/deps/v8/test/intl/date-format/check-calendar.js b/deps/v8/test/intl/date-format/check-calendar.js
index 8654e38035..ba0beed904 100644
--- a/deps/v8/test/intl/date-format/check-calendar.js
+++ b/deps/v8/test/intl/date-format/check-calendar.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
let invalidCalendar = [
"invalid",
"abce",
diff --git a/deps/v8/test/intl/date-format/check-numbering-system.js b/deps/v8/test/intl/date-format/check-numbering-system.js
index c17a84fff1..141cc13039 100644
--- a/deps/v8/test/intl/date-format/check-numbering-system.js
+++ b/deps/v8/test/intl/date-format/check-numbering-system.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
let invalidNumberingSystem = [
"invalid",
"abce",
diff --git a/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js b/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js
index 75b4a456d4..a859a29c05 100644
--- a/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js
+++ b/deps/v8/test/intl/date-format/constructor-calendar-numberingSytem-order.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
const actual = [];
const options = {
diff --git a/deps/v8/test/intl/date-format/property-override.js b/deps/v8/test/intl/date-format/property-override.js
index 370f82b0c6..44b8b9d7ab 100644
--- a/deps/v8/test/intl/date-format/property-override.js
+++ b/deps/v8/test/intl/date-format/property-override.js
@@ -71,6 +71,7 @@ var expectedProperties = [
'minute',
'second',
'timeZoneName',
+ 'fractionalSecondDigits',
];
assertEquals(expectedProperties.length, properties.length);
diff --git a/deps/v8/test/intl/date-format/related-year.js b/deps/v8/test/intl/date-format/related-year.js
index a3d9e9dcb4..9460a5705b 100644
--- a/deps/v8/test/intl/date-format/related-year.js
+++ b/deps/v8/test/intl/date-format/related-year.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-other-calendars
// Test it will output relatedYear and yearName
let df = new Intl.DateTimeFormat("zh-u-ca-chinese", {year: "numeric"})
diff --git a/deps/v8/test/intl/displaynames/constructor-order.js b/deps/v8/test/intl/displaynames/constructor-order.js
index 6951b5fddb..919df585bd 100644
--- a/deps/v8/test/intl/displaynames/constructor-order.js
+++ b/deps/v8/test/intl/displaynames/constructor-order.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-displaynames
-
// Throws only once during construction.
// Check for all getters to prevent regression.
// Preserve the order of getter initialization.
diff --git a/deps/v8/test/intl/displaynames/constructor.js b/deps/v8/test/intl/displaynames/constructor.js
index c86fa479fe..2dc72e63ce 100644
--- a/deps/v8/test/intl/displaynames/constructor.js
+++ b/deps/v8/test/intl/displaynames/constructor.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-displaynames
// DisplayNames constructor can't be called as function.
assertThrows(() => Intl.DisplayNames('sr'), TypeError);
diff --git a/deps/v8/test/intl/displaynames/resolved-options.js b/deps/v8/test/intl/displaynames/resolved-options.js
index dd8988375a..32cbc8f199 100644
--- a/deps/v8/test/intl/displaynames/resolved-options.js
+++ b/deps/v8/test/intl/displaynames/resolved-options.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-displaynames
-
let displayNames = new Intl.DisplayNames();
// The default style is 'long'
assertEquals('long', displayNames.resolvedOptions().style);
diff --git a/deps/v8/test/intl/displaynames/supported-locale.js b/deps/v8/test/intl/displaynames/supported-locale.js
index d392bedd11..ac8f058fbe 100644
--- a/deps/v8/test/intl/displaynames/supported-locale.js
+++ b/deps/v8/test/intl/displaynames/supported-locale.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-displaynames
-
assertEquals(typeof Intl.DisplayNames.supportedLocalesOf, "function",
"Intl.DisplayNames.supportedLocalesOf should be a function");
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 6437de45db..d96ffea076 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -33,12 +33,6 @@
# https://code.google.com/p/v8/issues/detail?id=7481
'collator/check-kf-option': [FAIL],
'collator/check-kn-option': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=9312
- 'regress-9312': [FAIL],
-
- # http://crbug/v8/9930
- 'date-format/format_range_hour_cycle': [FAIL],
}], # ALWAYS
['variant == no_wasm_traps', {
@@ -67,8 +61,11 @@
'default_locale': [SKIP],
# Unable to change locale and TZ on Android:
'regress-7770': [SKIP],
- # 'bn' locale is not included in Android.
- 'relative-time-format/resolved-options-nu-extended': [FAIL],
}], # 'system == android'
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/intl/locale/locale-constructor.js b/deps/v8/test/intl/locale/locale-constructor.js
index 95a6f3d24f..5ad4ef92b0 100644
--- a/deps/v8/test/intl/locale/locale-constructor.js
+++ b/deps/v8/test/intl/locale/locale-constructor.js
@@ -19,9 +19,8 @@ assertThrows(() => new Intl.Locale('abcdefghi'), RangeError);
// Options will be force converted into Object.
assertDoesNotThrow(() => new Intl.Locale('sr', 5));
-// ICU problem - locale length is limited.
-// http://bugs.icu-project.org/trac/ticket/13417.
-assertThrows(
+// Regression for http://bugs.icu-project.org/trac/ticket/13417.
+assertDoesNotThrow(
() => new Intl.Locale(
'sr-cyrl-rs-t-ja-u-ca-islamic-cu-rsd-tz-uslax-x-whatever', {
calendar: 'buddhist',
diff --git a/deps/v8/test/intl/number-format/check-numbering-system.js b/deps/v8/test/intl/number-format/check-numbering-system.js
index 78c9a6d8d5..e17b1f427a 100644
--- a/deps/v8/test/intl/number-format/check-numbering-system.js
+++ b/deps/v8/test/intl/number-format/check-numbering-system.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
let invalidNumberingSystem = [
"invalid",
"abce",
diff --git a/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js b/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js
index 8c284967ff..6cc73c2d23 100644
--- a/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js
+++ b/deps/v8/test/intl/number-format/constructor-numberingSytem-order.js
@@ -1,8 +1,6 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// Flags: --harmony-intl-add-calendar-numbering-system
const actual = [];
const options = {
diff --git a/deps/v8/test/intl/number-format/unified/sign-display.js b/deps/v8/test/intl/number-format/unified/sign-display.js
index 18b74c9f0b..21e0173189 100644
--- a/deps/v8/test/intl/number-format/unified/sign-display.js
+++ b/deps/v8/test/intl/number-format/unified/sign-display.js
@@ -13,7 +13,7 @@ const testData = [
["auto", "-123", "-0", "0", "123"],
["always", "-123", "-0", "+0", "+123"],
["never", "123", "0", "0", "123"],
- ["exceptZero", "-123", "-0", "0", "+123"],
+ ["exceptZero", "-123", "0", "0", "+123"],
];
for (const [signDisplay, neg, negZero, zero, pos] of testData) {
diff --git a/deps/v8/test/intl/regress-10437.js b/deps/v8/test/intl/regress-10437.js
new file mode 100644
index 0000000000..1bdd356660
--- /dev/null
+++ b/deps/v8/test/intl/regress-10437.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertDoesNotThrow(() => (new Intl.NumberFormat(
+ 'ar', {style: 'unit', unit: 'acre-per-degree'})).format(0));
+
+assertDoesNotThrow(() => (new Intl.NumberFormat(
+ 'ar', {style: 'unit', unit: 'millimeter-per-mile'})).format(0));
+
+assertDoesNotThrow(() => (new Intl.NumberFormat(
+ 'ar', {style: 'unit', unit: 'centimeter-per-acre'})).format(1));
+
+assertDoesNotThrow(() => (new Intl.NumberFormat(
+ 'ar', {style: 'unit', unit: 'minute-per-yard'})).format(1));
+
+assertDoesNotThrow(() => (new Intl.NumberFormat(
+ 'ar', {style: 'unit', unit: 'foot-per-fluid-ounce'})).format(2));
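
A minimal usage sketch of the unit style the test above exercises (illustrative only, not part of the patch; 'kilometer-per-hour' is just an example built from sanctioned unit identifiers):

  const nf = new Intl.NumberFormat('en', {
    style: 'unit', unit: 'kilometer-per-hour', unitDisplay: 'long',
  });
  // Formats as something like "120 kilometers per hour" (exact wording comes from CLDR data).
  console.log(nf.format(120));
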
diff --git a/deps/v8/test/intl/regress-10438.js b/deps/v8/test/intl/regress-10438.js
new file mode 100644
index 0000000000..81419180cf
--- /dev/null
+++ b/deps/v8/test/intl/regress-10438.js
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_dateformat_fractional_second_digits
+
+assertEquals(
+ 0,
+ (new Intl.DateTimeFormat("en", {fractionalSecondDigits: 0}))
+ .resolvedOptions().fractionalSecondDigits);
+
+assertEquals(
+ 1,
+ (new Intl.DateTimeFormat("en", {fractionalSecondDigits: 1}))
+ .resolvedOptions().fractionalSecondDigits);
+
+assertEquals(
+ 2,
+ (new Intl.DateTimeFormat("en", {fractionalSecondDigits: 2}))
+ .resolvedOptions().fractionalSecondDigits);
+
+assertEquals(
+ 3,
+ (new Intl.DateTimeFormat("en", {fractionalSecondDigits: 3}))
+ .resolvedOptions().fractionalSecondDigits);
+
+// When timeStyle and dateStyle are not present, GetNumberOption falls back
+// to 0 as the default, regardless of whether fractionalSecondDigits is
+// present in the options or not.
+assertEquals(
+ 0,
+ (new Intl.DateTimeFormat()).resolvedOptions().fractionalSecondDigits);
+
+assertEquals(
+ 0,
+ (new Intl.DateTimeFormat("en", {fractionalSecondDigits: undefined}))
+ .resolvedOptions().fractionalSecondDigits);
+
+// When timeStyle or dateStyle is present, the code should not read
+// fractionalSecondDigits from the option.
+assertEquals(
+ undefined,
+ (new Intl.DateTimeFormat(
+ "en", {timeStyle: "short", fractionalSecondDigits: 3}))
+ .resolvedOptions().fractionalSecondDigits);
+
+assertEquals(
+ undefined,
+ (new Intl.DateTimeFormat(
+ "en", {dateStyle: "short", fractionalSecondDigits: 3}))
+ .resolvedOptions().fractionalSecondDigits);
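
A minimal sketch of what the option tested above does to formatted output (illustrative only, not part of the patch; like the test, it assumes the --harmony_intl_dateformat_fractional_second_digits flag is enabled):

  const fmt = new Intl.DateTimeFormat('en', {
    minute: 'numeric', second: 'numeric', fractionalSecondDigits: 3,
  });
  // For 12:05:08.123 this formats as something like "05:08.123".
  console.log(fmt.format(new Date(2020, 0, 1, 12, 5, 8, 123)));
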
diff --git a/deps/v8/test/intl/regress-1074578.js b/deps/v8/test/intl/regress-1074578.js
new file mode 100644
index 0000000000..c8fa56e8a2
--- /dev/null
+++ b/deps/v8/test/intl/regress-1074578.js
@@ -0,0 +1,45 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test the update of tz2020a
+// https://mm.icann.org/pipermail/tz-announce/2020-April/000058.html
+// Morocco springs forward on 2020-05-31, not 2020-05-24.
+// Canada's Yukon advanced to -07 year-round on 2020-03-08.
+// America/Nuuk renamed from America/Godthab.
+// zic now supports expiration dates for leap second lists.
+
+// A. Test Morocco springs forward on 2020-05-31, not 2020-05-24.
+
+const df1 = new Intl.DateTimeFormat(
+ "en",
+ {timeZone: "Africa/Casablanca", timeStyle: "long", dateStyle: "long"})
+const d1 = new Date("2020-05-25T00:00:00.000Z");
+const d2 = new Date("2020-05-31T00:00:00.000Z");
+
+// Before the tz2020a change this would produce "May 25, 2020 at 1:00:00 AM GMT+1"
+assertEquals("May 25, 2020 at 12:00:00 AM GMT", df1.format(d1));
+
+// Before the tz2020a change this would produce "May 31, 2020 at 1:00:00 AM GMT+1"
+assertEquals("May 31, 2020 at 12:00:00 AM GMT", df1.format(d2));
+
+// B. Test Canada's Yukon advanced to -07 year-round on 2020-03-08.
+const df2 = new Intl.DateTimeFormat(
+ "en",
+ {timeZone: "Canada/Yukon", dateStyle: "long", timeStyle: "long"});
+const d3 = new Date("2020-03-09T00:00Z");
+const d4 = new Date("2021-03-09T00:00Z");
+
+// Before the tz2020a change this would produce "March 8, 2020 at 5:00:00 PM PDT"
+assertEquals("March 8, 2020 at 5:00:00 PM MST", df2.format(d3));
+
+// Before the tz2020a change this would produce "March 8, 2021 at 4:00:00 PM PST"
+assertEquals("March 8, 2021 at 5:00:00 PM MST", df2.format(d4));
+
+// C. Test America/Nuuk renamed from America/Godthab.
+
+// Before tz2020a this would throw a RangeError.
+const df3 = new Intl.DateTimeFormat("en", {timeZone: "America/Nuuk"});
+
+// The renamed timezone resolves to the stable name used before the rename.
+assertEquals("America/Godthab", df3.resolvedOptions().timeZone);
diff --git a/deps/v8/test/intl/regress-364374.js b/deps/v8/test/intl/regress-364374.js
new file mode 100644
index 0000000000..05e8afc4a3
--- /dev/null
+++ b/deps/v8/test/intl/regress-364374.js
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const special_time_zones = [
+ "America/Argentina/ComodRivadavia",
+ "America/Knox_IN",
+ "Antarctica/McMurdo",
+ "Australia/ACT",
+ "Australia/LHI",
+ "Australia/NSW",
+ "Brazil/DeNoronha",
+ "CET",
+ "CST6CDT",
+ "Chile/EasterIsland",
+ "Etc/UCT",
+ "EET",
+ "EST",
+ "EST5EDT",
+ "GB",
+ "GB-Eire",
+ "GMT+0",
+ "GMT-0",
+ "GMT0",
+ "HST",
+ "MET",
+ "MST",
+ "MST7MDT",
+ "Mexico/BajaNorte",
+ "Mexico/BajaSur",
+ "NZ",
+ "NZ-CHAT",
+ "PRC",
+ "PST8PDT",
+ "ROC",
+ "ROK",
+ "UCT",
+ "US/Alaska",
+ "US/Aleutian",
+ "US/Arizona",
+ "US/Central",
+ "US/East-Indiana",
+ "US/Eastern",
+ "US/Hawaii",
+ "US/Indiana-Starke",
+ "US/Michigan",
+ "US/Mountain",
+ "US/Pacific",
+ "US/Pacific-New",
+ "US/Samoa",
+ "W-SU",
+ "WET",
+];
+
+special_time_zones.forEach(function(timeZone) {
+ assertDoesNotThrow(() => {
+ // Make sure the following wont throw RangeError exception
+ df = new Intl.DateTimeFormat(undefined, {timeZone});
+ });
+})
diff --git a/deps/v8/test/intl/regress-966285.js b/deps/v8/test/intl/regress-966285.js
index 4c5cfa9491..c53fb6d8ac 100644
--- a/deps/v8/test/intl/regress-966285.js
+++ b/deps/v8/test/intl/regress-966285.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
var v = {};
Object.defineProperty(v.__proto__, "calendar",
{ get: function() { return -1; } });
diff --git a/deps/v8/test/intl/regress-9786.js b/deps/v8/test/intl/regress-9786.js
index e922dba5f7..faeb6ad2bf 100644
--- a/deps/v8/test/intl/regress-9786.js
+++ b/deps/v8/test/intl/regress-9786.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
// Well-formed but invalid calendar should not throw RangeError.
var calendar = "abc";
var len = 3;
diff --git a/deps/v8/test/intl/regress-9787.js b/deps/v8/test/intl/regress-9787.js
index 576c0f7d64..24eca1178c 100644
--- a/deps/v8/test/intl/regress-9787.js
+++ b/deps/v8/test/intl/regress-9787.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
// ill-formed and valid calendar should throw RangeError.
assertThrows(
'new Intl.DateTimeFormat("en", {calendar: "gregorian"})',
diff --git a/deps/v8/test/intl/regress-9788.js b/deps/v8/test/intl/regress-9788.js
index 7a3679584f..1a4d44b4a3 100644
--- a/deps/v8/test/intl/regress-9788.js
+++ b/deps/v8/test/intl/regress-9788.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
// Well-formed but invalid numberingSystem should not throw RangeError.
var numberingSystem = "abc";
var len = 3;
diff --git a/deps/v8/test/intl/regress-9887.js b/deps/v8/test/intl/regress-9887.js
index 4ef31f85bc..0d43a1711d 100644
--- a/deps/v8/test/intl/regress-9887.js
+++ b/deps/v8/test/intl/regress-9887.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-intl-add-calendar-numbering-system
-
let intlClasses = [
Intl.RelativeTimeFormat,
Intl.NumberFormat,
diff --git a/deps/v8/test/intl/relative-time-format/resolved-options-nu-extended.js b/deps/v8/test/intl/relative-time-format/resolved-options-nu-extended.js
index dd7979df9e..0d5da805fb 100644
--- a/deps/v8/test/intl/relative-time-format/resolved-options-nu-extended.js
+++ b/deps/v8/test/intl/relative-time-format/resolved-options-nu-extended.js
@@ -6,21 +6,25 @@
// Split from test/intl/relative-time-format/resolved-options-nu.js
// because Android not yet include bn locale data.
-// For locale default the numberingSystem to other than 'latn'
-assertEquals(
- "beng",
- new Intl.RelativeTimeFormat("bn").resolvedOptions().numberingSystem
-);
+// Only run the assertions if this build supports the 'bn' locale
+if (Intl.RelativeTimeFormat.supportedLocalesOf(["bn"]).length > 0) {
+ // For locale default the numberingSystem to other than 'latn'
+ assertEquals(
+ "beng",
+ new Intl.RelativeTimeFormat("bn").resolvedOptions().numberingSystem
+ );
-// For locale which default others but use -u-nu-latn to change to 'latn' numberingSystem
-assertEquals(
- "latn",
- new Intl.RelativeTimeFormat("bn-u-nu-latn").resolvedOptions()
- .numberingSystem
-);
-// For locale use -u-nu- with invalid value still back to default.
-assertEquals(
- "beng",
- new Intl.RelativeTimeFormat("bn-u-nu-abcd").resolvedOptions()
- .numberingSystem
-);
+ // For locale which default others but use -u-nu-latn to change to 'latn'
+ // numberingSystem
+ assertEquals(
+ "latn",
+ new Intl.RelativeTimeFormat("bn-u-nu-latn").resolvedOptions()
+ .numberingSystem
+ );
+ // For locale use -u-nu- with invalid value still back to default.
+ assertEquals(
+ "beng",
+ new Intl.RelativeTimeFormat("bn-u-nu-abcd").resolvedOptions()
+ .numberingSystem
+ );
+}
diff --git a/deps/v8/test/js-perf-test/JSTests1.json b/deps/v8/test/js-perf-test/JSTests1.json
index d8c8a04684..3f472a41e6 100644
--- a/deps/v8/test/js-perf-test/JSTests1.json
+++ b/deps/v8/test/js-perf-test/JSTests1.json
@@ -11,6 +11,22 @@
"resources": ["base.js"],
"tests": [
{
+ "name": "Operators",
+ "path": ["Operators"],
+ "main": "run.js",
+ "resources": [
+ "run.js",
+ "abstract-equality.js"
+ ],
+ "results_regexp": "^Operators\\-%s\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Equal-SmiNumber"},
+ {"name": "Equal-SmiOddball"},
+ {"name": "Equal-NumberOddball"},
+ {"name": "Equal-OddballOddball"}
+ ]
+ },
+ {
"name": "Collections",
"path": ["Collections"],
"main": "run.js",
diff --git a/deps/v8/test/js-perf-test/Operators/abstract-equality.js b/deps/v8/test/js-perf-test/Operators/abstract-equality.js
new file mode 100644
index 0000000000..da65a60296
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Operators/abstract-equality.js
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const TEST_ITERATIONS = 100000;
+
+// This dummy ensures that the feedback for benchmark.run() in the Measure
+// function from base.js is not monomorphic, thereby preventing the benchmarks
+// below from being inlined. This ensures consistent behavior and comparable
+// results.
+new BenchmarkSuite('Prevent-Inline-Dummy', [100000], [
+ new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
+]);
+
+new BenchmarkSuite('Equal-SmiNumber', [100000], [
+ new Benchmark('Equal-SmiNumber', true, false, 0, TestEqualSmiNumber, SetUp)
+]);
+
+new BenchmarkSuite('Equal-SmiOddball', [100000], [
+ new Benchmark('Equal-SmiOddball', true, false, 0, TestEqualSmiOddball, SetUp)
+]);
+
+new BenchmarkSuite('Equal-NumberOddball', [100000], [
+ new Benchmark('Equal-NumberOddball', true, false, 0, TestEqualNumberOddball,
+ SetUp)
+]);
+
+new BenchmarkSuite('Equal-OddballOddball', [100000], [
+ new Benchmark('Equal-OddballOddball', true, false, 0, TestEqualOddballOddball,
+ SetUp)
+]);
+
+
+let smis = [];
+let numbers = [];
+let oddballs = [];
+function SetUp() {
+ for(let i = 0; i < TEST_ITERATIONS + 1; ++i) {
+ smis[i] = (i % 2 == 0) ? 42 : -42;
+ numbers[i] = (i % 2 == 0) ? 42.3 : -42.3;
+ oddballs[i] = (i % 2 == 0);
+ }
+}
+
+
+function TestEqualSmiNumber() {
+ let result = false;
+ for(let i = 0; i < TEST_ITERATIONS; ++i) {
+ result = result || (11 == numbers[i]);
+ }
+ return result;
+}
+
+
+function TestEqualSmiOddball() {
+ let result = false;
+ for(let i = 1; i < TEST_ITERATIONS; ++i) {
+ result = result || (smis[i] == false);
+ }
+ return result;
+}
+
+
+function TestEqualNumberOddball() {
+ let result = false;
+ for(let i = 1; i < TEST_ITERATIONS; ++i) {
+ result = result || (numbers[i] == false);
+ }
+ return result;
+}
+
+
+function TestEqualOddballOddball() {
+ let result = false;
+ for(let i = 0; i < TEST_ITERATIONS; ++i) {
+ result = result || (oddballs[i] == oddballs[i+1]);
+ }
+ return result;
+}
diff --git a/deps/v8/test/js-perf-test/Operators/run.js b/deps/v8/test/js-perf-test/Operators/run.js
new file mode 100644
index 0000000000..e70442ac1d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Operators/run.js
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+load('../base.js');
+load('abstract-equality.js');
+
+var success = true;
+
+
+function PrintResult(name, result) {
+ print(`Operators-${name}(Score): ${result}`);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/message/fail/spread-call-4.js b/deps/v8/test/message/fail/spread-call-4.js
new file mode 100644
index 0000000000..564562b7ad
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-4.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+x = 1;
+console.log(1, ...x);
diff --git a/deps/v8/test/message/fail/spread-call-4.out b/deps/v8/test/message/fail/spread-call-4.out
new file mode 100644
index 0000000000..66503f6419
--- /dev/null
+++ b/deps/v8/test/message/fail/spread-call-4.out
@@ -0,0 +1,5 @@
+*%(basename)s:5: TypeError: Found non-callable @@iterator
+console.log(1, ...x);
+ ^
+TypeError: Found non-callable @@iterator
+ at *%(basename)s:5:9
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index fd6f282ecc..09a90fb4f1 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -44,8 +44,8 @@
}], # arch != x64 and arch != ia32 and arch != arm64 and arch != arm
['variant == code_serializer', {
- # TODO(yangguo): Code serializer output is incompatible with all message
- # tests.
+ # Code serializer output is incompatible with all message tests
+ # because the same test is executed twice.
'*': [SKIP],
}], # variant == code_serializer
@@ -64,4 +64,9 @@
'asm-*': [SKIP],
}], # lite_mode or variant == jitless
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.js b/deps/v8/test/message/wasm-trace-memory-interpreted.js
deleted file mode 100644
index 401707f581..0000000000
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-stress-opt --expose-wasm --trace-wasm-memory --wasm-interpret-all
-// Flags: --experimental-wasm-simd
-
-load("test/message/wasm-trace-memory.js");
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
deleted file mode 100644
index e04e6964ca..0000000000
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.out
+++ /dev/null
@@ -1,14 +0,0 @@
-interpreter func: 0+0x3 load from 00000004 val: i32:0 / 00000000
-interpreter func: 1+0x3 load from 00000001 val: i8:0 / 00
-interpreter func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
-interpreter func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
-interpreter func: 1+0x3 load from 00000006 val: i8:52 / 34
-interpreter func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
-interpreter func: 4+0x5 store to 00000004 val: i8:171 / ab
-interpreter func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
-interpreter func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
-interpreter func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
-interpreter func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
-interpreter func: 7+0x3 load from 00000004 val: i16:48879 / beef
-interpreter func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
-interpreter func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.js b/deps/v8/test/message/weakref-finalizationregistry-error.js
index 1db6bfeccd..e4c47fed0d 100644
--- a/deps/v8/test/message/weakref-finalizationregistry-error.js
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.js
@@ -8,8 +8,7 @@
// Since cleanup tasks are top-level tasks, errors thrown from them don't stop
// future cleanup tasks from running.
-function callback(iter) {
- [...iter];
+function callback(holdings) {
throw new Error('callback');
};
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.out b/deps/v8/test/message/weakref-finalizationregistry-error.out
index 4682b63f23..01255fd364 100644
--- a/deps/v8/test/message/weakref-finalizationregistry-error.out
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.out
@@ -3,10 +3,12 @@
^
Error: callback
at callback (*%(basename)s:{NUMBER}:{NUMBER})
+ at FinalizationRegistry.cleanupSome (<anonymous>)
*%(basename)s:{NUMBER}: Error: callback
throw new Error('callback');
^
Error: callback
at callback (*%(basename)s:{NUMBER}:{NUMBER})
+ at FinalizationRegistry.cleanupSome (<anonymous>)
diff --git a/deps/v8/test/mjsunit/asm/load-elimination.js b/deps/v8/test/mjsunit/asm/load-elimination.js
index fff722d017..f3d0c1a4e6 100644
--- a/deps/v8/test/mjsunit/asm/load-elimination.js
+++ b/deps/v8/test/mjsunit/asm/load-elimination.js
@@ -23,5 +23,3 @@ var foo = (function(stdlib, foreign, heap) {
assertEquals(0x1234, foo());
assertEquals(0x1234, foo());
-%OptimizeFunctionOnNextCall(foo);
-assertEquals(0x1234, foo());
diff --git a/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js b/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js
index 3945c8d49d..f76dd42d71 100644
--- a/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js
+++ b/deps/v8/test/mjsunit/call-intrinsic-fuzzing.js
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-for-fuzzing
+// Flags: --allow-natives-for-fuzzing --fuzzing
// Test whitelisted/blacklisted intrinsics in the context of fuzzing.
// Blacklisted intrinsics are replaced with undefined.
-assertEquals(undefined, %GetOptimizationStatus(function (){}));
+assertEquals(undefined, %ConstructConsString("a", "b"));
// Blacklisted intrinsics can have wrong arguments.
-assertEquals(undefined, %GetOptimizationStatus(1, 2, 3, 4));
+assertEquals(undefined, %ConstructConsString(1, 2, 3, 4));
// We don't care if an intrinsic actually exists.
assertEquals(undefined, %FooBar());
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
index 2a7c9643ec..a985ed6f69 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js
@@ -47,6 +47,9 @@
assertFalse(foo(a));
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
+ // Re-prepare the function immediately to make sure type feedback isn't
+ // cleared by untimely gc, as re-optimization on new feedback is tested below
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(a));
assertOptimized(foo);
@@ -55,7 +58,6 @@
assertUnoptimized(foo);
// Make sure TurboFan learns the new feedback
- %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertFalse(foo("a"));
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
index ad866aa7be..6c3d0ff862 100644
--- a/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
+++ b/deps/v8/test/mjsunit/compiler/abstract-equal-undetectable.js
@@ -91,10 +91,6 @@ const undetectable = %GetUndetectable();
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo(b));
assertFalse(foo(a));
-
- // TurboFan doesn't need to bake in feedback, since it sees the undetectable.
- assertFalse(foo(1));
- assertOptimized(foo);
})();
// Unknown undetectable on one side strict equality with receiver.
@@ -124,3 +120,30 @@ const undetectable = %GetUndetectable();
assertFalse(foo(1));
assertUnoptimized(foo);
})();
+
+// Unknown undetectable on both sides.
+(function() {
+ const a = undetectable;
+
+ function foo(a, b) { return a == b; }
+
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(a, a));
+ assertTrue(foo(a, undefined));
+ assertTrue(foo(undefined, a));
+ assertFalse(foo(a, %GetUndetectable()));
+ assertFalse(foo(%GetUndetectable(), a));
+ assertFalse(foo(%GetUndetectable(), %GetUndetectable()));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(a, a));
+ assertTrue(foo(a, undefined));
+ assertTrue(foo(undefined, a));
+ assertFalse(foo(a, %GetUndetectable()));
+ assertFalse(foo(%GetUndetectable(), a));
+ assertFalse(foo(%GetUndetectable(), %GetUndetectable()));
+ assertOptimized(foo);
+
+ // TurboFan bakes in feedback on the inputs.
+ assertFalse(foo(1));
+ assertUnoptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
index 1390acc356..2004477932 100644
--- a/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
+++ b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
@@ -18,12 +18,14 @@ testAdd(3n, 7n);
testAdd(17n, -54n);
%OptimizeFunctionOnNextCall(testAdd);
assertEquals(testAdd(6n, 2n), 8n);
+// Re-prepare the function immediately to make sure type feedback isn't cleared
+// by an untimely gc, as re-optimization on new feedback is tested below
+%PrepareFunctionForOptimization(testAdd);
assertOptimized(testAdd);
assertThrows(() => testAdd(big, big), RangeError);
assertUnoptimized(testAdd);
-%PrepareFunctionForOptimization(testAdd);
testAdd(30n, -50n);
testAdd(23n, 5n);
%OptimizeFunctionOnNextCall(testAdd);
diff --git a/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
new file mode 100644
index 0000000000..9341f8e8c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony-weak-refs --expose-gc
+
+// Helper to convert setTimeout into an awaitable promise.
+function asyncTimeout(timeout) {
+ return new Promise((resolve, reject)=>{
+ setTimeout(resolve, timeout);
+ })
+}
+
+function Foo() {}
+
+function getX(o) { return o.x; }
+
+(async function() {
+ let o = new Foo();
+ // Transition o:Foo to o:Foo{x}. This transition is important, as the o:Foo
+ // map is the initial map for the Foo constructor, and so is strongly held by
+ // it. We want o to be the only strong holder of its map.
+ o.x = 42;
+ %CompleteInobjectSlackTracking(new Foo());
+
+ // Warm up 'getX' with 'Foo{x}' feedback for its o.x access.
+ %PrepareFunctionForOptimization(getX);
+ assertEquals(getX(o), 42);
+ assertEquals(getX(o), 42);
+
+ // Clear out 'o', which is the only strong holder of the Foo{x} map.
+ let weak_o = new WeakRef(o);
+ o = null;
+
+ // Tick the message loop so that the weak ref can be collected.
+ await asyncTimeout(0);
+
+ // Collect the old 'o', which will also collect the 'Foo{x}' map.
+ gc();
+
+ // Make sure the old 'o' was collected.
+ assertEquals(undefined, weak_o.deref());
+
+ // Optimize the function with the current monomorphic 'Foo{x}' map o.x access,
+ // where the 'Foo{x}' map is dead and therefore the map set is empty. Then,
+ // create a new 'Foo{x}' object and pass that through. This compilation and
+ // o.x access should still succeed despite the dead map.
+ %OptimizeFunctionOnNextCall(getX);
+ o = new Foo();
+ o.x = 42;
+ assertEquals(getX(o), 42);
+
+})();
diff --git a/deps/v8/test/mjsunit/compiler/osr-infinite.js b/deps/v8/test/mjsunit/compiler/osr-infinite.js
index 51b3a3e871..0a5f6576bf 100644
--- a/deps/v8/test/mjsunit/compiler/osr-infinite.js
+++ b/deps/v8/test/mjsunit/compiler/osr-infinite.js
@@ -11,7 +11,6 @@ function thrower() {
if (x == 5) %OptimizeOsr(1);
if (x == 10) throw "terminate";
}
-%PrepareFunctionForOptimization(thrower);
%NeverOptimizeFunction(thrower); // Don't want to inline the thrower.
%NeverOptimizeFunction(test); // Don't want to inline the func into test.
diff --git a/deps/v8/test/mjsunit/compiler/redundancy-elimination.js b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
index dc01fb4a05..d5b0f6a390 100644
--- a/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
+++ b/deps/v8/test/mjsunit/compiler/redundancy-elimination.js
@@ -155,10 +155,8 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
- // Even passing -0 should not deoptimize and
- // of course still pass the equality test above.
- assertEquals(9, foo([9, 2], -0));
assertOptimized(foo);
+ assertEquals(9, foo([9, 2], -0));
})();
// Test the RedundancyElimination::ReduceSpeculativeNumberComparison()
@@ -176,10 +174,8 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
- // Even passing -0 should not deoptimize and
- // of course still pass the equality test above.
- assertEquals(9, foo([9, 2], -0));
assertOptimized(foo);
+ assertEquals(9, foo([9, 2], -0));
})();
// Test the RedundancyElimination::ReduceSpeculativeNumberComparison()
@@ -197,8 +193,6 @@
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo([1, 2], 0));
assertEquals(1, foo([1, 2], 1));
- // Even passing -0 should not deoptimize and
- // of course still pass the equality test above.
- assertEquals(9, foo([9, 2], -0));
assertOptimized(foo);
+ assertEquals(9, foo([9, 2], -0));
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1065737.js b/deps/v8/test/mjsunit/compiler/regress-1065737.js
new file mode 100644
index 0000000000..7da5ee7238
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1065737.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ class c {
+ static get [v = 0]() {}
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo, ReferenceError);
+assertThrows(foo, ReferenceError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, ReferenceError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1067544.js b/deps/v8/test/mjsunit/compiler/regress-1067544.js
new file mode 100644
index 0000000000..4e41c766b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1067544.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const v = [];
+function foo() {
+ Int8Array.prototype.values.call([v]);
+}
+
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1068494.js b/deps/v8/test/mjsunit/compiler/regress-1068494.js
new file mode 100644
index 0000000000..e7578e2c8b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1068494.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return { ['bar']: class {} };
+}
+%PrepareFunctionForOptimization(foo);
+assertEquals('bar', foo().bar.name);
+assertEquals('bar', foo().bar.name);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals('bar', foo().bar.name);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1070892.js b/deps/v8/test/mjsunit/compiler/regress-1070892.js
new file mode 100644
index 0000000000..b58dde7470
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1070892.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v = {0: 0, 1: 1, '01': 7};
+function foo(index) {
+ return [v[index], v[index + 1], index + 1];
+};
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(foo(0), [0, 1, 1]);
+assertEquals(foo(0), [0, 1, 1]);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(foo(0), [0, 1, 1]);
+assertEquals(foo('0'), [0, 7, '01']);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1071743.js b/deps/v8/test/mjsunit/compiler/regress-1071743.js
new file mode 100644
index 0000000000..f2d1d07a9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1071743.js
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+function foo(v) {
+ let x = Math.floor(v);
+ Number.prototype[v] = 42;
+ return x + Math.floor(v);
+}
+
+%PrepareFunctionForOptimization(foo);
+assertSame(foo(-0), -0);
+assertSame(foo(-0), -0);
+%OptimizeFunctionOnNextCall(foo);
+assertSame(foo(-0), -0);
+
+
+function bar(v) {
+ v = v ? (v|0) : -0; // v has now type Integral32OrMinusZero.
+ let x = Math.floor(v);
+ Number.prototype[v] = 42;
+ return x + Math.floor(v);
+}
+
+%PrepareFunctionForOptimization(bar);
+assertSame(2, bar(1));
+assertSame(2, bar(1));
+%OptimizeFunctionOnNextCall(bar);
+assertSame(-0, bar(-0));
diff --git a/deps/v8/test/mjsunit/compiler/regress-1074736.js b/deps/v8/test/mjsunit/compiler/regress-1074736.js
new file mode 100644
index 0000000000..6395026454
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1074736.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var arr = new Uint8Array();
+%ArrayBufferDetach(arr.buffer);
+
+function foo() {
+ return arr[Symbol.iterator]();
+}
+
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo, TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1082704.js b/deps/v8/test/mjsunit/compiler/regress-1082704.js
new file mode 100644
index 0000000000..e93f5a6e91
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1082704.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var array = [[]];
+function foo() {
+ const x = array[0];
+ const y = [][0];
+ return x == y;
+}
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo());
diff --git a/deps/v8/test/mjsunit/compiler/regress-1084820.js b/deps/v8/test/mjsunit/compiler/regress-1084820.js
new file mode 100644
index 0000000000..beb168b413
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1084820.js
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Create a map where 'my_property' has HeapObject representation.
+const dummy_obj = {};
+dummy_obj.my_property = 'some HeapObject';
+dummy_obj.my_property = 'some other HeapObject';
+
+function gaga() {
+ const obj = {};
+ // Store a HeapNumber and then a Smi.
+ // This must happen in a loop, even if it's only 2 iterations:
+ for (let j = -3_000_000_000; j <= -1_000_000_000; j += 2_000_000_000) {
+ obj.my_property = j;
+ }
+ // Trigger (soft) deopt.
+ if (!%IsBeingInterpreted()) obj + obj;
+}
+
+%PrepareFunctionForOptimization(gaga);
+gaga();
+gaga();
+%OptimizeFunctionOnNextCall(gaga);
+gaga();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1092650.js b/deps/v8/test/mjsunit/compiler/regress-1092650.js
new file mode 100644
index 0000000000..ba94375aeb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1092650.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Create map with HeapNumber in field 'a'
+({a: 2**30});
+
+function foo() {
+ return foo.arguments[0];
+}
+
+function main() {
+ foo({a: 42});
+}
+
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(main);
+main();
+main();
+%OptimizeFunctionOnNextCall(main);
+main();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1094132.js b/deps/v8/test/mjsunit/compiler/regress-1094132.js
new file mode 100644
index 0000000000..418637d86f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1094132.js
@@ -0,0 +1,78 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function prettyPrinted() {}
+
+function formatFailureText() {
+ if (expectedText.length <= 40 && foundText.length <= 40) {
+ message += ": expected <" + expectedText + "> found <" + foundText + ">";
+ message += ":\nexpected:\n" + expectedText + "\nfound:\n" + foundText;
+ }
+}
+
+function fail(expectedText, found, name_opt) {
+ formatFailureText(expectedText, found, name_opt);
+ if (!a[aProps[i]][aProps[i]]) { }
+}
+
+function deepEquals(a, b) {
+ if (a === 0) return 1 / a === 1 / b;
+ if (typeof a !== typeof a) return false;
+ if (typeof a !== "object" && typeof a !== "function") return false;
+ if (objectClass !== classOf()) return false;
+ if (objectClass === "RegExp") { }
+}
+
+function assertEquals() {
+ if (!deepEquals()) {
+ fail(prettyPrinted(), undefined, undefined);
+ }
+}
+
+({y: {}, x: 0.42});
+
+function gaga() {
+ return {gx: bar.arguments[0], hx: baz.arguments[0]};
+}
+
+function baz() {
+ return gaga();
+}
+
+function bar(obj) {
+ return baz(obj.y);
+}
+
+function foo() {
+ bar({y: {}, x: 42});
+ try { assertEquals() } catch (e) {}
+ try { assertEquals() } catch (e) {}
+ assertEquals();
+}
+
+%PrepareFunctionForOptimization(prettyPrinted);
+%PrepareFunctionForOptimization(formatFailureText);
+%PrepareFunctionForOptimization(fail);
+%PrepareFunctionForOptimization(deepEquals);
+%PrepareFunctionForOptimization(assertEquals);
+%PrepareFunctionForOptimization(gaga);
+%PrepareFunctionForOptimization(baz);
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(foo);
+try { foo() } catch (e) {}
+%OptimizeFunctionOnNextCall(foo);
+try { foo() } catch (e) {}
+%PrepareFunctionForOptimization(prettyPrinted);
+%PrepareFunctionForOptimization(formatFailureText);
+%PrepareFunctionForOptimization(fail);
+%PrepareFunctionForOptimization(deepEquals);
+%PrepareFunctionForOptimization(assertEquals);
+%PrepareFunctionForOptimization(gaga);
+%PrepareFunctionForOptimization(baz);
+%PrepareFunctionForOptimization(bar);
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo);
+try { foo() } catch (e) {}
diff --git a/deps/v8/test/mjsunit/const-field-tracking-2.js b/deps/v8/test/mjsunit/const-field-tracking-2.js
new file mode 100644
index 0000000000..b0eb8c749f
--- /dev/null
+++ b/deps/v8/test/mjsunit/const-field-tracking-2.js
@@ -0,0 +1,225 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(gsathya): This test will probably break when v8 tiers up to turbofan
+// from turboprop.
+//
+// Flags: --allow-natives-syntax --opt --no-always-opt --turboprop
+
+var global = this;
+var unique_id = 0;
+// Creates a function with unique SharedFunctionInfo to ensure the feedback
+// vector is unique for each test case.
+function MakeFunctionWithUniqueSFI(...args) {
+ assertTrue(args.length > 0);
+ var body = `/* Unique comment: ${unique_id++} */ ` + args.pop();
+ return new Function(...args, body);
+}
+
+
+//
+// Load constant field from constant object directly.
+//
+function TestLoadFromConstantFieldOfAConstantObject(the_value, other_value) {
+ function A(v) { this.v = v; }
+ function O() { this.a = new A(the_value); }
+ var the_object = new O();
+
+ // Ensure that {the_object.a}'s map is not stable, to complicate the
+ // compiler's life.
+ new A(the_value).blah = 0;
+
+ // Ensure that constant tracking is enabled for {constant_object}.
+ delete global.constant_object;
+ global.constant_object = the_object;
+ assertEquals(the_object, constant_object);
+
+ assertTrue(%HasFastProperties(the_object));
+
+ // {constant_object} is known to the compiler via global property cell
+ // tracking.
+ var load = MakeFunctionWithUniqueSFI("return constant_object.a.v;");
+ %PrepareFunctionForOptimization(load);
+ load();
+ load();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(the_value, load());
+ assertOptimized(load);
+ var a = new A(other_value);
+ assertTrue(%HaveSameMap(a, the_object.a));
+ // Make constant field mutable by assigning another value
+ // to some other instance of A.
+ new A(the_value).v = other_value;
+ assertTrue(%HaveSameMap(a, new A(the_value)));
+ assertTrue(%HaveSameMap(a, the_object.a));
+ assertOptimized(load);
+ assertEquals(the_value, load());
+ assertOptimized(load);
+ assertEquals(the_value, load());
+}
+
+// Test constant tracking with Smi value.
+(function() {
+ var the_value = 42;
+ var other_value = 153;
+ TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with double value.
+(function() {
+ var the_value = 0.9;
+ var other_value = 0.42;
+ TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with function value.
+(function() {
+ var the_value = function V() {};
+ var other_value = function W() {};
+ TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with heap object value.
+(function() {
+ function V() {}
+ var the_value = new V();
+ var other_value = new V();
+ TestLoadFromConstantFieldOfAConstantObject(the_value, other_value);
+})();
+
+
+//
+// Load constant field from a prototype.
+//
+function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
+ function Proto() { this.v = the_value; }
+ var the_prototype = new Proto();
+
+ function O() {}
+ O.prototype = the_prototype;
+ var the_object = new O();
+
+ // Ensure O.prototype is in fast mode by loading from its field.
+ function warmup() { return new O().v; }
+ %EnsureFeedbackVectorForFunction(warmup);
+ warmup(); warmup(); warmup();
+ assertTrue(%HasFastProperties(O.prototype));
+
+ // The parameter object is not constant but all the values have the same
+ // map and therefore the compiler knows the prototype object and can
+ // optimize load of "v".
+ var load = MakeFunctionWithUniqueSFI("o", "return o.v;");
+ %PrepareFunctionForOptimization(load);
+ load(new O());
+ load(new O());
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(the_value, load(new O()));
+ assertOptimized(load);
+ // Invalidation of mutability should trigger deoptimization with a
+ // "field-owner" reason.
+ the_prototype.v = other_value;
+ assertUnoptimized(load);
+}
+
+// Test constant tracking with Smi value.
+(function() {
+ var the_value = 42;
+ var other_value = 153;
+ TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
+})();
+
+// Test constant tracking with double value.
+(function() {
+ var the_value = 0.9;
+ var other_value = 0.42;
+ TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
+})();
+
+// Test constant tracking with function value.
+(function() {
+ var the_value = function V() {};
+ var other_value = function W() {};
+ TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
+})();
+
+// Test constant tracking with heap object value.
+(function() {
+ function V() {}
+ var the_value = new V();
+ var other_value = new V();
+ TestLoadFromConstantFieldOfAPrototype(the_value, other_value);
+})();
+
+
+//
+// Store to constant field of a constant object.
+//
+function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
+ function A(v) { this.v = v; }
+ function O() { this.a = new A(the_value); }
+ var the_object = new O();
+
+ // Ensure that {the_object.a}'s map is not stable, to complicate the
+ // compiler's life.
+ new A(the_value).blah = 0;
+
+ // Ensure that constant tracking is enabled for {constant_object}.
+ delete global.constant_object;
+ global.constant_object = the_object;
+ assertEquals(the_object, constant_object);
+
+ assertTrue(%HasFastProperties(the_object));
+
+ // {constant_object} is known to the compiler via global property cell
+ // tracking.
+ var store = MakeFunctionWithUniqueSFI("v", "constant_object.a.v = v;");
+ %PrepareFunctionForOptimization(store);
+ store(the_value);
+ store(the_value);
+ %OptimizeFunctionOnNextCall(store);
+ store(the_value);
+ assertEquals(the_value, constant_object.a.v);
+ assertOptimized(store);
+ // Storing the same value does not deoptimize.
+ store(the_value);
+ assertEquals(the_value, constant_object.a.v);
+ assertOptimized(store);
+
+ var a = new A(other_value);
+
+ assertOptimized(store);
+ // Storing other value deoptimizes because of failed value check.
+ store(other_value);
+ assertOptimized(store);
+ assertEquals(other_value, constant_object.a.v);
+}
+
+// Test constant tracking with Smi values.
+(function() {
+ var the_value = 42;
+ var other_value = 153;
+ TestStoreToConstantFieldOfConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with double values.
+(function() {
+ var the_value = 0.9;
+ var other_value = 0.42;
+ TestStoreToConstantFieldOfConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with function values.
+(function() {
+ var the_value = function V() {};
+ var other_value = function W() {};
+ TestStoreToConstantFieldOfConstantObject(the_value, other_value);
+})();
+
+// Test constant tracking with heap object values.
+(function() {
+ function V() {}
+ var the_value = new V();
+ var other_value = new V();
+ TestStoreToConstantFieldOfConstantObject(the_value, other_value);
+})();
diff --git a/deps/v8/test/mjsunit/es6/array-copywithin.js b/deps/v8/test/mjsunit/es6/array-copywithin.js
index ed52b7d5fa..140f27b785 100644
--- a/deps/v8/test/mjsunit/es6/array-copywithin.js
+++ b/deps/v8/test/mjsunit/es6/array-copywithin.js
@@ -42,7 +42,7 @@
Array.prototype.copyWithin.call(args, -2, 0);
assertArrayEquals([1, 1, 2], Array.prototype.slice.call(args));
- // [[Class]] does not change
+ // Object.prototype.toString branding does not change
assertArrayEquals("[object Arguments]", Object.prototype.toString.call(args));
})();
diff --git a/deps/v8/test/mjsunit/es6/promise-all-resolve-not-callable.js b/deps/v8/test/mjsunit/es6/promise-all-resolve-not-callable.js
new file mode 100644
index 0000000000..cfaa3a7158
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/promise-all-resolve-not-callable.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+load('test/mjsunit/test-async.js');
+
+// Promise.all should call IteratorClose if Promise.resolve is not callable.
+
+let returnCount = 0;
+let iter = {
+ [Symbol.iterator]() {
+ return {
+ return() {
+ returnCount++;
+ }
+ };
+ }
+};
+
+Promise.resolve = "certainly not callable";
+
+testAsync(assert => {
+ assert.plan(2);
+ Promise.all(iter).then(assert.unreachable, reason => {
+ assert.equals(true, reason instanceof TypeError);
+ assert.equals(1, returnCount);
+ });
+});
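
The three sibling tests that follow (Promise.allSettled, Promise.race, and later Promise.any) make the same check as this one: when the combinator bails out early because Promise.resolve is not callable, it must close the source iterator. A minimal sketch of that "IteratorClose" contract, using placeholder names and not taken from the diff itself:

// Abandoning an iterator early must call its return() method ("IteratorClose").
// for-of does this on break/throw; the promise combinators are required to do
// the same when they abort because Promise.resolve is not callable.
let closed = 0;
const iterable = {
  [Symbol.iterator]() {
    return {
      next() { return { value: 1, done: false }; },
      return() { closed++; return { done: true }; }
    };
  }
};
for (const v of iterable) break;  // early exit closes the iterator
// closed === 1 here, the same observation these tests make via returnCount.
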
diff --git a/deps/v8/test/mjsunit/es6/promise-allsettled-resolve-not-callable.js b/deps/v8/test/mjsunit/es6/promise-allsettled-resolve-not-callable.js
new file mode 100644
index 0000000000..ed5868383d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/promise-allsettled-resolve-not-callable.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+load('test/mjsunit/test-async.js');
+
+// Promise.allSettled should call IteratorClose if Promise.resolve is not callable.
+
+let returnCount = 0;
+let iter = {
+ [Symbol.iterator]() {
+ return {
+ return() {
+ returnCount++;
+ }
+ };
+ }
+};
+
+Promise.resolve = "certainly not callable";
+
+testAsync(assert => {
+ assert.plan(2);
+ Promise.allSettled(iter).then(assert.unreachable, reason => {
+ assert.equals(true, reason instanceof TypeError);
+ assert.equals(1, returnCount);
+ });
+});
diff --git a/deps/v8/test/mjsunit/es6/promise-race-resolve-not-callable.js b/deps/v8/test/mjsunit/es6/promise-race-resolve-not-callable.js
new file mode 100644
index 0000000000..17f696f892
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/promise-race-resolve-not-callable.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+load('test/mjsunit/test-async.js');
+
+// Promise.race should call IteratorClose if Promise.resolve is not callable.
+
+let returnCount = 0;
+let iter = {
+ [Symbol.iterator]() {
+ return {
+ return() {
+ returnCount++;
+ }
+ };
+ }
+};
+
+Promise.resolve = "certainly not callable";
+
+testAsync(assert => {
+ assert.plan(2);
+ Promise.race(iter).then(assert.unreachable, reason => {
+ assert.equals(true, reason instanceof TypeError);
+ assert.equals(1, returnCount);
+ });
+});
diff --git a/deps/v8/test/mjsunit/es6/proxies-constructor.js b/deps/v8/test/mjsunit/es6/proxies-constructor.js
index 93b64ec3a1..206df4713c 100644
--- a/deps/v8/test/mjsunit/es6/proxies-constructor.js
+++ b/deps/v8/test/mjsunit/es6/proxies-constructor.js
@@ -22,14 +22,6 @@
})();
-(function testRevokedTarget() {
- var revocable = Proxy.revocable({}, {});
- revocable.revoke();
-
- assertThrows(function(){ new Proxy(revocable.proxy, {}); }, TypeError);
-})();
-
-
(function testNonObjectHandlerTypes() {
assertThrows(function(){ new Proxy({}, undefined); }, TypeError);
@@ -45,14 +37,6 @@
})();
-(function testRevokedHandler() {
- var revocable = Proxy.revocable({}, {});
- revocable.revoke();
-
- assertThrows(function(){ new Proxy({}, revocable.proxy); }, TypeError);
-})();
-
-
(function testConstructionWithoutArguments() {
assertThrows(function(){ new Proxy(); }, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/proxies-revocable.js b/deps/v8/test/mjsunit/es6/proxies-revocable.js
index 82b853af8c..1f61174a35 100644
--- a/deps/v8/test/mjsunit/es6/proxies-revocable.js
+++ b/deps/v8/test/mjsunit/es6/proxies-revocable.js
@@ -21,9 +21,3 @@ assertEquals(undefined, revoke());
for (var trap of traps) {
assertThrows(() => Reflect[trap](proxy), TypeError);
}
-
-// Throw TypeError if target or handler is revoked proxy
-var revocable = Proxy.revocable({}, {});
-revocable.revoke();
-assertThrows(function(){ Proxy.revocable(revocable.proxy, {}); }, TypeError);
-assertThrows(function(){ Proxy.revocable({}, revocable.proxy); }, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/reflect-construct.js b/deps/v8/test/mjsunit/es6/reflect-construct.js
index 34b6f27373..f69a93b487 100644
--- a/deps/v8/test/mjsunit/es6/reflect-construct.js
+++ b/deps/v8/test/mjsunit/es6/reflect-construct.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-unsafe-function-constructor
+// Flags: --allow-unsafe-function-constructor --harmony-promise-any
(function testReflectConstructArity() {
@@ -311,6 +311,7 @@
var realm2 = Realm.create();
var well_known_intrinsic_constructors = [
+ ["AggregateError", [[]]],
"Array",
"ArrayBuffer",
"Boolean",
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-411237.js b/deps/v8/test/mjsunit/es6/regress/regress-411237.js
deleted file mode 100644
index b2b1a39bf6..0000000000
--- a/deps/v8/test/mjsunit/es6/regress/regress-411237.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-%PrepareFunctionForOptimization(print);
-try {
- %OptimizeFunctionOnNextCall(print);
-} catch(e) { }
-
-try {
- function* f() {
- }
- %PrepareFunctionForOptimization(f);
- %OptimizeFunctionOnNextCall(f);
-} catch(e) { }
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from-optional-arguments.js b/deps/v8/test/mjsunit/es6/typedarray-from-optional-arguments.js
new file mode 100644
index 0000000000..c1375aefe3
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-from-optional-arguments.js
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+function assertArrayLikeEquals(value, expected, type) {
+ assertEquals(value.__proto__, type.prototype);
+ assertEquals(expected.length, value.length);
+ for (var i = 0; i < value.length; ++i) {
+ assertEquals(expected[i], value[i]);
+ }
+}
+
+for (var constructor of typedArrayConstructors) {
+ let ta = new constructor([1,2,3]);
+ assertArrayLikeEquals(constructor.from([1,2,3]), ta, constructor);
+ assertArrayLikeEquals(constructor.from([1,2,3], undefined),
+ ta, constructor);
+ assertArrayLikeEquals(constructor.from([1,2,3], undefined, undefined),
+ ta, constructor);
+}
diff --git a/deps/v8/test/mjsunit/harmony/aggregate-error.js b/deps/v8/test/mjsunit/harmony/aggregate-error.js
new file mode 100644
index 0000000000..40ab7508c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/aggregate-error.js
@@ -0,0 +1,213 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-promise-any
+
+(function TestNoParameters() {
+ // Can't omit the "errors" parameter; there's nothing to iterate.
+ assertThrows(() => { new AggregateError(); });
+})();
+
+(function TestNoParameters_NoNew() {
+ // Can't omit the "errors" parameter; there's nothing to iterate.
+ assertThrows(() => { AggregateError(); });
+})();
+
+(function TestOneParameterErrorsIsArray() {
+ let error = new AggregateError([1, 20, 4]);
+ assertEquals('', error.message);
+ assertEquals([1, 20, 4], error.errors);
+})();
+
+(function TestOneParameterErrorsIsArray_NoNew() {
+ let error = AggregateError([1, 20, 4]);
+ assertEquals('', error.message);
+ assertEquals([1, 20, 4], error.errors);
+})();
+
+(function TestOneParameterErrorsIsAnEmptyArray() {
+ let error = new AggregateError([]);
+ assertEquals('', error.message);
+ assertEquals([], error.errors);
+})();
+
+(function TestOneParameterErrorsIsASet() {
+ let set = new Set();
+ set.add(5);
+ set.add(100);
+ let error = new AggregateError(set);
+ assertEquals('', error.message);
+ assertEquals(2, error.errors.length);
+ assertTrue(error.errors[0] == 5 || error.errors[1] == 5);
+ assertTrue(error.errors[0] == 100 || error.errors[1] == 100);
+})();
+
+(function TestOneParameterErrorsNotIterable() {
+ assertThrows(() => { new AggregateError(5); });
+})();
+
+(function TestOneParameterErrorsNotIterable_NoNew() {
+ assertThrows(() => { AggregateError(5); });
+})();
+
+(function TestTwoParameters() {
+ let error = new AggregateError([1, 20, 4], 'custom message');
+ assertEquals('custom message', error.message);
+ assertEquals([1, 20, 4], error.errors);
+})();
+
+(function TestTwoParameters_NoNew() {
+ let error = AggregateError([1, 20, 4], 'custom message');
+ assertEquals('custom message', error.message);
+ assertEquals([1, 20, 4], error.errors);
+})();
+
+(function TestTwoParametersMessageNotString() {
+ let custom = { toString() { return 'hello'; } };
+ let error = new AggregateError([], custom);
+ assertEquals('hello', error.message);
+})();
+
+(function TestTwoParametersMessageIsSMI() {
+ let error = new AggregateError([], 44);
+ assertEquals('44', error.message);
+})();
+
+(function TestTwoParametersMessageUndefined() {
+ let error = new AggregateError([], undefined);
+ assertFalse(Object.prototype.hasOwnProperty.call(error, 'message'));
+})();
+
+(function AggregateErrorPrototypeErrorsCalledWithWrongTypeOfObject() {
+ let f = Object.getOwnPropertyDescriptor(AggregateError.prototype, 'errors').get;
+
+ // This works:
+ let error = new AggregateError([3]);
+ let got_errors = f.call(error);
+ assertEquals([3], got_errors);
+
+ // This doesn't:
+ assertThrows(() => { f.call({}) } );
+})();
+
+(function AggregateErrorPrototypeErrorsCalledWithTooManyArguments() {
+ let f = Object.getOwnPropertyDescriptor(AggregateError.prototype, 'errors').get;
+ let error = new AggregateError([3]);
+ let got_errors = f.call(error, ["unnecessary", "arguments"]);
+ assertEquals([3], got_errors);
+})();
+
+(function SetErrorsSloppy() {
+ let e = new AggregateError([1]);
+ e.errors = [4, 5, 6];
+ assertEquals([1], e.errors);
+})();
+
+(function SetErrorsStrict() {
+ "use strict";
+ let e = new AggregateError([1]);
+ assertThrows(() => { e.errors = [4, 5, 6];});
+})();
+
+(function SubClassProto() {
+ class A extends AggregateError {
+ constructor() {
+ super([]);
+ }
+ }
+
+ let o = new A();
+ assertEquals(o.__proto__, A.prototype);
+})();
+
+(function ErrorsWithHoles() {
+ let errors = [0];
+ errors[2] = 2;
+ let a = new AggregateError(errors);
+ assertEquals([0, undefined, 2], a.errors);
+})();
+
+(function ErrorsIsANewArray(){
+ let array = [8, 9];
+ let e = new AggregateError(array);
+ array.push(1);
+ assertEquals([8, 9], e.errors);
+})();
+
+(function ErrorsIsANewArrayForEachGetterCall(){
+ let e = new AggregateError([9, 6, 3]);
+ const errors1 = e.errors;
+ const errors2 = e.errors;
+ assertNotSame(errors1, errors2);
+})();
+
+(function ErrorsModified(){
+ let e = new AggregateError([9, 6, 3]);
+ const errors1 = e.errors;
+ errors1[0] = 50;
+ const errors2 = e.errors;
+ assertEquals([50, 6, 3], errors1);
+ assertEquals([9, 6, 3], errors2);
+})();
+
+(function EmptyErrorsModified1(){
+ let e = new AggregateError([]);
+ const errors1 = e.errors;
+ errors1[0] = 50;
+ const errors2 = e.errors;
+ assertEquals([50], errors1);
+ assertEquals([], errors2);
+})();
+
+(function EmptyErrorsModified2(){
+ let e = new AggregateError([]);
+ const errors1 = e.errors;
+ errors1.push(50);
+ const errors2 = e.errors;
+ assertEquals([50], errors1);
+ assertEquals([], errors2);
+})();
+
+(function AggregateErrorCreation() {
+ // Verify that we match the spec wrt getting the prototype from the
+ // newTarget, iterating the errors array and calling toString on the
+ // message.
+ let counter = 1;
+ let prototype_got = 0;
+ let errors_iterated = 0;
+ let to_string_called = 0;
+
+ // For observing Get(new target, "prototype")
+ function target() {}
+ let handler = {
+ get: (target, prop, receiver) => {
+ if (prop == 'prototype') {
+ prototype_got = counter++;
+ return target.prototype;
+ }
+ }
+ };
+ let p = new Proxy(target, handler);
+
+ // For observing IterableToList(errors)
+ var errors = {
+ [Symbol.iterator]() {
+ return {
+ next() {
+ errors_iterated = counter++;
+ return { done: true };
+ }
+ };
+ }
+ };
+
+ // For observing ToString(message)
+ let message = { toString: () => { to_string_called = counter++;}}
+
+ let o = Reflect.construct(AggregateError, [errors, message], p);
+
+ assertEquals(1, prototype_got);
+ assertEquals(2, to_string_called);
+ assertEquals(3, errors_iterated);
+ })();
diff --git a/deps/v8/test/mjsunit/harmony/logical-assignment.js b/deps/v8/test/mjsunit/harmony/logical-assignment.js
new file mode 100644
index 0000000000..92153547c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/logical-assignment.js
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-logical-assignment
+
+{
+ let x = null;
+ let y = 0;
+ x ??= y ||= 5;
+
+ assertEquals(x, 5);
+ assertEquals(y, 5);
+}
+
+
+{
+ let x = null;
+ let y = 4;
+ x ??= y ||= 5;
+
+ assertEquals(x, 4);
+ assertEquals(y, 4);
+}
+
+{
+ let x = 1;
+ let y = 0;
+ x &&= y ||= 5;
+
+ assertEquals(x, 5);
+ assertEquals(y, 5);
+}
+
+{
+ let x = 0;
+ let y = 0;
+ x &&= y ||= 5;
+
+ assertEquals(x, 0);
+ assertEquals(y, 0);
+}
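
For reference, the short-circuit behaviour these four cases exercise can be summarized by the approximate desugarings below (an illustrative sketch, not part of the diff):

// Approximate desugarings for a simple variable x:
//   x ??= v   behaves like   x ?? (x = v)   (assign only if x is null/undefined)
//   x ||= v   behaves like   x || (x = v)   (assign only if x is falsy)
//   x &&= v   behaves like   x && (x = v)   (assign only if x is truthy)
function sideEffect() { throw new Error('should not run'); }
let x = 0;
x &&= sideEffect();  // left operand is falsy: RHS is never evaluated, x stays 0
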
diff --git a/deps/v8/test/mjsunit/harmony/optional-chaining.js b/deps/v8/test/mjsunit/harmony/optional-chaining.js
index a6121f5fcb..72b0559e00 100644
--- a/deps/v8/test/mjsunit/harmony/optional-chaining.js
+++ b/deps/v8/test/mjsunit/harmony/optional-chaining.js
@@ -101,7 +101,6 @@ shouldThrowSyntaxError('function foo() { new?.target; }');
shouldThrowSyntaxError('function tag() {} tag?.``;');
shouldThrowSyntaxError('const o = { tag() {} }; o?.tag``;');
-shouldThrowSyntaxError('class A { #foo = "hi"; constructor() { this?.#foo; } }')
const o2 = {
count: 0,
diff --git a/deps/v8/test/mjsunit/harmony/promise-any-overflow-1.js b/deps/v8/test/mjsunit/harmony/promise-any-overflow-1.js
new file mode 100644
index 0000000000..0a95edd05c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promise-any-overflow-1.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-promise-any
+
+load('test/mjsunit/test-async.js');
+
+// Make sure we properly throw a RangeError when overflowing the maximum
+// number of elements for Promise.any, which is capped at 2^21 bits right
+// now, since we store the indices as identity hash on the resolve element
+// closures.
+const a = new Array(2 ** 21 - 1);
+const p = Promise.resolve(1);
+for (let i = 0; i < a.length; ++i) a[i] = p;
+testAsync(assert => {
+ assert.plan(1);
+ Promise.any(a).then(assert.unreachable, reason => {
+ assert.equals(true, reason instanceof RangeError);
+ });
+});
diff --git a/deps/v8/test/mjsunit/harmony/promise-any-overflow-2.js b/deps/v8/test/mjsunit/harmony/promise-any-overflow-2.js
new file mode 100644
index 0000000000..2ed8453e3a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promise-any-overflow-2.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-promise-any
+
+load('test/mjsunit/test-async.js');
+
+// Test that pre-allocation of the errors array works even if it needs to be
+// allocated in large object space.
+const a = new Array(64 * 1024);
+a.fill(Promise.reject(1));
+testAsync(assert => {
+ assert.plan(1);
+ Promise.any(a).then(assert.unreachable, b => {
+ assert.equals(a.length, b.errors.length);
+ });
+});
diff --git a/deps/v8/test/mjsunit/harmony/promise-any-resolve-not-callable.js b/deps/v8/test/mjsunit/harmony/promise-any-resolve-not-callable.js
new file mode 100644
index 0000000000..abfd0570e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promise-any-resolve-not-callable.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-promise-any
+
+load('test/mjsunit/test-async.js');
+
+// Promise.any should call IteratorClose if Promise.resolve is not callable.
+
+let returnCount = 0;
+let iter = {
+ [Symbol.iterator]() {
+ return {
+ return() {
+ returnCount++;
+ }
+ };
+ }
+};
+
+Promise.resolve = "certainly not callable";
+
+testAsync(assert => {
+ assert.plan(2);
+ Promise.any(iter).then(assert.unreachable, reason => {
+ assert.equals(true, reason instanceof TypeError);
+ assert.equals(1, returnCount);
+ });
+});
diff --git a/deps/v8/test/mjsunit/harmony/promise-any.js b/deps/v8/test/mjsunit/harmony/promise-any.js
new file mode 100644
index 0000000000..6ba91de96a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promise-any.js
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony-promise-any
+
+load('test/mjsunit/test-async.js');
+
+(function() {
+ testAsync(assert => {
+ assert.plan(1);
+ Promise.any([]).then(
+ assert.unreachable,
+ (x) => { assert.equals(0, x.errors.length); }
+ );
+ });
+})();
+
+(function() {
+ const p1 = Promise.resolve(1);
+ const p2 = Promise.resolve(2);
+ const p3 = Promise.resolve(3);
+ testAsync(assert => {
+ assert.plan(1);
+ Promise.any([p1, p2, p3]).then(
+ (x) => { assert.equals(1, x); },
+ assert.unreachable);
+ });
+})();
+
+(function() {
+ let outsideResolve;
+ let outsideReject;
+ let p1 = new Promise(() => {});
+ let p2 = new Promise(function(resolve, reject) {
+ outsideResolve = resolve;
+ outsideReject = reject;
+ });
+ let p3 = new Promise(() => {});
+ testAsync(assert => {
+ assert.plan(1);
+ Promise.any([p1, p2, p3]).then(
+ (x) => { assert.equals(2, x); },
+ assert.unreachable
+ );
+ outsideResolve(2);
+ });
+})();
+
+(function() {
+ const p1 = Promise.reject(1);
+ const p2 = Promise.resolve(2);
+ const p3 = Promise.resolve(3);
+ testAsync(assert => {
+ assert.plan(1);
+ Promise.any([p1, p2, p3]).then(
+ (x) => { assert.equals(2, x); },
+ assert.unreachable);
+ });
+})();
+
+(function() {
+ const p1 = Promise.reject(1);
+ const p2 = Promise.reject(2);
+ const p3 = Promise.reject(3);
+ testAsync(assert => {
+ assert.plan(4);
+ Promise.any([p1, p2, p3]).then(
+ assert.unreachable,
+ (x) => {
+ assert.equals(3, x.errors.length);
+ assert.equals(1, x.errors[0]);
+ assert.equals(2, x.errors[1]);
+ assert.equals(3, x.errors[2]);
+ }
+ );
+ });
+})();
+
+(function() {
+ testAsync(assert => {
+ assert.plan(1);
+ (async function() {
+ const p1 = Promise.reject(1);
+ const p2 = Promise.reject(2);
+ const p3 = Promise.reject(3);
+ try {
+ await Promise.any([p1, p2, p3]);
+ } catch (error) {
+ assert.equals(1, 1);
+ }
+ })();
+ });
+})();
diff --git a/deps/v8/test/mjsunit/harmony/string-matchAll.js b/deps/v8/test/mjsunit/harmony/string-matchAll.js
index e9b39ba46c..e3b1d5c224 100644
--- a/deps/v8/test/mjsunit/harmony/string-matchAll.js
+++ b/deps/v8/test/mjsunit/harmony/string-matchAll.js
@@ -36,7 +36,7 @@ TestNoMatch('a', 'b');
function TestGlobalRegex(regex_or_string) {
- const iter = 'ab'.matchAll(/./g);
+ const iter = 'ab'.matchAll(regex_or_string);
let next_result = iter.next();
assertEquals(['a'], next_result.value);
assertFalse(next_result.done);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics-cleanupsome.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics-cleanupsome.js
new file mode 100644
index 0000000000..3a3a54ef34
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics-cleanupsome.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs-with-cleanup-some
+
+(function TestCleanupSomeWithoutFinalizationRegistry() {
+ assertThrows(() => FinalizationRegistry.prototype.cleanupSome.call({}), TypeError);
+ // Does not throw:
+ let fg = new FinalizationRegistry(() => {});
+ let rv = FinalizationRegistry.prototype.cleanupSome.call(fg);
+ assertEquals(undefined, rv);
+})();
+
+(function TestCleanupSomeWithNonCallableCallback() {
+ let fg = new FinalizationRegistry(() => {});
+ assertThrows(() => fg.cleanupSome(1), TypeError);
+ assertThrows(() => fg.cleanupSome(1n), TypeError);
+ assertThrows(() => fg.cleanupSome(Symbol()), TypeError);
+ assertThrows(() => fg.cleanupSome({}), TypeError);
+ assertThrows(() => fg.cleanupSome('foo'), TypeError);
+ assertThrows(() => fg.cleanupSome(true), TypeError);
+ assertThrows(() => fg.cleanupSome(false), TypeError);
+ assertThrows(() => fg.cleanupSome(null), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index 9a1a99efe4..547a688c2a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -148,23 +148,3 @@
let proxy = new Proxy(obj, handler);
let wr = new WeakRef(proxy);
})();
-
-(function TestCleanupSomeWithoutFinalizationRegistry() {
- assertThrows(() => FinalizationRegistry.prototype.cleanupSome.call({}), TypeError);
- // Does not throw:
- let fg = new FinalizationRegistry(() => {});
- let rv = FinalizationRegistry.prototype.cleanupSome.call(fg);
- assertEquals(undefined, rv);
-})();
-
-(function TestCleanupSomeWithNonCallableCallback() {
- let fg = new FinalizationRegistry(() => {});
- assertThrows(() => fg.cleanupSome(1), TypeError);
- assertThrows(() => fg.cleanupSome(1n), TypeError);
- assertThrows(() => fg.cleanupSome(Symbol()), TypeError);
- assertThrows(() => fg.cleanupSome({}), TypeError);
- assertThrows(() => fg.cleanupSome('foo'), TypeError);
- assertThrows(() => fg.cleanupSome(true), TypeError);
- assertThrows(() => fg.cleanupSome(false), TypeError);
- assertThrows(() => fg.cleanupSome(null), TypeError);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
deleted file mode 100644
index 3f5133a87c..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-doesnt-iterate-all-holdings.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-let cleanup_call_count = 0;
-let cleanup = function(iter) {
- print("in cleanup");
- if (cleanup_call_count == 0) {
- // First call: iterate 2 of the 3 holdings
- let holdings_list = [];
- for (holdings of iter) {
- holdings_list.push(holdings);
- // Don't iterate the rest of the holdings
- if (holdings_list.length == 2) {
- break;
- }
- }
- assertEquals(holdings_list.length, 2);
- assertTrue(holdings_list[0] < 3);
- assertTrue(holdings_list[1] < 3);
- // Update call count only after the asserts; this ensures that the test
- // fails even if the exceptions inside the cleanup function are swallowed.
- cleanup_call_count++;
- } else {
- // Second call: iterate one leftover holdings and one holdings.
- assertEquals(1, cleanup_call_count);
- let holdings_list = [];
- for (holdings of iter) {
- holdings_list.push(holdings);
- }
- assertEquals(holdings_list.length, 2);
- assertTrue((holdings_list[0] < 3 && holdings_list[1] == 100) ||
- (holdings_list[1] < 3 && holdings_list[0] == 100));
- // Update call count only after the asserts; this ensures that the test
- // fails even if the exceptions inside the cleanup function are swallowed.
- cleanup_call_count++;
- }
-}
-
-let fg = new FinalizationRegistry(cleanup);
-// Create 3 objects and register them in the FinalizationRegistry. The objects need
-// to be inside a closure so that we can reliably kill them!
-
-(function() {
- let objects = [];
-
- for (let i = 0; i < 3; ++i) {
- objects[i] = {a: i};
- fg.register(objects[i], i);
- }
-
- gc();
- assertEquals(0, cleanup_call_count);
-
- // Drop the references to the objects.
- objects = [];
-})();
-
-// This GC will reclaim the targets.
-gc();
-assertEquals(0, cleanup_call_count);
-
-let timeout_func_1 = function() {
- assertEquals(1, cleanup_call_count);
-
- // Assert that the cleanup function won't be called unless new targets appear.
- setTimeout(timeout_func_2, 0);
-}
-
-setTimeout(timeout_func_1, 0);
-
-let timeout_func_2 = function() {
- assertEquals(1, cleanup_call_count);
-
- // Create a new object and register it.
- (function() {
- let obj = {};
- let wc = fg.register(obj, 100);
- obj = null;
- })();
-
- // This GC will reclaim the targets.
- gc();
- assertEquals(1, cleanup_call_count);
-
- setTimeout(timeout_func_3, 0);
-}
-
-let timeout_func_3 = function() {
- assertEquals(2, cleanup_call_count);
-}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index 12282f7486..8b43618c71 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -19,9 +19,8 @@ function scheduleMicrotask(func) {
let log = [];
-let cleanup = (iter) => {
+let cleanup = (holdings) => {
cleanedUp = true;
- for (holdings of iter) { }
}
let fg = new FinalizationRegistry(cleanup);
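
The weakrefs changes around this point (the deleted cleanup-doesnt-iterate-all-holdings.js above and the rewritten tests below) all apply the same API update: the FinalizationRegistry cleanup callback no longer receives an iterator over all pending holdings, but is invoked once per reclaimed registration with that registration's holdings value. A minimal sketch of the two shapes (illustrative only; names are placeholders):

// Old shape, as in the removed and rewritten tests: walk an iterator of holdings.
//   new FinalizationRegistry(iter => { for (const holdings of iter) { /* ... */ } });
// New shape, as the updated tests expect: one call per reclaimed target.
const registry = new FinalizationRegistry(holdings => {
  print('target reclaimed, holdings: ' + holdings);  // d8-style print, as used elsewhere in these tests
});
registry.register({ /* target */ }, 'some holdings', { /* unregister token */ });
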
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
index c591b44a54..ef60d3f150 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/iterating-in-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
@@ -4,21 +4,11 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-let cleanup_called = false;
-let cleanup = function(iter) {
- assertFalse(cleanup_called);
- let holdings_list = [];
- for (holdings of iter) {
- holdings_list.push(holdings);
- }
- assertEquals(holdings_list.length, 2);
- if (holdings_list[0] == 1) {
- assertEquals(holdings_list[1], 2);
- } else {
- assertEquals(holdings_list[0], 2);
- assertEquals(holdings_list[1], 1);
- }
- cleanup_called = true;
+let cleanup_called = 0;
+let holdings_list = [];
+let cleanup = function(holdings) {
+ holdings_list.push(holdings);
+ cleanup_called++;
}
let fg = new FinalizationRegistry(cleanup);
@@ -34,7 +24,7 @@ let o2 = {};
})();
gc();
-assertFalse(cleanup_called);
+assertEquals(cleanup_called, 0);
// Drop the last references to o1 and o2.
(function() {
@@ -45,10 +35,17 @@ assertFalse(cleanup_called);
// GC will reclaim the target objects; the cleanup function will be called the
// next time we enter the event loop.
gc();
-assertFalse(cleanup_called);
+assertEquals(cleanup_called, 0);
let timeout_func = function() {
- assertTrue(cleanup_called);
+ assertEquals(cleanup_called, 2);
+ assertEquals(holdings_list.length, 2);
+ if (holdings_list[0] == 1) {
+ assertEquals(holdings_list[1], 2);
+ } else {
+ assertEquals(holdings_list[0], 2);
+ assertEquals(holdings_list[1], 1);
+ }
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
index 8e40fd6bbd..f7fe196c78 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking --allow-natives-syntax
let cleanup_count = 0;
let cleanup_holdings = [];
-let cleanup = function(iter) {
- for (holdings of iter) {
- cleanup_holdings.push(holdings);
- }
+let cleanup = function(holdings) {
+ cleanup_holdings.push(holdings);
++cleanup_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
index 697c926a82..cdced3fca5 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking
let cleanup_count = 0;
let cleanup_holdings = [];
-let cleanup = function(iter) {
- for (holdings of iter) {
- cleanup_holdings.push(holdings);
- }
+let cleanup = function(holdings) {
+ cleanup_holdings.push(holdings);
++cleanup_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
new file mode 100644
index 0000000000..7476f2bd4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+// FinalizationRegistry#cleanupSome is normative optional and has its own
+// flag. Test that it's not present with only --harmony-weak-refs.
+
+assertEquals(undefined, Object.getOwnPropertyDescriptor(
+ FinalizationRegistry.prototype, "cleanupSome"));
+assertEquals(undefined, FinalizationRegistry.prototype.cleanupSome);
+assertFalse('cleanupSome' in FinalizationRegistry.prototype);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
index f757bdbf7e..480ec4e2ad 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
@@ -2,18 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking --allow-natives-syntax
let cleanup_count = 0;
let cleanup_holdings = [];
-let cleanup = function(iter) {
+let cleanup = function(holdings) {
%AbortJS("shouldn't be called");
}
-let cleanup2 = function(iter) {
- for (holdings of iter) {
- cleanup_holdings.push(holdings);
- }
+let cleanup2 = function(holdings) {
+ cleanup_holdings.push(holdings);
++cleanup_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
new file mode 100644
index 0000000000..6007f9c360
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+// A newly created WeakRef is kept alive until the end of the next microtask
+// checkpoint. V8 asserts that the kept objects list is cleared at the end of
+// microtask checkpoints when the microtask policy is auto. Test that d8, which
+// uses the auto policy, upholds the assert when manually quitting.
+let obj = {};
+let wr = new WeakRef(obj);
+testRunner.quit();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref
index 144b56fce7..274e714994 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref
@@ -5,12 +5,10 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
-let cleanup = function(iter) {
+let cleanup = function(holdings) {
assertFalse(cleanup_called);
let holdings_list = [];
- for (holdings of iter) {
- holdings_list.push(holdings);
- }
+ holdings_list.push(holdings);
assertEquals(1, holdings_list.length);
assertEquals("holdings", holdings_list[0]);
cleanup_called = true;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
index 732740f293..72d2cae83e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -5,8 +5,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
-function cleanup(iter) {
- [...iter];
+function cleanup(holdings) {
cleanup_called = true;
};
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
index d8c00fcff8..f63d17ed7f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
@@ -5,14 +5,10 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
-let cleanup = function(iter) {
+let holdings_list = [];
+let cleanup = function(holdings) {
assertFalse(cleanup_called);
- let holdings_list = [];
- for (holdings of iter) {
- holdings_list.push(holdings);
- }
- assertEquals(holdings_list.length, 1);
- assertEquals(holdings_list[0].a, "this is the holdings object");
+ holdings_list.push(holdings);
cleanup_called = true;
}
@@ -40,6 +36,8 @@ assertFalse(cleanup_called);
let timeout_func = function() {
assertTrue(cleanup_called);
+ assertEquals(holdings_list.length, 1);
+ assertEquals(holdings_list[0].a, "this is the holdings object");
}
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index de25bbe425..3b3f3412a2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -11,17 +11,13 @@ let cleanup0_holdings_count = 0;
let cleanup1_call_count = 0;
let cleanup1_holdings_count = 0;
-let cleanup0 = function(iter) {
- for (holdings of iter) {
- ++cleanup0_holdings_count;
- }
+let cleanup0 = function(holdings) {
+ ++cleanup0_holdings_count;
++cleanup0_call_count;
}
-let cleanup1 = function(iter) {
- for (holdings of iter) {
- ++cleanup1_holdings_count;
- }
+let cleanup1 = function(holdings) {
+ ++cleanup1_holdings_count;
++cleanup1_call_count;
}
@@ -61,12 +57,11 @@ gc();
gc();
let timeout_func = function() {
- assertEquals(1, cleanup0_call_count);
+ assertEquals(2, cleanup0_call_count);
assertEquals(2, cleanup0_holdings_count);
- assertEquals(1, cleanup1_call_count);
+ assertEquals(2, cleanup1_call_count);
assertEquals(2, cleanup1_holdings_count);
}
-// Give the cleanup task a chance to run. All holdings will be iterated during
-// the same invocation of the cleanup function.
+// Give the cleanup task a chance to run.
setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index fec0ab5b57..21b9ff709b 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -6,10 +6,8 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- ++cleanup_holdings_count;
- }
+let cleanup = function(holdings) {
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
@@ -33,7 +31,7 @@ let fg2 = new FinalizationRegistry(cleanup);
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the holdings.
+// Assert that the cleanup function was called.
let timeout_func = function() {
assertEquals(2, cleanup_call_count);
assertEquals(2, cleanup_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
index 09854f0556..235a34a592 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
@@ -5,9 +5,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let call_count = 0;
-let reentrant_gc =
- function(iter) {
- [...iter];
+let reentrant_gc = function(holdings) {
gc();
call_count++;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js b/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js
new file mode 100644
index 0000000000..f9bcc2b77d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/stress-finalizationregistry-dirty-enqueue.js
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stress-compaction --expose-gc
+
+// Test that the dirty FinalizationRegistries that are enqueued during GC have
+// their slots correctly recorded by the GC.
+
+// 1) Create many JSFinalizationRegistry objects so that they span several pages
+// (page size is 256kb).
+let registries = [];
+for (let i = 0; i < 1024 * 8; i++) {
+ registries.push(new FinalizationRegistry(() => {}));
+}
+
+// 2) Force two GCs to ensure that JSFinalizationRegistry objects are tenured.
+gc();
+gc();
+
+// 3) In a function: create a dummy target and register it in all
+// JSFinalizationRegistry objects.
+(function() {
+ let garbage = {};
+ registries.forEach((fr) => {
+ fr.register(garbage, 42);
+ });
+ garbage = null;
+})();
+
+// 4) Outside the function where the target is unreachable: force GC to collect
+// the object.
+gc();
+
+// 5) Force another GC to test that the slot was correctly updated.
+gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
index 7c09cf5985..56d9b562a1 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -6,11 +6,9 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals(holdings, undefined);
- ++cleanup_holdings_count;
- }
+let cleanup = function(holdings) {
+ assertEquals(holdings, undefined);
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
@@ -30,7 +28,7 @@ let fg = new FinalizationRegistry(cleanup);
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the holdings.
+// Assert that the cleanup function was called.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
assertEquals(1, cleanup_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index e8a7843c71..400385d193 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -6,11 +6,9 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals("holdings", holdings);
- ++cleanup_holdings_count;
- }
+let cleanup = function(holdings) {
+ assertEquals("holdings", holdings);
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
@@ -30,12 +28,12 @@ let key = {"k": "this is the key"};
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the holdings.
+// Assert that the cleanup function was called.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
assertEquals(1, cleanup_holdings_count);
- // Unregister an already iterated over weak reference.
+ // Unregister an already cleaned-up weak reference.
let success = fg.unregister(key);
assertFalse(success);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index ff576b4dfe..efa4df5217 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -5,7 +5,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --noincremental-marking
let cleanup_call_count = 0;
-let cleanup = function(iter) {
+let cleanup = function(holdings) {
++cleanup_call_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index e7604eecec..ff48758c07 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -5,7 +5,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup = function(iter) {
+let cleanup = function(holdings) {
++cleanup_call_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
index 30926d1d56..e607a1ead5 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
@@ -6,18 +6,12 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals(holdings, "holdings");
-
- // There's one more object with the same key that we haven't
- // iterated over yet so we should be able to unregister the
- // callback for that one.
- let success = fg.unregister(key);
- assertTrue(success);
-
- ++cleanup_holdings_count;
- }
+let cleanup = function(holdings) {
+ assertEquals(holdings, "holdings");
+ let success = fg.unregister(key);
+ assertFalse(success);
+
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
@@ -28,9 +22,7 @@ let key = {"k": "this is the key"};
(function() {
let object = {};
- let object2 = {};
fg.register(object, "holdings", key);
- fg.register(object2, "holdings", key);
// object goes out of scope.
})();
@@ -39,7 +31,7 @@ let key = {"k": "this is the key"};
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCell.
+// Assert that the cleanup function was called.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
assertEquals(1, cleanup_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
deleted file mode 100644
index a62e6ed923..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- // Unregister before we've iterated through the holdings.
- let success = fg.unregister(key);
- assertTrue(success);
-
- for (wc of iter) {
- ++cleanup_holdings_count;
- }
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "the key"};
-// Create an object and register it in the FinalizationRegistry. The object needs
-// to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object = {};
- fg.register(object, "holdings", key);
-
- // object goes out of scope.
-})();
-
-// This GC will discover unretained targets and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function was called, but didn't iterate any holdings.
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(0, cleanup_holdings_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index e26d9a1921..e04b9f1485 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -6,34 +6,40 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals(holdings, "holdings");
- let success = fg.unregister(key);
- assertFalse(success);
-
- ++cleanup_holdings_count;
+let cleanup = function(holdings) {
+ // See which target we're cleaning up and unregister the other one.
+ if (holdings == 1) {
+ let success = fg.unregister(key2);
+ assertTrue(success);
+ } else {
+ assertSame(holdings, 2);
+ let success = fg.unregister(key1);
+ assertTrue(success);
}
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
let fg = new FinalizationRegistry(cleanup);
-// Create an object and register it in the FinalizationRegistry. The object needs to be inside
-// a closure so that we can reliably kill them!
-let key = {"k": "this is the key"};
+let key1 = {"k": "first key"};
+let key2 = {"k": "second key"};
+// Create two objects and register them in the FinalizationRegistry. The objects
+// need to be inside a closure so that we can reliably kill them!
(function() {
- let object = {};
- fg.register(object, "holdings", key);
+ let object1 = {};
+ fg.register(object1, 1, key1);
+ let object2 = {};
+ fg.register(object2, 2, key2);
- // object goes out of scope.
+ // object1 and object2 go out of scope.
})();
-// This GC will discover dirty WeakCells and schedule cleanup.
+// This GC will reclaim target objects and schedule cleanup.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the WeakCell.
+// Assert that the cleanup function was called and cleaned up one holdings (but not the other one).
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
assertEquals(1, cleanup_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index 8f28673205..e11fd3b8e9 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -6,35 +6,39 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals(holdings, "holdings");
- ++cleanup_holdings_count;
- }
- // Unregister an already iterated over weak reference.
+let cleanup = function(holdings) {
+ assertEquals(holdings, "holdings");
+
+ // There's one more object with the same key that we haven't
+ // cleaned up yet so we should be able to unregister the
+ // callback for that one.
let success = fg.unregister(key);
- assertFalse(success);
+
+ assertTrue(success);
+
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
let fg = new FinalizationRegistry(cleanup);
-let key = {"k": "this is the key"};
-
// Create an object and register it in the FinalizationRegistry. The object needs to be inside
// a closure so that we can reliably kill them!
+let key = {"k": "this is the key"};
(function() {
let object = {};
+ let object2 = {};
fg.register(object, "holdings", key);
+ fg.register(object2, "holdings", key);
// object goes out of scope.
})();
-// This GC will reclaim the target object and schedule cleanup.
+// This GC will discover dirty WeakCells and schedule cleanup.
gc();
assertEquals(0, cleanup_call_count);
-// Assert that the cleanup function was called and iterated the holdings.
+// Assert that the cleanup function was called.
let timeout_func = function() {
assertEquals(1, cleanup_call_count);
assertEquals(1, cleanup_holdings_count);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
deleted file mode 100644
index a7ab9d18df..0000000000
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
-
-let cleanup_call_count = 0;
-let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- // See which target we're iterating over and unregister the other one.
- if (holdings == 1) {
- let success = fg.unregister(key2);
- assertTrue(success);
- } else {
- assertSame(holdings, 2);
- let success = fg.unregister(key1);
- assertTrue(success);
- }
- ++cleanup_holdings_count;
- }
- ++cleanup_call_count;
-}
-
-let fg = new FinalizationRegistry(cleanup);
-let key1 = {"k": "first key"};
-let key2 = {"k": "second key"};
-// Create two objects and register them in the FinalizationRegistry. The objects
-// need to be inside a closure so that we can reliably kill them!
-
-(function() {
- let object1 = {};
- fg.register(object1, 1, key1);
- let object2 = {};
- fg.register(object2, 2, key2);
-
- // object1 and object2 go out of scope.
-})();
-
-// This GC will reclaim target objects and schedule cleanup.
-gc();
-assertEquals(0, cleanup_call_count);
-
-// Assert that the cleanup function was called and iterated one holdings (but not the other one).
-let timeout_func = function() {
- assertEquals(1, cleanup_call_count);
- assertEquals(1, cleanup_holdings_count);
-}
-
-setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index 3512fc9217..772078e107 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -6,11 +6,9 @@
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
-let cleanup = function(iter) {
- for (holdings of iter) {
- assertEquals("holdings2", holdings);
- ++cleanup_holdings_count;
- }
+let cleanup = function(holdings) {
+ assertEquals("holdings2", holdings);
+ ++cleanup_holdings_count;
++cleanup_call_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 5117997965..3b3e488a82 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -5,7 +5,7 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
-let cleanup = function(iter) {
+let cleanup = function(holdings) {
++cleanup_call_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index f3480f78d1..ee4b5ecb90 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -5,13 +5,9 @@
// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
-let cleanup = function(iter) {
+let cleanup = function(holdings_arg) {
assertFalse(cleanup_called);
- let result = iter.next();
- assertEquals(result.value, holdings);
- assertFalse(result.done);
- result = iter.next();
- assertTrue(result.done);
+ assertEquals(holdings_arg, holdings);
cleanup_called = true;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
index 94f5ce6a90..6572faee21 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-unregistertoken.js
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs-with-cleanup-some --expose-gc --noincremental-marking
-var FG = new FinalizationRegistry (function (iter) { globalThis.FRRan = true; });
+var FR = new FinalizationRegistry (function (holdings) { globalThis.FRRan = true; });
{
let obj = {};
// obj is its own unregister token and becomes unreachable after this
// block. If the unregister token is held strongly this test will not
// terminate.
- FG.register(obj, 42, obj);
+ FR.register(obj, 42, obj);
}
function tryAgain() {
gc();
- if (globalThis.FRRan || FG.cleanupSome()) {
+ if (globalThis.FRRan || FR.cleanupSome()) {
return;
}
setTimeout(tryAgain, 0);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 58dcd6c9ed..0aeb3862ca 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -295,7 +295,7 @@ var prettyPrinted;
default:
return objectClass + "(" + String(value) + ")";
}
- // [[Class]] is "Object".
+ // classOf() returned "Object".
var name = value.constructor.name;
if (name) return name + "()";
return "Object()";
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 989c097908..42f0b970d3 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -65,6 +65,14 @@
# BUG(v8:10197)
'regress/regress-748069': [SKIP],
+ # https://crbug.com/1043058
+ # Enable once serializing a running isolate is fully implemented.
+ 'serialize-deserialize-now': [SKIP],
+
+ # https://crbug.com/v8/issues/10486
+ # Enable once multi-byte prefixed opcodes are correctly handled
+ 'regress/wasm/regress-1065599': [SKIP],
+
##############################################################################
# Tests where variants make no sense.
'd8/enable-tracing': [PASS, NO_VARIANTS],
@@ -86,6 +94,7 @@
# Long-running tests.
# We really should find better solutions for these.
'es6/promise-all-overflow-1': [SKIP],
+ 'harmony/promise-any-overflow-1': [SKIP],
'migrations': [SKIP],
'regress/regress-2073': [SKIP],
@@ -113,6 +122,7 @@
'asm/sqlite3/*': [PASS, SLOW, NO_VARIANTS],
'compiler/regress-9017': [PASS, SLOW],
'es6/promise-all-overflow-2': [PASS, SLOW, ['arch != x64', SKIP]],
+ 'harmony/promise-any-overflow-2': [PASS, SLOW, ['arch != x64', SKIP]],
'copy-on-write-assert': [PASS, SLOW],
'es6/typedarray-construct-offset-not-smi': [PASS, SLOW],
'harmony/futex': [PASS, SLOW],
@@ -237,6 +247,7 @@
'es6/block-let-crankshaft': [SKIP],
'opt-elements-kind': [SKIP],
'osr-elements-kind': [SKIP],
+ 'compiler/number-divide': [SKIP],
'regress/regress-crbug-137689': [SKIP],
'regress/regress-trap-allocation-memento': [SKIP],
'regress/regress-2249': [SKIP],
@@ -380,6 +391,7 @@
# Flag --interpreted-frames-native-stack incompatible with jitless
'regress/regress-10138': [SKIP],
+ 'regress/regress-1078913': [SKIP],
}], # 'lite_mode or variant == jitless'
##############################################################################
@@ -822,10 +834,6 @@
'wasm/module-memory': [SKIP],
'wasm/shared-memory-gc-stress': [SKIP],
- # Redirection to the interpreter is non-deterministic with multiple isolates.
- 'wasm/interpreter-mixed': [SKIP],
- 'wasm/worker-interpreter': [SKIP],
-
# The {FreezeWasmLazyCompilation} runtime function sets a flag in the native
# module, which causes a data-race if the native module is shared between
# isolates.
@@ -937,16 +945,22 @@
# BUG(v8:7166).
'd8/enable-tracing': [SKIP],
- # Relies on async compilation which requires background tasks.
- 'wasm/streaming-error-position': [SKIP],
+
+ # Rely on (blocking) concurrent compilation.
+ 'compiler/concurrent-invalidate-transition-map': [SKIP],
+ 'compiler/concurrent-proto-change': [SKIP],
+ 'compiler/manual-concurrent-recompile': [SKIP],
+ 'compiler/regress-905555-2': [SKIP],
+ 'compiler/regress-905555': [SKIP],
+ 'compiler/regress-9945-1': [SKIP],
+ 'concurrent-initial-prototype-change': [SKIP],
+ 'regress/regress-356053': [SKIP],
+ 'regress/regress-embedded-cons-string': [SKIP],
+
# Intentionally non-deterministic using shared arraybuffers.
'wasm/atomics-stress': [SKIP],
'wasm/atomics64-stress': [SKIP],
'wasm/futex': [SKIP],
-
- # Deadlocks on predictable platform (https://crbug.com/v8/9760).
- 'wasm/async-compile': [SKIP],
- 'wasm/streaming-compile': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -1134,9 +1148,20 @@
'compiler/opt-higher-order-functions': [SKIP],
'regress/regress-1049982-1': [SKIP],
'regress/regress-1049982-2': [SKIP],
+ 'es6/iterator-eager-deopt': [SKIP],
# interrupt_budget overrides don't work with TurboProp.
'interrupt-budget-override': [SKIP],
+ 'never-optimize': [SKIP],
+
+ # In turboprop we reuse the optimized code on soft deopt. The following tests
+ # test for a soft deopt and they won't work in TurboProp.
+ 'deopt-recursive-soft-once': [SKIP],
+ 'regress/regress-3709': [SKIP],
+ 'regress/regress-5790': [SKIP],
+
+ # const field tracking is disabled in turboprop
+ 'const-field-tracking': [SKIP],
}], # variant == turboprop
##############################################################################
@@ -1160,4 +1185,72 @@
'compiler/number-toboolean': [SKIP],
}], # variant == assert_types
+##############################################################################
+['variant == stress_snapshot and arch != x64', {
+ # Deserialization fails due to read-only snapshot checksum verification.
+ # https://crbug.com/v8/10491
+ '*': [SKIP],
+}],
+
+['variant == stress_snapshot and arch == x64', {
+ # Crashes the serializer due to recursion.
+ 'deep-recursion': [SKIP],
+ 'string-replace-gc': [SKIP],
+ # Debug check failed:
+ # map == GetReadOnlyRoots(isolate).fixed_array_map() || map == GetReadOnlyRoots(isolate).fixed_cow_array_map().
+ # This means a mismatch of elements kinds / elements on the global object.
+ 'es6/block-sloppy-function': [SKIP],
+ 'es6/reflect-get-own-property-descriptor': [SKIP],
+ 'es6/reflect': [SKIP],
+ 'get-own-property-descriptor': [SKIP],
+ 'global-properties': [SKIP],
+ 'indexed-accessors': [SKIP],
+ 'object-freeze-global': [SKIP],
+ 'object-seal-global': [SKIP],
+ 'regress/regress-1103': [SKIP],
+ 'regress/regress-1112': [SKIP],
+ 'regress/regress-1120': [SKIP],
+ 'regress/regress-2346': [SKIP],
+ 'regress/regress-489151': [SKIP],
+ 'regress/regress-crbug-1002628': [SKIP],
+ 'regress/regress-crbug-454091': [SKIP],
+ 'regress/regress-crbug-663750': [SKIP],
+ 'regress/regress-freeze-setter': [SKIP],
+ # TODO(v8:10494): asm Code objects can't be flushed and end up in the isolate
+ # serializer.
+ 'asm/*': [SKIP],
+ 'compiler/regress-439743': [SKIP],
+ 'regress/regress-441099': [SKIP],
+ 'regress/regress-617526': [SKIP],
+ 'regress/regress-6196': [SKIP],
+ 'regress/regress-677685': [SKIP],
+ 'regress/regress-7893': [SKIP],
+ 'regress/regress-799690': [SKIP],
+ 'regress/regress-8377': [SKIP],
+ 'regress/regress-crbug-1047368': [SKIP],
+ 'regress/regress-crbug-898974': [SKIP],
+ 'regress/regress-crbug-935800': [SKIP],
+ 'regress/regress-crbug-976934': [SKIP],
+ 'regress/wasm/*': [SKIP],
+ 'wasm/*': [SKIP],
+ # Investigate (IsScript).
+ 'harmony/import-from-compilation-errored': [SKIP],
+ 'harmony/private-fields-special-object': [SKIP],
+ # Investigate (JSFunction in startup serializer).
+ 'regress/regress-1034394': [SKIP],
+ 'regress/regress-863810': [SKIP],
+ 'regress/regress-crbug-772056': [SKIP],
+ # Investigate (Check failed: buffer.byte_length() <= Smi::kMaxValue)
+ 'regress/regress-319722-ArrayBuffer': [SKIP],
+ 'regress/regress-599717': [SKIP],
+ 'regress/regress-667603': [SKIP],
+ # Investigate (IsFixedArrayBase).
+ 'regress/regress-786784': [SKIP],
+ 'regress/regress-v8-9656': [SKIP],
+ # Investigate (startup_serializer_->ReferenceMapContains(obj)).
+ 'regress/regress-813440': [SKIP],
+ # Investigate (segfault).
+ 'regress/regress-crbug-397662': [SKIP],
+}], # variant == stress_snapshot
+
]
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index f2b764e16c..95c8c8650a 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -25,38 +25,23 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-use-osr
+// Flags: --interrupt-budget=1024
-function o1() {
-}
+function o1() { }
%PrepareFunctionForOptimization(o1);
-
o1(); o1();
%OptimizeFunctionOnNextCall(o1);
o1();
-
-// Check that the given function was optimized.
assertOptimized(o1);
// Test the %NeverOptimizeFunction runtime call.
+function u1(i) { return i+1 }
+function u2(i) { return i+1 }
%NeverOptimizeFunction(u1);
-function u1() {
-}
-
-function u2() {
+for (let i = 0; i < 1000; ++i) {
u1();
+ u2();
}
-%PrepareFunctionForOptimization(u1);
-%PrepareFunctionForOptimization(u2);
-
-u1(); u1();
-u2(); u2();
-
-%OptimizeFunctionOnNextCall(u1);
-%OptimizeFunctionOnNextCall(u2);
-
-u1(); u1();
-u2(); u2();
-
assertUnoptimized(u1);
assertOptimized(u2);
diff --git a/deps/v8/test/mjsunit/class-of-builtins.js b/deps/v8/test/mjsunit/object-tostring-builtins.js
index 59fefffa75..8717770979 100644
--- a/deps/v8/test/mjsunit/class-of-builtins.js
+++ b/deps/v8/test/mjsunit/object-tostring-builtins.js
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// The [[Class]] property of (instances of) builtin functions must be
-// correctly set.
+// Object.prototype.toString should return the correct values for instances of
+// various built-in classes.
var funs = {
Object: [ Object ],
Function: [ Function ],
diff --git a/deps/v8/test/mjsunit/regress-crbug-1078825.js b/deps/v8/test/mjsunit/regress-crbug-1078825.js
new file mode 100644
index 0000000000..9381da50a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-crbug-1078825.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony-promise-any
+
+load('test/mjsunit/test-async.js');
+
+(function() {
+ const p1 = Promise.reject(1);
+ const p2 = Promise.resolve(1);
+ Object.defineProperty(p2, "then", {});
+
+ testAsync(assert => {
+ assert.plan(1);
+ Promise.any([p1, p2]).then(
+ assert.unreachable,
+ (e) => { assert.equals(true, e instanceof TypeError); });
+ });
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-10508.js b/deps/v8/test/mjsunit/regress/regress-10508.js
new file mode 100644
index 0000000000..9199336b7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-10508.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = (error, frames) => {
+ // JSON.stringify executes the replacer, triggering the relevant
+ // code in Invoke().
+ JSON.stringify({}, frames[0].getFunction());
+};
+let v0;
+try {
+ throw new Error();
+} catch (e) {
+ e.stack
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1069964.js b/deps/v8/test/mjsunit/regress/regress-1069964.js
new file mode 100644
index 0000000000..0c6000b1ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1069964.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Realm.createAllowCrossRealmAccess();
+const c = Realm.global(1);
+Realm.detachGlobal(1);
+try { c.constructor = () => {}; } catch {}
diff --git a/deps/v8/test/mjsunit/regress/regress-1071190.js b/deps/v8/test/mjsunit/regress/regress-1071190.js
new file mode 100644
index 0000000000..7a888442e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1071190.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+function test() {
+ const a = new DataView(new ArrayBuffer(32));
+ const b = new DataView(new ArrayBuffer(32));
+ a.setFloat64(0);
+ b.setFloat64(0, undefined);
+
+ for(let i = 0; i < 8; ++i) {
+ assertEquals(a.getUint8(i), b.getUint8(i));
+ }
+}
+
+%PrepareFunctionForOptimization(test);
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-1076569.js b/deps/v8/test/mjsunit/regress/regress-1076569.js
new file mode 100644
index 0000000000..a223b600a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1076569.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --allow-natives-syntax --turboprop
+
+var array = new Int16Array();
+
+function foo() {
+ array[0] = "123.12";
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1077804.js b/deps/v8/test/mjsunit/regress/regress-1077804.js
new file mode 100644
index 0000000000..00ae196c33
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1077804.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return bar();
+}
+
+function bar(a, b) {
+ return a + b;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeFunctionOnNextCall(foo);
+%PrepareFunctionForOptimization(bar);
+%OptimizeFunctionOnNextCall(bar);
+bar(2n, 2n);
+assertTrue(Number.isNaN(foo()));
diff --git a/deps/v8/test/mjsunit/regress/regress-1078913.js b/deps/v8/test/mjsunit/regress/regress-1078913.js
new file mode 100644
index 0000000000..58d083f547
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1078913.js
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --interpreted-frames-native-stack
+
+// Make sure that the interpreted trampoline copy (for native interpreter frames
+// in stack traces) works for interpreted functions but doesn't crash for asm.js
+
+function func() {
+ return;
+}
+
+function asm_func() {
+ "use asm";
+ function f(){}
+ return {f:f};
+}
+
+function failed_asm_func() {
+ "use asm";
+ // This should fail validation
+ [x,y,z] = [1,2,3];
+ return;
+}
+
+func();
+asm_func();
+failed_asm_func();
diff --git a/deps/v8/test/mjsunit/regress/regress-1365.js b/deps/v8/test/mjsunit/regress/regress-1365.js
index 59290f9ebc..58efb5715c 100644
--- a/deps/v8/test/mjsunit/regress/regress-1365.js
+++ b/deps/v8/test/mjsunit/regress/regress-1365.js
@@ -43,7 +43,6 @@ assertEquals(Object.prototype, Object.prototype.valueOf());
assertThrows(callGlobalValueOf);
assertThrows(callGlobalHasOwnProperty);
-%OptimizeFunctionOnNextCall(Object.prototype.valueOf);
Object.prototype.valueOf();
assertEquals(Object.prototype, Object.prototype.valueOf());
diff --git a/deps/v8/test/mjsunit/regress/regress-447756.js b/deps/v8/test/mjsunit/regress/regress-447756.js
index 1fc7518c13..55fd2bd0fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-447756.js
+++ b/deps/v8/test/mjsunit/regress/regress-447756.js
@@ -42,7 +42,8 @@ function TestOptimizedCode() {
assertSame(Infinity, 1 / a1.byteOffset);
}
-%OptimizeFunctionOnNextCall(Uint8Array);
-for (var i = 0; i < 1000; i++) {
- TestOptimizedCode();
-}
+%PrepareFunctionForOptimization(TestOptimizedCode);
+TestOptimizedCode();
+TestOptimizedCode();
+%OptimizeFunctionOnNextCall(TestOptimizedCode);
+TestOptimizedCode();
diff --git a/deps/v8/test/mjsunit/regress/regress-491481.js b/deps/v8/test/mjsunit/regress/regress-491481.js
deleted file mode 100644
index 196b6aeb79..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-491481.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-try {
-%OptimizeFunctionOnNextCall(print);
-try {
- __f_16();
-} catch(e) { print(e); }
-try {
- __f_10();
-} catch(e) {; }
-} catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1053939-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1053939-1.js
new file mode 100644
index 0000000000..e7657882bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1053939-1.js
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+
+v = {};
+v.__proto__ = new Int32Array(1);
+function foo() {
+ for (var i = 0; i < 2; i++) {
+ v[i] = 0;
+ }
+}
+foo();
+assertEquals(Object.keys(v).length, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1055138-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-1.js
new file mode 100644
index 0000000000..039cf83c8f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-1.js
@@ -0,0 +1,64 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Object.prototype[1] = 153;
+Object.freeze(Object.prototype);
+
+(function TestSloppyStoreToReadOnlyProperty() {
+ function foo() {
+ let ar = [];
+ for (let i = 0; i < 3; i++) {
+ ar[i] = 42;
+
+ if (i == 1) {
+ // Attempt to overwrite read-only element should not change
+ // array length.
+ assertEquals(1, ar.length);
+ } else {
+ assertEquals(i + 1, ar.length);
+ }
+ }
+ return ar;
+ }
+
+ assertEquals([42,153,42], foo());
+ assertEquals([42,153,42], foo());
+ assertEquals([42,153,42], foo());
+ %PrepareFunctionForOptimization(foo);
+ assertEquals([42,153,42], foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([42,153,42], foo());
+})();
+
+(function StrictStoreToReadOnlyProperty() {
+ function foo() {
+ "use strict";
+ let ar = [];
+ let threw_exception = false;
+ for (let i = 0; i < 3; i++) {
+ try {
+ ar[i] = 42;
+ } catch(e) {
+ // Attempt to overwrite read-only element should throw and
+ // should not change array length.
+ assertTrue(i == 1);
+ assertEquals(1, ar.length);
+ assertInstanceof(e, TypeError);
+ threw_exception = true;
+ }
+ }
+ assertTrue(threw_exception);
+ return ar;
+ }
+
+ assertEquals([42,153,42], foo());
+ assertEquals([42,153,42], foo());
+ assertEquals([42,153,42], foo());
+ %PrepareFunctionForOptimization(foo);
+ assertEquals([42,153,42], foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([42,153,42], foo());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1055138-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-2.js
new file mode 100644
index 0000000000..33dbcf8520
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-2.js
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.prototype[1] = 153;
+
+(function TestSloppyStoreToReadOnlyProperty() {
+ function foo(prototype_frozen) {
+ let ar = [];
+ for (let i = 0; i < 3; i++) {
+ ar[i] = 42;
+
+ if (prototype_frozen) {
+ if (i == 1) {
+ // Attempt to overwrite read-only element should not change
+ // array length.
+ assertEquals(1, ar.length);
+ } else {
+ assertEquals(i + 1, ar.length);
+ }
+ }
+ }
+ return ar;
+ }
+
+ // Warm-up store IC.
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ Object.freeze(Object.prototype);
+ // Ensure IC was properly invalidated.
+ assertEquals([42,153,42], foo(true));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1055138-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-3.js
new file mode 100644
index 0000000000..bf08cdc9fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1055138-3.js
@@ -0,0 +1,40 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.prototype[1] = 153;
+
+(function StrictStoreToReadOnlyProperty() {
+ function foo(prototype_frozen) {
+ "use strict";
+ let ar = [];
+ let threw_exception = false;
+ for (let i = 0; i < 3; i++) {
+ try {
+ ar[i] = 42;
+ } catch(e) {
+ if (prototype_frozen) {
+ // Attempt to overwrite read-only element should throw and
+ // should not change array length.
+ assertTrue(i == 1);
+ assertEquals(1, ar.length);
+ assertInstanceof(e, TypeError);
+ threw_exception = true;
+ }
+ }
+ }
+ if (prototype_frozen) {
+ assertTrue(threw_exception);
+ }
+ return ar;
+ }
+
+ // Warm-up store IC.
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ assertEquals([42,42,42], foo(false));
+ Object.freeze(Object.prototype);
+ // Ensure IC was properly invalidated.
+ assertEquals([42,153,42], foo(true));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1060023.js b/deps/v8/test/mjsunit/regress/regress-crbug-1060023.js
new file mode 100644
index 0000000000..ca666130bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1060023.js
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class b extends RegExp {
+ exec() {
+ (function() { (a = (function({} = this) {})) => {} })
+ }
+}
+assertThrows(()=>'a'.match(new b), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-347542.js b/deps/v8/test/mjsunit/regress/regress-crbug-1063796.js
index 3929a6076a..b86ceed155 100644
--- a/deps/v8/test/mjsunit/regress/regress-347542.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1063796.js
@@ -1,12 +1,15 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-function foo() {};
+Object.prototype[1] = 1;
+function foo(baz) {
+ return 1 in arguments;
+}
+assertTrue(foo(0));
%PrepareFunctionForOptimization(foo);
-foo();
+assertTrue(foo(0));
%OptimizeFunctionOnNextCall(foo);
-foo();
-%NeverOptimizeFunction(foo);
+assertTrue(foo(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1065741.js b/deps/v8/test/mjsunit/regress/regress-crbug-1065741.js
new file mode 100644
index 0000000000..bf97370d93
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1065741.js
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function bar() {
+ String.prototype.startsWith.apply();
+}
+
+%PrepareFunctionForOptimization(bar);
+assertThrows(bar, TypeError);
+assertThrows(bar, TypeError);
+%OptimizeFunctionOnNextCall(bar);
+assertThrows(bar, TypeError);
+%PrepareFunctionForOptimization(bar);
+%OptimizeFunctionOnNextCall(bar);
+assertThrows(bar, TypeError);
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1067757.js b/deps/v8/test/mjsunit/regress/regress-crbug-1067757.js
new file mode 100644
index 0000000000..feba5c1e68
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1067757.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+"use strict";
+
+function foo() {
+ let count = 0;
+ try {
+ for (p of v) {
+ count += 1;
+ }
+ } catch (e) { }
+ assertEquals(count, 0);
+}
+
+var v = [ "0", {}];
+
+foo();
+Reflect.deleteProperty(v, '0');
+
+let count_loop = 0;
+try {
+ for (p of v) { count_loop += 1; }
+} catch (e) {}
+assertEquals(count_loop, 0);
+
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1070560.js b/deps/v8/test/mjsunit/regress/regress-crbug-1070560.js
new file mode 100644
index 0000000000..c94d5f5b93
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1070560.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ // Create a FixedDoubleArray
+ var arr = [5.65];
+ // Force the elements to be EmptyFixedArray
+ arr.splice(0);
+ // This should create a FixedDoubleArray initialized with holes.
+ arr.splice(-4, 9, 10, 20);
+ // If the earlier splice didn't create holes, this would fail.
+ assertFalse(2 in arr);
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1074737.js b/deps/v8/test/mjsunit/regress/regress-crbug-1074737.js
new file mode 100644
index 0000000000..a644f87c67
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1074737.js
@@ -0,0 +1,40 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ throw 42
+} catch (e) {
+ function foo() { return e };
+ %PrepareFunctionForOptimization(foo);
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ var e = "expected";
+}
+assertEquals("expected", foo());
+
+try {
+ throw 42
+} catch (f) {
+ function foo2() { return f };
+ %PrepareFunctionForOptimization(foo2);
+ %OptimizeFunctionOnNextCall(foo2);
+ foo2();
+ with ({}) {
+ var f = "expected";
+ }
+}
+assertEquals("expected", foo2());
+
+(function () {
+ function foo3() { return g };
+ %PrepareFunctionForOptimization(foo3);
+ %OptimizeFunctionOnNextCall(foo3);
+ foo3();
+ with ({}) {
+ var g = "expected";
+ }
+ assertEquals("expected", foo3());
+})()
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1077508.js b/deps/v8/test/mjsunit/regress/regress-crbug-1077508.js
new file mode 100644
index 0000000000..73bc5a6910
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1077508.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const array = [, , , 0, 1, 2];
+const comparefn = () => {
+ Array.prototype.__defineSetter__("0", function () {});
+ Array.prototype.__defineSetter__("1", function () {});
+ Array.prototype.__defineSetter__("2", function () {});
+};
+
+array.sort(comparefn);
+
+assertArrayEquals([, , , , , , ], array);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-754177.js b/deps/v8/test/mjsunit/regress/regress-crbug-754177.js
index 1c105a3bf2..74685366d4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-754177.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-754177.js
@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --fuzzing
-// Do not crash on non-JSFunction input.
+// Do not crash on non-JSFunction input when fuzzing.
%NeverOptimizeFunction(undefined);
%NeverOptimizeFunction(true);
%NeverOptimizeFunction(1);
%NeverOptimizeFunction({});
assertThrows("%NeverOptimizeFunction()", SyntaxError);
+
+%PrepareFunctionForOptimization(print);
+%OptimizeFunctionOnNextCall(print);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-10484-1.js b/deps/v8/test/mjsunit/regress/regress-v8-10484-1.js
new file mode 100644
index 0000000000..61dd2929f3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-10484-1.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var ar;
+Object.defineProperty(Array.prototype, 3,
+ { get() { Object.freeze(ar); } });
+
+function foo() {
+ ar = [1, 2, 3];
+ ar.length = 4;
+ ar.pop();
+}
+
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-10484-2.js b/deps/v8/test/mjsunit/regress/regress-v8-10484-2.js
new file mode 100644
index 0000000000..c2030e82db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-10484-2.js
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var ar;
+Object.defineProperty(Array.prototype, 3,
+ {
+ get() {
+ Object.defineProperty(
+ ar, "length",
+ { value: 3, writable: false, configurable: false });
+ }
+ });
+
+function foo() {
+ ar = [1, 2, 3];
+ ar.length = 4;
+ ar.pop();
+}
+
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-10513.js b/deps/v8/test/mjsunit/regress/regress-v8-10513.js
new file mode 100644
index 0000000000..e9b91f5b4d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-10513.js
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const access_log = [];
+const handler = {
+ get: function(obj, prop) {
+ access_log.push(prop);
+ return prop in obj ? obj[prop] : "z";
+ }
+};
+
+class ProxiedGroupRegExp extends RegExp {
+ exec(s) {
+ var result = super.exec(s);
+ if (result) {
+ result.groups = new Proxy(result.groups, handler);
+ }
+ return result;
+ }
+}
+
+let re = new ProxiedGroupRegExp("(?<x>.)");
+assertEquals("a z", "a".replace(re, "$<x> $<y>"));
+assertEquals(["x", "y"], access_log);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10309.js b/deps/v8/test/mjsunit/regress/wasm/regress-10309.js
index ce73b783ef..904ea73aca 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-10309.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10309.js
@@ -46,7 +46,7 @@ function assert_return(action, expected) {
let f32 = Math.fround;
// simple.wast:1
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x09\x02\x60\x00\x00\x60\x01\x7f\x01\x7d\x03\x04\x03\x00\x00\x01\x05\x03\x01\x00\x01\x07\x1c\x02\x11\x72\x65\x70\x6c\x61\x63\x65\x5f\x6c\x61\x6e\x65\x5f\x74\x65\x73\x74\x00\x01\x04\x72\x65\x61\x64\x00\x02\x08\x01\x00\x0a\x6e\x03\x2a\x00\x41\x10\x43\x00\x00\x80\x3f\x38\x02\x00\x41\x14\x43\x00\x00\x00\x40\x38\x02\x00\x41\x18\x43\x00\x00\x40\x40\x38\x02\x00\x41\x1c\x43\x00\x00\x80\x40\x38\x02\x00\x0b\x39\x01\x01\x7b\x41\x10\x2a\x02\x00\xfd\x12\x21\x00\x20\x00\x41\x10\x2a\x01\x04\xfd\x14\x01\x21\x00\x20\x00\x41\x10\x2a\x01\x08\xfd\x14\x02\x21\x00\x20\x00\x41\x10\x2a\x01\x0c\xfd\x14\x03\x21\x00\x41\x00\x20\x00\xfd\x01\x02\x00\x0b\x07\x00\x20\x00\x2a\x02\x00\x0b");
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x09\x02\x60\x00\x00\x60\x01\x7f\x01\x7d\x03\x04\x03\x00\x00\x01\x05\x03\x01\x00\x01\x07\x1c\x02\x11\x72\x65\x70\x6c\x61\x63\x65\x5f\x6c\x61\x6e\x65\x5f\x74\x65\x73\x74\x00\x01\x04\x72\x65\x61\x64\x00\x02\x08\x01\x00\x0a\x6e\x03\x2a\x00\x41\x10\x43\x00\x00\x80\x3f\x38\x02\x00\x41\x14\x43\x00\x00\x00\x40\x38\x02\x00\x41\x18\x43\x00\x00\x40\x40\x38\x02\x00\x41\x1c\x43\x00\x00\x80\x40\x38\x02\x00\x0b\x39\x01\x01\x7b\x41\x10\x2a\x02\x00\xfd\x13\x21\x00\x20\x00\x41\x10\x2a\x01\x04\xfd\x20\x01\x21\x00\x20\x00\x41\x10\x2a\x01\x08\xfd\x20\x02\x21\x00\x20\x00\x41\x10\x2a\x01\x0c\xfd\x20\x03\x21\x00\x41\x00\x20\x00\xfd\x0b\x02\x00\x0b\x07\x00\x20\x00\x2a\x02\x00\x0b");
// simple.wast:49
call($1, "replace_lane_test", []);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
index 85ee43732b..5f69c3343f 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1054466.js
@@ -42,9 +42,9 @@ kExprF32Max, // f32.max
kSimdPrefix, kExprF32x4Splat, // f32x4.splat
kExprI32Const, 0x83, 0x01, // i32.const
kSimdPrefix, kExprI32x4Splat, // i32x4.splat
-kSimdPrefix, kExprI32x4Add, // i32x4.add
-kSimdPrefix, kExprI32x4Add, // i32x4.add
-kSimdPrefix, kExprS1x8AnyTrue, // s1x8.any_true
+kSimdPrefix, kExprI32x4Eq, // i32x4.eq
+kSimdPrefix, kExprI32x4Eq, // i32x4.eq
+kSimdPrefix, kExprS1x16AnyTrue, // s1x16.any_true
kExprEnd, // end @64
]);
builder.addExport('main', 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js b/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js
deleted file mode 100644
index a16180ab5c..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1055692.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-staging --wasm-interpret-all --experimental-wasm-simd
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-const builder = new WasmModuleBuilder();
-builder.addMemory(16, 32, false);
-builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
-// Generate function 1 (out of 1).
-builder.addFunction(undefined, 0 /* sig */)
- .addBodyWithEnd([
-// signature: i_iii
-// body:
-kExprI32Const, 0x75, // i32.const
-kExprI32Const, 0x74, // i32.const
-kExprI32Const, 0x18, // i32.const
-kSimdPrefix, kExprS8x16LoadSplat, // s8x16.load_splat
-kExprUnreachable, // unreachable
-kExprUnreachable, // unreachable
-kExprI32Const, 0x6f, // i32.const
-kExprI32Const, 0x7f, // i32.const
-kExprI32Const, 0x6f, // i32.const
-kExprDrop,
-kExprDrop,
-kExprDrop,
-kExprDrop,
-kExprDrop,
-kExprEnd, // end @18
-]);
-builder.addExport('main', 0);
-const instance = builder.instantiate();
-print(instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
new file mode 100644
index 0000000000..03530e6f7a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
+ // signature: i_iii
+ // body:
+ kExprI32Const, 0xba, 0x01, // i32.const
+ kSimdPrefix, kExprI16x8Splat, // i16x8.splat
+ kExprMemorySize, 0x00, // memory.size
+ kSimdPrefix, kExprI16x8ShrS, // i16x8.shr_s
+ kSimdPrefix, kExprS1x16AnyTrue, // s1x16.any_true
+ kExprMemorySize, 0x00, // memory.size
+ kExprI32RemS, // i32.rem_s
+ kExprEnd, // end @15
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+instance.exports.main(1, 2, 3);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1065635.js b/deps/v8/test/mjsunit/regress/wasm/regress-1065635.js
new file mode 100644
index 0000000000..a4f82e8650
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1065635.js
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ 'use asm';
+ function bar() {
+ return -1e-15;
+ }
+ return {bar: bar};
+}
+
+assertEquals(-1e-15, foo().bar());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1065852.js b/deps/v8/test/mjsunit/regress/wasm/regress-1065852.js
new file mode 100644
index 0000000000..24463e0688
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1065852.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* asm() {
+ "use asm";
+ function x(v) {
+ v = v | 0;
+ }
+ return x;
+}
+
+// 'function*' creates a generator with an implicit 'next' method.
+asm().next();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1067621.js b/deps/v8/test/mjsunit/regress/wasm/regress-1067621.js
new file mode 100644
index 0000000000..e75b0156f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1067621.js
@@ -0,0 +1,82 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const kNumberOfWorker = 4;
+
+const workerOnMessage = function(msg) {
+ if (msg.module) {
+ let module = msg.module;
+ let mem = msg.mem;
+ this.instance = new WebAssembly.Instance(module, {m: {memory: mem}});
+ postMessage({instantiated: true});
+ } else {
+ const kNumberOfRuns = 20;
+ let result = new Array(kNumberOfRuns);
+ for (let i = 0; i < kNumberOfRuns; ++i) {
+ result[i] = instance.exports.grow();
+ }
+ postMessage({result: result});
+ }
+};
+
+function spawnWorkers() {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ let worker = new Worker(
+ 'onmessage = ' + workerOnMessage.toString(), {type: 'string'});
+ workers.push(worker);
+ }
+ return workers;
+}
+
+function instantiateModuleInWorkers(workers, module, shared_memory) {
+ for (let worker of workers) {
+ worker.postMessage({module: module, mem: shared_memory});
+ let msg = worker.getMessage();
+ if (!msg.instantiated) throw 'Worker failed to instantiate';
+ }
+}
+
+function triggerWorkers(workers) {
+ for (i = 0; i < workers.length; i++) {
+ let worker = workers[i];
+ worker.postMessage({});
+ }
+}
+
+(function TestConcurrentGrowMemoryResult() {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory('m', 'memory', 1, 500, 'shared');
+ builder.addFunction('grow', kSig_i_v)
+ .addBody([kExprI32Const, 1, kExprMemoryGrow, kMemoryZero])
+ .exportFunc();
+
+ const module = builder.toModule();
+ const shared_memory =
+ new WebAssembly.Memory({initial: 1, maximum: 500, shared: true});
+
+ // Spawn off the workers and run the sequences.
+ let workers = spawnWorkers();
+ instantiateModuleInWorkers(workers, module, shared_memory);
+ triggerWorkers(workers);
+ let all_results = [];
+ for (let worker of workers) {
+ let msg = worker.getMessage();
+ all_results = all_results.concat(msg.result);
+ }
+
+ all_results.sort((a, b) => a - b);
+ for (let i = 1; i < all_results.length; ++i) {
+ assertEquals(all_results[i - 1] + 1, all_results[i]);
+ }
+
+ // Terminate all workers.
+ for (let worker of workers) {
+ worker.terminate();
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js b/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js
new file mode 100644
index 0000000000..33a694cf05
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1070078.js
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+// Generate function 1 (out of 4).
+builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
+ // signature: i_iii
+ // body:
+ kExprI32Const, 0x00, // i32.const
+ kExprMemoryGrow, 0x00, // memory.grow
+ kExprI32Const, 0xd3, 0xe7, 0x03, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0x84, 0x80, 0xc0, 0x05, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0x84, 0x81, 0x80, 0xc8, 0x01, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0x19, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprS8x16Shuffle,
+ 0x00, 0x00, 0x17, 0x00, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x10, 0x01, 0x00, 0x04, 0x04, 0x04, 0x04, // s8x16.shuffle
+ kSimdPrefix, kExprS8x16Shuffle,
+ 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // s8x16.shuffle
+ kSimdPrefix, kExprI8x16LeU, // i8x16.le_u
+ kSimdPrefix, kExprS1x16AnyTrue, // s1x16.any_true
+ kExprMemoryGrow, 0x00, // memory.grow
+ kExprDrop,
+ kExprEnd, // end @233
+]);
+builder.addExport('main', 0);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1073553.js b/deps/v8/test/mjsunit/regress/wasm/regress-1073553.js
new file mode 100644
index 0000000000..78ea8c2687
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1073553.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1);
+builder.addFunction(undefined, kSig_v_i) .addBodyWithEnd([
+ kExprI32Const, 1, kExprMemoryGrow, kMemoryZero, kNumericPrefix]);
+// Intentionally add just a numeric opcode prefix without the index byte.
+
+const b = builder.toBuffer();
+WebAssembly.compile(b);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1074586-b.js b/deps/v8/test/mjsunit/regress/wasm/regress-1074586-b.js
new file mode 100644
index 0000000000..fbadeb0859
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1074586-b.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+const sig = builder.addType(makeSig(
+ [kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32],
+ []));
+builder.addFunction(undefined, sig).addBodyWithEnd([
+ // signature: v_iiiiifidi
+ // body:
+ kExprI32Const, 0x00, // i32.const
+ kExprI64Const, 0x00, // i64.const
+ kAtomicPrefix, kExprI64AtomicStore, 0x00, 0x00, // i64.atomic.store64
+ kExprEnd, // end @9
+]);
+builder.addExport('main', 0);
+assertDoesNotThrow(() => builder.instantiate());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
new file mode 100644
index 0000000000..ae25e3c261
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, false, true);
+builder.addGlobal(kWasmI32, 1);
+const sig = builder.addType(makeSig([kWasmI32, kWasmI64, kWasmI64, kWasmI64], [kWasmF32]));
+// Generate function 1 (out of 3).
+builder.addFunction(undefined, sig)
+ .addLocals({i32_count: 57}).addLocals({i64_count: 11})
+ .addBodyWithEnd([
+// signature: f_illl
+// body:
+kExprLocalGet, 0x1b, // local.get
+kExprLocalSet, 0x1c, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprIf, kWasmStmt, // if @11
+ kExprGlobalGet, 0x00, // global.get
+ kExprLocalSet, 0x1e, // local.set
+ kExprBlock, kWasmStmt, // block @19
+ kExprGlobalGet, 0x00, // global.get
+ kExprLocalSet, 0x21, // local.set
+ kExprBlock, kWasmStmt, // block @25
+ kExprBlock, kWasmStmt, // block @27
+ kExprBlock, kWasmStmt, // block @29
+ kExprGlobalGet, 0x00, // global.get
+ kExprLocalSet, 0x0a, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprLocalSet, 0x28, // local.set
+ kExprLocalGet, 0x00, // local.get
+ kExprLocalSet, 0x0b, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprBrIf, 0x01, // br_if depth=1
+ kExprEnd, // end @47
+ kExprUnreachable, // unreachable
+ kExprEnd, // end @49
+ kExprI32Const, 0x01, // i32.const
+ kExprLocalSet, 0x36, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprIf, kWasmStmt, // if @56
+ kExprEnd, // end @59
+ kExprLocalGet, 0x00, // local.get
+ kExprLocalSet, 0x10, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Eqz, // i32.eqz
+ kExprLocalSet, 0x38, // local.set
+ kExprBlock, kWasmStmt, // block @69
+ kExprI32Const, 0x7f, // i32.const
+ kExprI32Eqz, // i32.eqz
+ kExprLocalSet, 0x39, // local.set
+ kExprI32Const, 0x01, // i32.const
+ kExprIf, kWasmStmt, // if @78
+ kExprGlobalGet, 0x00, // global.get
+ kExprLocalSet, 0x11, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Eqz, // i32.eqz
+ kExprLocalSet, 0x12, // local.set
+ kExprGlobalGet, 0x00, // global.get
+ kExprLocalSet, 0x13, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x01, // i32.const
+ kExprI32Sub, // i32.sub
+ kExprLocalSet, 0x3a, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI64AtomicLoad16U, 0x01, 0x04, // i64.atomic.load16_u
+ kExprDrop, // drop
+ kExprI64Const, 0x01, // i64.const
+ kExprLocalSet, 0x44, // local.set
+ kExprI64Const, 0x01, // i64.const
+ kExprLocalSet, 0x3e, // local.set
+ kExprElse, // else @115
+ kExprNop, // nop
+ kExprEnd, // end @117
+ kExprLocalGet, 0x40, // local.get
+ kExprLocalSet, 0x41, // local.set
+ kExprLocalGet, 0x41, // local.get
+ kExprI64Const, 0x4b, // i64.const
+ kExprI64Add, // i64.add
+ kExprDrop, // drop
+ kExprEnd, // end @128
+ kExprEnd, // end @129
+ kExprUnreachable, // unreachable
+ kExprEnd, // end @132
+ kExprUnreachable, // unreachable
+ kExprEnd, // end @134
+kExprF32Const, 0x00, 0x00, 0x84, 0x42, // f32.const
+kExprEnd, // end @140
+]);
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
new file mode 100644
index 0000000000..12f8ba661a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, false, true);
+const sig = builder.addType(makeSig([], [kWasmI32]));
+
+builder.addFunction(undefined, sig)
+ .addLocals({i32_count: 1002}).addLocals({i64_count: 3})
+ .addBodyWithEnd([
+// signature: i_v
+// body:
+ kExprLocalGet, 0xec, 0x07, // local.get
+ kExprLocalGet, 0xea, 0x07, // local.set
+ kExprLocalGet, 0x17, // local.set
+ kExprLocalGet, 0xb5, 0x01, // local.set
+ kExprI32Const, 0x00, // i32.const
+ kExprIf, kWasmI32, // if @39 i32
+ kExprI32Const, 0x91, 0xe8, 0x7e, // i32.const
+ kExprElse, // else @45
+ kExprI32Const, 0x00, // i32.const
+ kExprEnd, // end @48
+ kExprIf, kWasmStmt, // if @49
+ kExprI32Const, 0x00, // i32.const
+ kExprI32Const, 0x00, // i32.const
+ kAtomicPrefix, kExprI32AtomicSub, 0x01, 0x04, // i32.atomic.sub
+ kExprDrop,
+ kExprEnd,
+ kExprUnreachable,
+kExprEnd
+]);
+
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1079449.js b/deps/v8/test/mjsunit/regress/wasm/regress-1079449.js
new file mode 100644
index 0000000000..65e964552d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1079449.js
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+const sig = builder.addType(makeSig(
+ [
+ kWasmI64, kWasmI32, kWasmI64, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI64, kWasmI64, kWasmI64
+ ],
+ [kWasmI64]));
+// Generate function 2 (out of 3).
+builder.addFunction(undefined, sig)
+ .addLocals({f32_count: 10})
+ .addLocals({i32_count: 4})
+ .addLocals({f64_count: 1})
+ .addLocals({i32_count: 15})
+ .addBodyWithEnd([
+ // signature: v_liliiiiiilll
+ // body:
+ kExprI32Const, 0x00, // i32.const
+ kExprI64Const, 0x00, // i64.const
+ kExprI64Const, 0x00, // i64.const
+ kAtomicPrefix, kExprI64AtomicCompareExchange, 0x00,
+ 0x8, // i64.atomic.cmpxchg64
+ kExprEnd, // end @124
+ ]);
+
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(
+ 0n, instance.exports.main(1n, 2, 3n, 4, 5, 6, 7, 8, 9, 10n, 11n, 12n, 13n));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js b/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js
new file mode 100644
index 0000000000..1173109094
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1081030.js
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-simd
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
+ // signature: i_iii
+ // body:
+ kExprF32Const, 0xf8, 0xf8, 0xf8, 0xf8,
+ kSimdPrefix, kExprF32x4Splat, // f32x4.splat
+ kExprF32Const, 0xf8, 0xf8, 0xf8, 0xf8,
+ kSimdPrefix, kExprF32x4Splat, // f32x4.splat
+ kSimdPrefix, kExprF32x4Min, 0x01, // f32x4.min
+ kSimdPrefix, kExprS1x4AnyTrue, 0x01, // s1x4.any_true
+ kExprEnd, // end @16
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(1, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js b/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
deleted file mode 100644
index 46dce4a871..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7049.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-gc
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-// Build two instances, instance 2 is interpreted, and calls instance 1 (via
-// C_WASM_ENTRY), instance 1 then calls JS, which triggers GC.
-
-let builder1 = new WasmModuleBuilder();
-
-function call_gc() {
- print('Triggering GC.');
- gc();
- print('Survived GC.');
-}
-let func1_sig = makeSig(new Array(8).fill(kWasmI32), [kWasmI32]);
-let imp = builder1.addImport('q', 'gc', kSig_v_v);
-let func1 = builder1.addFunction('func1', func1_sig)
- .addBody([
- kExprLocalGet, 0, // -
- kExprCallFunction, imp
- ])
- .exportFunc();
-let instance1 = builder1.instantiate({q: {gc: call_gc}});
-
-let builder2 = new WasmModuleBuilder();
-
-let func1_imp = builder2.addImport('q', 'func1', func1_sig);
-let func2 = builder2.addFunction('func2', kSig_i_i)
- .addBody([
- kExprLocalGet, 0, // 1
- kExprLocalGet, 0, // 2
- kExprLocalGet, 0, // 3
- kExprLocalGet, 0, // 4
- kExprLocalGet, 0, // 5
- kExprLocalGet, 0, // 6
- kExprLocalGet, 0, // 7
- kExprLocalGet, 0, // 8
- kExprCallFunction, func1_imp
- ])
- .exportFunc();
-
-let instance2 = builder2.instantiate({q: {func1: instance1.exports.func1}});
-
-%RedirectToWasmInterpreter(
- instance2, parseInt(instance2.exports.func2.name));
-
-// Call with 1. This will be passed by the C_WASM_ENTRY via the stack, and the
-// GC will try to dereference it (before the bug fix).
-instance2.exports.func2(1);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-715216a.js b/deps/v8/test/mjsunit/regress/wasm/regress-715216a.js
deleted file mode 100644
index 56253414c9..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-715216a.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-interpret-all --validate-asm
-
-function asm() {
- "use asm";
- function f() {}
- return {};
-}
-asm();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js b/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
deleted file mode 100644
index 85e93e07c9..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-715216b.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-interpret-all --wasm-lazy-compilation
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-var builder = new WasmModuleBuilder();
-builder.addFunction('f', kSig_v_v).addBody([]);
-builder.addFunction('g', kSig_v_v).addBody([]);
-builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-719175.js b/deps/v8/test/mjsunit/regress/wasm/regress-719175.js
deleted file mode 100644
index c6217b0b01..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-719175.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --validate-asm --wasm-interpret-all
-
-function asm() {
- 'use asm';
- function f() {
- if (1.0 % 2.5 == -0.75) {
- }
- return 0;
- }
- return {f: f};
-}
-asm().f();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-766003.js b/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
deleted file mode 100644
index 3aaff40636..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-766003.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --wasm-interpret-all
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
- __v_6 = new WasmModuleBuilder();
-__v_6.addFunction('exp1', kSig_i_i).addBody([kExprUnreachable]).exportFunc();
- __v_7 = new WasmModuleBuilder();
- __v_7.addImport('__v_11', '__v_11', kSig_i_i);
-try {
-; } catch(e) {; }
- __v_8 = __v_6.instantiate().exports.exp1;
- __v_9 = __v_7.instantiate({__v_11: {__v_11: __v_8}}).exports.call_imp;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js b/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
deleted file mode 100644
index c06adebd76..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-771243.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --wasm-interpret-all
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-assertThrows(() => {
- __v_29 = 0;
-function __f_1() {
- __v_19 = new WasmModuleBuilder();
- if (__v_25) {
- __v_23 = __v_19.addImport('__v_24', '__v_30', __v_25);
- }
- if (__v_18) {
- __v_19.addMemory();
- __v_19.addFunction('load', kSig_i_i)
- .addBody([ 0])
- .exportFunc();
- }
- return __v_19;
-}
- (function TestExternalCallBetweenTwoWasmModulesWithoutAndWithMemory() {
- __v_21 = __f_1(__v_18 = false, __v_25 = kSig_i_i);
- __v_21.addFunction('plus_one', kSig_i_i)
- .addBody([
- kExprLocalGet, 0, // -
- kExprCallFunction, __v_29 ])
- .exportFunc();
- __v_32 =
- __f_1(__v_18 = true, __v_25 = undefined);
- __v_31 = __v_32.instantiate(); try { __v_32[__getRandomProperty()] = __v_0; delete __v_18[__getRandomProperty()]; delete __v_34[__getRandomProperty()]; } catch(e) {; };
- __v_20 = __v_21.instantiate(
- {__v_24: {__v_30: __v_31.exports.load}});
- __v_20.exports.plus_one(); __v_33 = __v_43;
-})();
-});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js b/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
deleted file mode 100644
index 54676b198e..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-772332.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --wasm-interpret-all
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-assertThrows(() => {
-let __v_50315 = 0;
-function __f_15356(__v_50316, __v_50317) {
- let __v_50318 = new WasmModuleBuilder();
- if (__v_50317) {
- let __v_50319 = __v_50318.addImport('import_module', 'other_module_fn', kSig_i_i);
- }
- __v_50318.addMemory();
- __v_50318.addFunction('load', kSig_i_i).addBody([ 0, 0, 0]).exportFunc();
- return __v_50318;
-}
- (function __f_15357() {
- let __v_50320 = __f_15356(__v_50350 = false, __v_50351 = kSig_i_i);
- __v_50320.addFunction('plus_one', kSig_i_i).addBody([kExprLocalGet, 0, kExprCallFunction, __v_50315, kExprI32Const, kExprI32Add, kExprReturn]).exportFunc();
- let __v_50321 = __f_15356();
- let __v_50324 = __v_50321.instantiate();
- let __v_50325 = __v_50320.instantiate({
- import_module: {
- other_module_fn: __v_50324.exports.load
- }
- });
- __v_50325.exports.plus_one();
- })();
-});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-778917.js b/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
deleted file mode 100644
index c7eb033d95..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-778917.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --wasm-interpret-all
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-
-const builder = new WasmModuleBuilder();
-
-const index = builder.addFunction("huge_frame", kSig_v_v)
- .addBody([kExprCallFunction, 0])
- .addLocals({f64_count: 49555}).exportFunc().index;
-// We assume above that the function we added has index 0.
-assertEquals(0, index);
-
-const module = builder.instantiate();
-assertThrows(module.exports.huge_frame, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-831463.js b/deps/v8/test/mjsunit/regress/wasm/regress-831463.js
deleted file mode 100644
index 2818ad350b..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-831463.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-interpret-all
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-const builder = new WasmModuleBuilder();
-const sig = builder.addType(kSig_i_i);
-builder.addFunction('call', kSig_i_v)
- .addBody([
- kExprI32Const, 0, kExprI32Const, 0, kExprCallIndirect, sig, kTableZero
- ])
- .exportAs('call');
-builder.addImportedTable('imp', 'table');
-const table = new WebAssembly.Table({element: 'anyfunc', initial: 1});
-const instance = builder.instantiate({imp: {table: table}});
-assertThrows(
- () => instance.exports.call(), WebAssembly.RuntimeError,
- /function signature mismatch/);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js b/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
deleted file mode 100644
index 3e3548ed32..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834624.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-interpret-all
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-let instance;
-(function DoTest() {
- function call_main() {
- instance.exports.main();
- }
- let module = new WasmModuleBuilder();
- module.addImport('mod', 'func', kSig_v_i);
- module.addFunction('main', kSig_v_i)
- .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
- .exportFunc();
- instance = module.instantiate({
- mod: {
- func: call_main
- }
- });
- try {
- instance.exports.main();
- } catch (e) {
- // ignore
- }
-})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js
deleted file mode 100644
index 279d2dbd06..0000000000
--- a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1007608.js
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Bug is in the C-to-Wasm entry, used e.g. by the Wasm interpreter.
-// Flags: --wasm-interpret-all
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-let argc = 7;
-let builder = new WasmModuleBuilder();
-let types = new Array(argc).fill(kWasmI32);
-let sig = makeSig(types, []);
-let body = [];
-for (let i = 0; i < argc; ++i) {
- body.push(kExprLocalGet, i);
-}
-body.push(kExprCallFunction, 0);
-builder.addImport('', 'f', sig);
-builder.addFunction("main", sig).addBody(body).exportAs('main');
-let instance = builder.instantiate({
- '': {
- 'f': function() { throw "don't crash"; }
- }
-});
-assertThrows(instance.exports.main);
diff --git a/deps/v8/test/mjsunit/serialize-deserialize-now.js b/deps/v8/test/mjsunit/serialize-deserialize-now.js
new file mode 100644
index 0000000000..b906328eff
--- /dev/null
+++ b/deps/v8/test/mjsunit/serialize-deserialize-now.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+%SerializeDeserializeNow();
+
+const xs = [0, 1, 2];
+var o = { a: 0, b: 1, c: 2 };
+
+%SerializeDeserializeNow();
+
+const p = new Promise((resolve, reject) => { resolve("Promise"); });
+p.then((msg) => console.log(msg));
+
+%SerializeDeserializeNow();
diff --git a/deps/v8/test/mjsunit/tools/foozzie.js b/deps/v8/test/mjsunit/tools/foozzie.js
index 30faf46116..67e6547c67 100644
--- a/deps/v8/test/mjsunit/tools/foozzie.js
+++ b/deps/v8/test/mjsunit/tools/foozzie.js
@@ -8,9 +8,9 @@
// Test foozzie mocks for differential fuzzing.
// Deterministic Math.random.
-assertEquals(0.1, Math.random());
-assertEquals(0.2, Math.random());
-assertEquals(0.3, Math.random());
+assertEquals(0.7098480789645691, Math.random());
+assertEquals(0.9742682568175951, Math.random());
+assertEquals(0.20008059867222983, Math.random());
// Deterministic date.
assertEquals(1477662728698, Date.now());
@@ -77,3 +77,15 @@ if (isBigEndian){
else {
testArrayType(Float64Array, [0, 1072693248]);
}
+
+// Realm.eval is just eval.
+assertEquals(1477662728716, Realm.eval(Realm.create(), `Date.now()`));
+
+// Test suppressions when Math.pow is optimized.
+function callPow(v) {
+ return Math.pow(v, -0.5);
+}
+%PrepareFunctionForOptimization(callPow);
+const unoptimized = callPow(6996);
+%OptimizeFunctionOnNextCall(callPow);
+assertEquals(unoptimized, callPow(6996));
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc-interpreter.js b/deps/v8/test/mjsunit/wasm/anyfunc-interpreter.js
deleted file mode 100644
index 10d66c6b36..0000000000
--- a/deps/v8/test/mjsunit/wasm/anyfunc-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/anyfunc.js");
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals-interpreter.js b/deps/v8/test/mjsunit/wasm/anyref-globals-interpreter.js
deleted file mode 100644
index bca5d6c202..0000000000
--- a/deps/v8/test/mjsunit/wasm/anyref-globals-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-anyref --expose-gc
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/anyref-globals.js");
diff --git a/deps/v8/test/mjsunit/wasm/anyref-interpreter.js b/deps/v8/test/mjsunit/wasm/anyref-interpreter.js
deleted file mode 100644
index d22e841049..0000000000
--- a/deps/v8/test/mjsunit/wasm/anyref-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/anyref.js");
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 97219f113b..10db2c2be8 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -1397,3 +1397,27 @@ assertWasm(3.25, TestFloatGlobals);
assertEquals(42, m.bar());
assertEquals(42, m.baz());
})();
+
+(function TestGenerator() {
+ function* asmModule() {
+ "use asm";
+ function foo() {
+ return 42;
+ }
+ return {foo: foo};
+ }
+ asmModule();
+ assertFalse(%IsAsmWasmCode(asmModule));
+})();
+
+(function TestAsyncFunction() {
+ async function asmModule() {
+ "use asm";
+ function foo() {
+ return 42;
+ }
+ return {foo: foo};
+ }
+ asmModule();
+ assertFalse(%IsAsmWasmCode(asmModule));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
index 4723b92acf..b01d0e55b7 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
@@ -19,7 +19,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertPromiseResult(WebAssembly.compile(bytes)
.then(assertUnreachable,
error => assertEquals("WebAssembly.compile(): Invalid compilation " +
- "hint 0x2d (forbidden downgrade) @+49", error.message)));
+ "hint 0x19 (forbidden downgrade) @+49", error.message)));
})();
(function testCompileWithBadLazyFunctionBody() {
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
index 5bcac2af9e..aa0525c09d 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
@@ -20,7 +20,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.exportFunc();
assertThrows(() => builder.instantiate({mod: {pow: Math.pow}}),
WebAssembly.CompileError,
- "WebAssembly.Module(): Invalid compilation hint 0x2d " +
+ "WebAssembly.Module(): Invalid compilation hint 0x19 " +
"(forbidden downgrade) @+70");
})();
@@ -33,8 +33,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyDefault,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
+ kCompilationHintTierBaseline,
+ kCompilationHintTierBaseline)
.exportFunc();
builder.addFunction('upow2', kSig_i_i)
.addBody([kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
index 4bfc22fb89..09af4c7f69 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
@@ -13,8 +13,8 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
kExprLocalGet, 0,
kExprCallFunction, 0])
.setCompilationHint(kCompilationHintStrategyDefault,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
+ kCompilationHintTierBaseline,
+ kCompilationHintTierBaseline)
.exportFunc();
let instance = builder.instantiate({mod: {pow: Math.pow}});
assertEquals(27, instance.exports.upow(3))
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js b/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
deleted file mode 100644
index f9f85a7d91..0000000000
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Flags: --experimental-wasm-compilation-hints --wasm-lazy-validation
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-(function testInterpreterCallsLazyFunctionInOtherInstance() {
- print(arguments.callee.name);
- let builder0 = new WasmModuleBuilder();
- builder0.addFunction("getX", kSig_i_v)
- .addBody([kExprI32Const, 42])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierBaseline,
- kCompilationHintTierBaseline)
- .exportFunc();
- let builder1 = new WasmModuleBuilder();
- builder1.addImport("otherModule", "getX", kSig_i_v);
- builder1.addFunction("plusX", kSig_i_i)
- .addBody([kExprCallFunction, 0,
- kExprLocalGet, 0,
- kExprI32Add])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
- .exportFunc();
- let instance0 = builder0.instantiate();
- let instance1 = builder1.instantiate(
- {otherModule: {getX: instance0.exports.getX}});
- assertEquals(46, instance1.exports.plusX(4));
-})();
-
-(function testInterpreterCallsLazyBadFunctionInOtherInstance() {
- print(arguments.callee.name);
- let builder0 = new WasmModuleBuilder();
- builder0.addFunction("getX", kSig_i_v)
- .addBody([kExprI64Const, 42])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierBaseline,
- kCompilationHintTierBaseline)
- .exportFunc();
- let builder1 = new WasmModuleBuilder();
- builder1.addImport("otherModule", "getX", kSig_i_v);
- builder1.addFunction("plusX", kSig_i_i)
- .addBody([kExprCallFunction, 0,
- kExprLocalGet, 0,
- kExprI32Add])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
- .exportFunc();
- let instance0 = builder0.instantiate();
- let instance1 = builder1.instantiate(
- {otherModule: {getX: instance0.exports.getX}});
- assertThrows(() => instance1.exports.plusX(4),
- WebAssembly.CompileError,
- "Compiling function #0:\"getX\" failed: type error in " +
- "merge[0] (expected i32, got i64) @+57");
-})();
-
-(function testInterpreterCallsLazyFunctionThroughIndirection() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- let sig_i_ii = builder.addType(kSig_i_ii);
- let add = builder.addFunction('add', sig_i_ii)
- .addBody([kExprLocalGet, 0,
- kExprLocalGet, 1,
- kExprI32Add])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter);
- builder.appendToTable([add.index]);
- builder.addFunction('main', kSig_i_iii)
- .addBody([// Call indirect #0 with args <#1, #2>.
- kExprLocalGet, 1,
- kExprLocalGet, 2,
- kExprLocalGet, 0,
- kExprCallIndirect, sig_i_ii, kTableZero])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
- .exportFunc();
- assertEquals(99, builder.instantiate().exports.main(0, 22, 77));
-})();
-
-(function testInterpreterCallsLazyBadFunctionThroughIndirection() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- let sig_i_ii = builder.addType(kSig_i_ii);
- let add = builder.addFunction('add', sig_i_ii)
- .addBody([kExprLocalGet, 0,
- kExprLocalGet, 1,
- kExprI64Add])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter);
- builder.appendToTable([add.index]);
- builder.addFunction('main', kSig_i_iii)
- .addBody([// Call indirect #0 with args <#1, #2>.
- kExprLocalGet, 1,
- kExprLocalGet, 2,
- kExprLocalGet, 0,
- kExprCallIndirect, sig_i_ii, kTableZero])
- .setCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
- .exportFunc();
- assertThrows(() => builder.instantiate().exports.main(0, 22, 77),
- WebAssembly.CompileError,
- "Compiling function #0:\"add\" failed: i64.add[1] expected " +
- "type i64, found local.get of type i32 @+83");
-})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
index 2708da149b..59c1a9ed3a 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
@@ -49,7 +49,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
{mod: {pow: Math.pow}})
.then(assertUnreachable,
error => assertEquals("WebAssembly.instantiateStreaming(): Invalid " +
- "compilation hint 0x2d (forbidden downgrade) " +
+ "compilation hint 0x19 (forbidden downgrade) " +
"@+78",
error.message)));
})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
index 35f77de157..1f9c00a1f7 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
@@ -17,7 +17,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.exportFunc();
assertThrows(() => builder.toModule(),
WebAssembly.CompileError,
- "WebAssembly.Module(): Invalid compilation hint 0x2d " +
+ "WebAssembly.Module(): Invalid compilation hint 0x19 " +
"(forbidden downgrade) @+49");
})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-management.js b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
index a2b102f083..47a12c165b 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-management.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
@@ -80,6 +80,7 @@ assertEquals(0, %WasmGetNumberOfInstances(module));
instance4 = new WebAssembly.Instance(module, {"": {getValue: () => 4}});
assertEquals(4, instance4.exports.f());
module = null;
+ instance4 = null;
})();
// Note that two GC's are required because weak slots clearing is deferred.
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref-interpreter.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref-interpreter.js
deleted file mode 100644
index 14d1e60e88..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-eh --experimental-wasm-anyref --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for existing exception handling test cases that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/exceptions-anyref.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-global-interpreter.js b/deps/v8/test/mjsunit/wasm/exceptions-global-interpreter.js
deleted file mode 100644
index ae06ff00fb..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-global-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for existing exception handling test cases that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/exceptions-global.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-interpreter.js b/deps/v8/test/mjsunit/wasm/exceptions-interpreter.js
deleted file mode 100644
index 9393aade0f..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for existing exception handling test cases that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/exceptions.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow-interpreter.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow-interpreter.js
deleted file mode 100644
index 42285ba169..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-eh --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for existing exception handling test cases that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/exceptions-rethrow.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd-interpreter.js b/deps/v8/test/mjsunit/wasm/exceptions-simd-interpreter.js
deleted file mode 100644
index 7fd4e60032..0000000000
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-eh --experimental-wasm-simd --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for existing exception handling test cases that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/exceptions-simd.js");
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index 6fb7283d74..8a8638f81f 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -42,7 +42,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd,
kExprLocalGet, 0,
kSimdPrefix, kExprI32x4Eq,
- kSimdPrefix, kExprS1x4AllTrue,
+ kSimdPrefix, kExprS1x16AllTrue,
])
.exportFunc();
var instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index fc82455ca8..de1026c045 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -182,13 +182,62 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd
]).exportFunc();
function throw_exc() {
- throw exception = new WebAssembly.RuntimeError('My user text');
+ throw new WebAssembly.RuntimeError('My user text');
}
let instance = builder.instantiate({imp: {ort: throw_exc}});
assertEquals(11, instance.exports.call_import());
})();
+(function TestExnWithWasmProtoNotCaught() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ let imp = builder.addImport('imp', 'ort', kSig_v_v);
+ let throw_fn = builder.addFunction('throw', kSig_v_v)
+ .addBody([kExprThrow, except])
+ .exportFunc();
+ builder.addFunction('test', kSig_v_v)
+ .addBody([
+ // Calling "throw" directly should produce the expected exception.
+ kExprTry, kWasmStmt,
+ kExprCallFunction, throw_fn.index,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ // Calling through JS produces a wrapped exception which does not match
+ // the br_on_exn.
+ kExprTry, kWasmStmt,
+ kExprCallFunction, imp,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd
+ ]).exportFunc();
+ let instance;
+ let wrapped_exn;
+ function js_import() {
+ try {
+ instance.exports.throw();
+ } catch (e) {
+ wrapped_exn = new Error();
+ wrapped_exn.__proto__ = e;
+ throw wrapped_exn;
+ }
+ }
+ instance = builder.instantiate({imp: {ort: js_import}});
+ let caught = undefined;
+ try {
+ instance.exports.test();
+ } catch (e) {
+ caught = e;
+ }
+ assertTrue(!!caught, 'should have trapped');
+ assertEquals(caught, wrapped_exn);
+ assertInstanceof(caught.__proto__, WebAssembly.RuntimeError);
+})();
+
// Test that we can distinguish which exception was thrown by using a cascaded
// sequence of nested try blocks with a single handler in each catch block.
(function TestCatchComplex1() {
diff --git a/deps/v8/test/mjsunit/wasm/futex-interpreter.js b/deps/v8/test/mjsunit/wasm/futex-interpreter.js
deleted file mode 100644
index 738e62a0a5..0000000000
--- a/deps/v8/test/mjsunit/wasm/futex-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-threads --allow-natives-syntax
-// Flags: --wasm-interpret-all
-
-// This is a wrapper for existing futex tests with the --wasm-interpret-all
-// flag added. If we ever decide to add a test variant for this, this file can
-// be removed.
-
-load("test/mjsunit/wasm/futex.js");
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js
deleted file mode 100644
index f9275d7ccc..0000000000
--- a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref --experimental-wasm-return-call
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/indirect-call-non-zero-table.js");
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
deleted file mode 100644
index 2e72a88839..0000000000
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-gc
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-// =============================================================================
-// Tests in this file test the interaction between the wasm interpreter and
-// compiled code.
-// =============================================================================
-
-// The stack trace contains file path, replace it by "file".
-let stripPath = s => s.replace(/[^ (]*interpreter-mixed\.js/g, 'file');
-
-function checkStack(stack, expected_lines) {
- print('stack: ' + stack);
- let lines = stack.split('\n');
- assertEquals(expected_lines.length, lines.length);
- for (let i = 0; i < lines.length; ++i) {
- let test =
- typeof expected_lines[i] == 'string' ? assertEquals : assertMatches;
- test(expected_lines[i], lines[i], 'line ' + i);
- }
-}
-
-(function testMemoryGrowBetweenInterpretedAndCompiled() {
- // grow_memory can be called from interpreted or compiled code, and changes
- // should be reflected in either execution.
- var builder = new WasmModuleBuilder();
- var grow_body = [kExprLocalGet, 0, kExprMemoryGrow, kMemoryZero];
- var load_body = [kExprLocalGet, 0, kExprI32LoadMem, 0, 0];
- var store_body = [kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, 0];
- builder.addFunction('grow_memory', kSig_i_i).addBody(grow_body).exportFunc();
- builder.addFunction('load', kSig_i_i).addBody(load_body).exportFunc();
- builder.addFunction('store', kSig_v_ii).addBody(store_body).exportFunc();
- var grow_interp_function =
- builder.addFunction('grow_memory_interpreted', kSig_i_i)
- .addBody(grow_body)
- .exportFunc();
- var load_interp_function = builder.addFunction('load_interpreted', kSig_i_i)
- .addBody(load_body)
- .exportFunc();
- var kNumPages = 2;
- var kMaxPages = 10;
- builder.addMemory(kNumPages, kMaxPages, false);
- var instance = builder.instantiate();
- var exp = instance.exports;
- %RedirectToWasmInterpreter(instance, grow_interp_function.index);
- %RedirectToWasmInterpreter(instance, load_interp_function.index);
-
- // Initially, we can load from offset 12, but not OOB.
- var oob_index = kNumPages * kPageSize;
- var initial_interpreted = %WasmNumInterpretedCalls(instance);
- assertEquals(0, exp.load(12));
- assertEquals(0, exp.load_interpreted(12));
- assertTraps(kTrapMemOutOfBounds, () => exp.load(oob_index));
- assertTraps(
- kTrapMemOutOfBounds, () => exp.load_interpreted(oob_index));
- // Grow by 2 pages from compiled code, and ensure that this is reflected in
- // the interpreter.
- assertEquals(kNumPages, exp.grow_memory(2));
- kNumPages += 2;
- assertEquals(kNumPages, exp.grow_memory_interpreted(0));
- assertEquals(kNumPages, exp.grow_memory(0));
- // Now we can load from the previous OOB index.
- assertEquals(0, exp.load(oob_index));
- assertEquals(0, exp.load_interpreted(oob_index));
- // Set new OOB index and ensure that it traps.
- oob_index = kNumPages * kPageSize;
- assertTraps(kTrapMemOutOfBounds, () => exp.load(oob_index));
- assertTraps(
- kTrapMemOutOfBounds, () => exp.load_interpreted(oob_index));
- // Grow by another page in the interpreter, and ensure that this is reflected
- // in compiled code.
- assertEquals(kNumPages, exp.grow_memory_interpreted(1));
- kNumPages += 1;
- assertEquals(kNumPages, exp.grow_memory_interpreted(0));
- assertEquals(kNumPages, exp.grow_memory(0));
- // Now we can store to the previous OOB index and read it back in both
- // environments.
- exp.store(oob_index, 47);
- assertEquals(47, exp.load(oob_index));
- assertEquals(47, exp.load_interpreted(oob_index));
- // We cannot grow beyond kMaxPages.
- assertEquals(-1, exp.grow_memory(kMaxPages - kNumPages + 1));
- assertEquals(-1, exp.grow_memory_interpreted(kMaxPages - kNumPages + 1));
- // Overall, we executed 9 functions in the interpreter.
- assertEquals(initial_interpreted + 9, %WasmNumInterpretedCalls(instance));
-})();
-
-function createTwoInstancesCallingEachOther(inner_throws = false) {
- let builder1 = new WasmModuleBuilder();
-
- let id_imp = builder1.addImport('q', 'id', kSig_i_i);
- let plus_one = builder1.addFunction('plus_one', kSig_i_i)
- .addBody([
- kExprLocalGet, 0, // -
- kExprI32Const, 1, // -
- kExprI32Add, // -
- kExprCallFunction, id_imp
- ])
- .exportFunc();
- function imp(i) {
- if (inner_throws) throw new Error('i=' + i);
- return i;
- }
- let instance1 = builder1.instantiate({q: {id: imp}});
-
- let builder2 = new WasmModuleBuilder();
-
- let plus_one_imp = builder2.addImport('q', 'plus_one', kSig_i_i);
- let plus_two = builder2.addFunction('plus_two', kSig_i_i)
- .addBody([
- // Call import, add one more.
- kExprLocalGet, 0, // -
- kExprCallFunction, plus_one_imp, // -
- kExprI32Const, 1, // -
- kExprI32Add
- ])
- .exportFunc();
-
- let instance2 =
- builder2.instantiate({q: {plus_one: instance1.exports.plus_one}});
- return [instance1, instance2];
-}
-
-function redirectToInterpreter(
- instance1, instance2, redirect_plus_one, redirect_plus_two) {
- // Redirect functions to the interpreter.
- if (redirect_plus_one) {
- %RedirectToWasmInterpreter(instance1,
- parseInt(instance1.exports.plus_one.name));
- }
- if (redirect_plus_two) {
- %RedirectToWasmInterpreter(instance2,
- parseInt(instance2.exports.plus_two.name));
- }
-}
-
-(function testImportFromOtherInstance() {
- print("testImportFromOtherInstance");
- // Three runs: Break in instance 1, break in instance 2, or both.
- for (let run = 0; run < 3; ++run) {
- print(" - run " + run);
- (() => {
- // Trigger a GC to ensure that the underlying native module is not a cached
- // one from a previous run, with functions already redirected to the
- // interpreter. This is not observable from pure JavaScript, but this is
- // observable with the internal runtime functions used in this test.
- // Run in a local scope to ensure previous native modules are
- // unreachable.
- gc();
- let [instance1, instance2] = createTwoInstancesCallingEachOther();
- let interpreted_before_1 = %WasmNumInterpretedCalls(instance1);
- let interpreted_before_2 = %WasmNumInterpretedCalls(instance2);
- // Call plus_two, which calls plus_one.
- assertEquals(9, instance2.exports.plus_two(7));
- // Nothing interpreted:
- assertEquals(interpreted_before_1, %WasmNumInterpretedCalls(instance1));
- assertEquals(interpreted_before_2, %WasmNumInterpretedCalls(instance2));
- // Now redirect functions to the interpreter.
- redirectToInterpreter(instance1, instance2, run != 1, run != 0);
- // Call plus_two, which calls plus_one.
- assertEquals(9, instance2.exports.plus_two(7));
- // TODO(6668): Fix patching of instances which imported others' code.
- //assertEquals(interpreted_before_1 + (run == 1 ? 0 : 1),
- // %WasmNumInterpretedCalls(instance1));
- assertEquals(interpreted_before_2 + (run == 0 ? 0 : 1),
- %WasmNumInterpretedCalls(instance2))
- })();
- }
-})();
-
-(function testStackTraceThroughCWasmEntry() {
- print("testStackTraceThroughCWasmEntry");
- for (let run = 0; run < 3; ++run) {
- print(" - run " + run);
- let [instance1, instance2] = createTwoInstancesCallingEachOther(true);
- redirectToInterpreter(instance1, instance2, run != 1, run != 0);
-
- try {
- // Call plus_two, which calls plus_one.
- instance2.exports.plus_two(7);
- assertUnreachable('should trap because of unreachable instruction');
- } catch (e) {
- checkStack(stripPath(e.stack), [
- 'Error: i=8', // -
- /^ at imp \(file:\d+:29\)$/, // -
- ' at plus_one (<anonymous>:wasm-function[1]:0x3b)', // -
- ' at plus_two (<anonymous>:wasm-function[1]:0x3e)', // -
- /^ at testStackTraceThroughCWasmEntry \(file:\d+:25\)$/, // -
- /^ at file:\d+:3$/
- ]);
- }
- }
-})();
-
-(function testInterpreterPreservedOnTierUp() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
- var fun_body = [kExprI32Const, 23];
- var fun = builder.addFunction('fun', kSig_i_v).addBody(fun_body).exportFunc();
- var instance = builder.instantiate();
- var exp = instance.exports;
-
- // Initially the interpreter is not being called.
- var initial_interpreted = %WasmNumInterpretedCalls(instance);
- assertEquals(23, exp.fun());
- assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
-
- // Redirection will cause the interpreter to be called.
- %RedirectToWasmInterpreter(instance, fun.index);
- assertEquals(23, exp.fun());
- assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
-
- // Requesting a tier-up still ensures the interpreter is being called.
- %WasmTierUpFunction(instance, fun.index);
- assertEquals(23, exp.fun());
- assertEquals(initial_interpreted + 2, %WasmNumInterpretedCalls(instance));
-})();
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
deleted file mode 100644
index 99debff846..0000000000
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ /dev/null
@@ -1,569 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --wasm-interpret-all --allow-natives-syntax --expose-gc
-
-load('test/mjsunit/wasm/wasm-module-builder.js');
-
-// The stack trace contains file path, only keep "interpreter.js".
-let stripPath = s => s.replace(/[^ (]*interpreter\.js/g, 'interpreter.js');
-
-function checkStack(stack, expected_lines) {
- print('stack: ' + stack);
- var lines = stack.split('\n');
- assertEquals(expected_lines.length, lines.length);
- for (var i = 0; i < lines.length; ++i) {
- let test =
- typeof expected_lines[i] == 'string' ? assertEquals : assertMatches;
- test(expected_lines[i], lines[i], 'line ' + i);
- }
-}
-
-(function testCallImported() {
- print(arguments.callee.name);
- var stack;
- let func = () => stack = new Error('test imported stack').stack;
-
- var builder = new WasmModuleBuilder();
- builder.addImport('mod', 'func', kSig_v_v);
- builder.addFunction('main', kSig_v_v)
- .addBody([kExprCallFunction, 0])
- .exportFunc();
- var instance = builder.instantiate({mod: {func: func}});
- // Test that this does not mess up internal state by executing it three times.
- for (var i = 0; i < 3; ++i) {
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- instance.exports.main();
- assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
- checkStack(stripPath(stack), [
- 'Error: test imported stack', // -
- /^ at func \(interpreter.js:\d+:28\)$/, // -
- ' at main (<anonymous>:wasm-function[1]:0x32)', // -
- /^ at testCallImported \(interpreter.js:\d+:22\)$/, // -
- /^ at interpreter.js:\d+:3$/
- ]);
- }
-})();
-
-(function testCallImportedWithParameters() {
- print(arguments.callee.name);
- var stack;
- var passed_args = [];
- let func1 = (i, j) => (passed_args.push(i, j), 2 * i + j);
- let func2 = (f) => (passed_args.push(f), 8 * f);
-
- var builder = new WasmModuleBuilder();
- builder.addImport('mod', 'func1', makeSig([kWasmI32, kWasmI32], [kWasmF32]));
- builder.addImport('mod', 'func2', makeSig([kWasmF64], [kWasmI32]));
- builder.addFunction('main', makeSig([kWasmI32, kWasmF64], [kWasmF32]))
- .addBody([
- // call #0 with arg 0 and arg 0 + 1
- kExprLocalGet, 0, kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
- kExprCallFunction, 0,
- // call #1 with arg 1
- kExprLocalGet, 1, kExprCallFunction, 1,
- // convert returned value to f32
- kExprF32UConvertI32,
- // add the two values
- kExprF32Add
- ])
- .exportFunc();
- var instance = builder.instantiate({mod: {func1: func1, func2: func2}});
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- var args = [11, 0.3];
- var ret = instance.exports.main(...args);
- assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
- var passed_test_args = [...passed_args];
- var expected = func1(args[0], args[0] + 1) + func2(args[1]) | 0;
- assertEquals(expected, ret);
- assertArrayEquals([args[0], args[0] + 1, args[1]], passed_test_args);
-})();
-
-(function testTrap() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
- var foo_idx = builder.addFunction('foo', kSig_v_v)
- .addBody([kExprNop, kExprNop, kExprUnreachable])
- .index;
- builder.addFunction('main', kSig_v_v)
- .addBody([kExprNop, kExprCallFunction, foo_idx])
- .exportFunc();
- var instance = builder.instantiate();
- // Test that this does not mess up internal state by executing it three times.
- for (var i = 0; i < 3; ++i) {
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- var stack;
- try {
- instance.exports.main();
- assertUnreachable();
- } catch (e) {
- stack = e.stack;
- }
- assertEquals(interpreted_before + 2, %WasmNumInterpretedCalls(instance));
- checkStack(stripPath(stack), [
- 'RuntimeError: unreachable', // -
- ' at foo (<anonymous>:wasm-function[0]:0x27)', // -
- ' at main (<anonymous>:wasm-function[1]:0x2c)', // -
- /^ at testTrap \(interpreter.js:\d+:24\)$/, // -
- /^ at interpreter.js:\d+:3$/
- ]);
- }
-})();
-
-(function testThrowFromImport() {
- print(arguments.callee.name);
- function func() {
- throw new Error('thrown from imported function');
- }
- var builder = new WasmModuleBuilder();
- builder.addImport("mod", "func", kSig_v_v);
- builder.addFunction('main', kSig_v_v)
- .addBody([kExprCallFunction, 0])
- .exportFunc();
- var instance = builder.instantiate({mod: {func: func}});
- // Test that this does not mess up internal state by executing it three times.
- for (var i = 0; i < 3; ++i) {
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- var stack;
- try {
- instance.exports.main();
- assertUnreachable();
- } catch (e) {
- stack = e.stack;
- }
- assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
- checkStack(stripPath(stack), [
- 'Error: thrown from imported function', // -
- /^ at func \(interpreter.js:\d+:11\)$/, // -
- ' at main (<anonymous>:wasm-function[1]:0x32)', // -
- /^ at testThrowFromImport \(interpreter.js:\d+:24\)$/, // -
- /^ at interpreter.js:\d+:3$/
- ]);
- }
-})();
-
-(function testGlobals() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
- builder.addGlobal(kWasmI32, true); // 0
- builder.addGlobal(kWasmI64, true); // 1
- builder.addGlobal(kWasmF32, true); // 2
- builder.addGlobal(kWasmF64, true); // 3
- builder.addFunction('get_i32', kSig_i_v)
- .addBody([kExprGlobalGet, 0])
- .exportFunc();
- builder.addFunction('get_i64', kSig_d_v)
- .addBody([kExprGlobalGet, 1, kExprF64SConvertI64])
- .exportFunc();
- builder.addFunction('get_f32', kSig_d_v)
- .addBody([kExprGlobalGet, 2, kExprF64ConvertF32])
- .exportFunc();
- builder.addFunction('get_f64', kSig_d_v)
- .addBody([kExprGlobalGet, 3])
- .exportFunc();
- builder.addFunction('set_i32', kSig_v_i)
- .addBody([kExprLocalGet, 0, kExprGlobalSet, 0])
- .exportFunc();
- builder.addFunction('set_i64', kSig_v_d)
- .addBody([kExprLocalGet, 0, kExprI64SConvertF64, kExprGlobalSet, 1])
- .exportFunc();
- builder.addFunction('set_f32', kSig_v_d)
- .addBody([kExprLocalGet, 0, kExprF32ConvertF64, kExprGlobalSet, 2])
- .exportFunc();
- builder.addFunction('set_f64', kSig_v_d)
- .addBody([kExprLocalGet, 0, kExprGlobalSet, 3])
- .exportFunc();
- var instance = builder.instantiate();
- // Initially, all should be zero.
- assertEquals(0, instance.exports.get_i32());
- assertEquals(0, instance.exports.get_i64());
- assertEquals(0, instance.exports.get_f32());
- assertEquals(0, instance.exports.get_f64());
- // Assign values to all variables.
- var values = [4711, 1<<40 + 1 << 33, 0.3, 12.34567];
- instance.exports.set_i32(values[0]);
- instance.exports.set_i64(values[1]);
- instance.exports.set_f32(values[2]);
- instance.exports.set_f64(values[3]);
- // Now check the values.
- assertEquals(values[0], instance.exports.get_i32());
- assertEquals(values[1], instance.exports.get_i64());
- assertEqualsDelta(values[2], instance.exports.get_f32(), 2**-23);
- assertEquals(values[3], instance.exports.get_f64());
-})();
-
-(function testReentrantInterpreter() {
- print(arguments.callee.name);
- var stacks;
- var instance;
- function func(i) {
- stacks.push(new Error('reentrant interpreter test #' + i).stack);
- if (i < 2) instance.exports.main(i + 1);
- }
-
- var builder = new WasmModuleBuilder();
- builder.addImport('mod', 'func', kSig_v_i);
- builder.addFunction('main', kSig_v_i)
- .addBody([kExprLocalGet, 0, kExprCallFunction, 0])
- .exportFunc();
- instance = builder.instantiate({mod: {func: func}});
- // Test that this does not mess up internal state by executing it three times.
- for (var i = 0; i < 3; ++i) {
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- stacks = [];
- instance.exports.main(0);
- assertEquals(interpreted_before + 3, %WasmNumInterpretedCalls(instance));
- assertEquals(3, stacks.length);
- for (var e = 0; e < stacks.length; ++e) {
- expected = ['Error: reentrant interpreter test #' + e];
- expected.push(/^ at func \(interpreter.js:\d+:17\)$/);
- expected.push(' at main (<anonymous>:wasm-function[1]:0x36)');
- for (var k = e; k > 0; --k) {
- expected.push(/^ at func \(interpreter.js:\d+:33\)$/);
- expected.push(' at main (<anonymous>:wasm-function[1]:0x36)');
- }
- expected.push(
- /^ at testReentrantInterpreter \(interpreter.js:\d+:22\)$/);
- expected.push(/ at interpreter.js:\d+:3$/);
- checkStack(stripPath(stacks[e]), expected);
- }
- }
-})();
-
-(function testIndirectImports() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
-
- var sig_i_ii = builder.addType(kSig_i_ii);
- var sig_i_i = builder.addType(kSig_i_i);
- var mul = builder.addImport('q', 'mul', sig_i_ii);
- var add = builder.addFunction('add', sig_i_ii).addBody([
- kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
- ]);
- var mismatch =
- builder.addFunction('sig_mismatch', sig_i_i).addBody([kExprLocalGet, 0]);
- var main = builder.addFunction('main', kSig_i_iii)
- .addBody([
- // Call indirect #0 with args <#1, #2>.
- kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 0,
- kExprCallIndirect, sig_i_ii, kTableZero
- ])
- .exportFunc();
- builder.appendToTable([mul, add.index, mismatch.index, main.index]);
-
- var instance = builder.instantiate({q: {mul: (a, b) => a * b}});
-
- // Call mul.
- assertEquals(-6, instance.exports.main(0, -2, 3));
- // Call add.
- assertEquals(99, instance.exports.main(1, 22, 77));
- // main and sig_mismatch have another signature.
- assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(2, 12, 33));
- assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(3, 12, 33));
- // Function index 4 does not exist.
- assertTraps(kTrapFuncInvalid, () => instance.exports.main(4, 12, 33));
-})();
-
-(function testIllegalImports() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
-
- var sig_l_v = builder.addType(kSig_l_v);
- var imp = builder.addImport('q', 'imp', sig_l_v);
- var direct = builder.addFunction('direct', kSig_l_v)
- .addBody([kExprCallFunction, imp])
- .exportFunc();
- var indirect = builder.addFunction('indirect', kSig_l_v).addBody([
- kExprI32Const, 0, kExprCallIndirect, sig_l_v, kTableZero
- ]);
- var main =
- builder.addFunction('main', kSig_v_i)
- .addBody([
- // Call indirect #0 with arg #0, drop result.
- kExprLocalGet, 0, kExprCallIndirect, sig_l_v, kTableZero, kExprDrop
- ])
- .exportFunc();
- builder.appendToTable([imp, direct.index, indirect.index]);
-
- var instance = builder.instantiate({q: {imp: () => 1}});
-
- // Calling imported functions with i64 in signature should fail.
- try {
- // Via direct call.
- instance.exports.main(1);
- } catch (e) {
- if (!(e instanceof TypeError)) throw e;
- checkStack(stripPath(e.stack), [
- 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at direct (<anonymous>:wasm-function[1]:0x55)', // -
- ' at main (<anonymous>:wasm-function[3]:0x64)', // -
- /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
- /^ at interpreter.js:\d+:3$/
- ]);
- }
- try {
- // Via indirect call.
- instance.exports.main(2);
- } catch (e) {
- if (!(e instanceof TypeError)) throw e;
- checkStack(stripPath(e.stack), [
- 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at indirect (<anonymous>:wasm-function[2]:0x5c)', // -
- ' at main (<anonymous>:wasm-function[3]:0x64)', // -
- /^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
- /^ at interpreter.js:\d+:3$/
- ]);
- }
-})();
-
-(function testImportExportedFunction() {
- // See https://crbug.com/860392.
- print(arguments.callee.name);
- let instance0 = (() => {
- let builder = new WasmModuleBuilder();
- builder.addFunction('f11', kSig_i_v).addBody(wasmI32Const(11)).exportFunc();
- builder.addFunction('f17', kSig_i_v).addBody(wasmI32Const(17)).exportFunc();
- return builder.instantiate();
- })();
-
- let builder = new WasmModuleBuilder();
- let sig_i_v = builder.addType(kSig_i_v);
- let f11_imp = builder.addImport('q', 'f11', sig_i_v);
- let f17_imp = builder.addImport('q', 'f17', sig_i_v);
- let add = builder.addFunction('add', sig_i_v).addBody([
- kExprCallFunction, f11_imp, // call f11
- kExprCallFunction, f17_imp, // call f17
- kExprI32Add // i32.add
- ]).exportFunc();
- let instance = builder.instantiate(
- {q: {f11: instance0.exports.f11, f17: instance0.exports.f17}});
-
- assertEquals(28, instance.exports.add());
-})();
-
-(function testInfiniteRecursion() {
- print(arguments.callee.name);
- var builder = new WasmModuleBuilder();
-
- var direct = builder.addFunction('main', kSig_v_v)
- .addBody([kExprNop, kExprCallFunction, 0])
- .exportFunc();
- var instance = builder.instantiate();
-
- try {
- instance.exports.main();
- assertUnreachable("should throw");
- } catch (e) {
- if (!(e instanceof RangeError)) throw e;
- checkStack(stripPath(e.stack), [
- 'RangeError: Maximum call stack size exceeded',
- ' at main (<anonymous>:wasm-function[0]:0x20)'
- ].concat(Array(9).fill(' at main (<anonymous>:wasm-function[0]:0x22)')));
- }
-})();
-
-(function testUnwindSingleActivation() {
- print(arguments.callee.name);
- // Create two activations and unwind just the top one.
- var builder = new WasmModuleBuilder();
-
- function MyError(i) {
- this.i = i;
- }
-
- // We call wasm -> func 1 -> wasm -> func2.
- // func2 throws, func 1 catches.
- function func1() {
- try {
- return instance.exports.foo();
- } catch (e) {
- if (!(e instanceof MyError)) throw e;
- return e.i + 2;
- }
- }
- function func2() {
- throw new MyError(11);
- }
- var imp1 = builder.addImport('mod', 'func1', kSig_i_v);
- var imp2 = builder.addImport('mod', 'func2', kSig_v_v);
- builder.addFunction('main', kSig_i_v)
- .addBody([kExprCallFunction, imp1, kExprI32Const, 2, kExprI32Mul])
- .exportFunc();
- builder.addFunction('foo', kSig_v_v)
- .addBody([kExprCallFunction, imp2])
- .exportFunc();
- var instance = builder.instantiate({mod: {func1: func1, func2: func2}});
-
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- assertEquals(2 * (11 + 2), instance.exports.main());
- assertEquals(interpreted_before + 2, %WasmNumInterpretedCalls(instance));
-})();
-
-(function testInterpreterGC() {
- print(arguments.callee.name);
- function run(f) {
- // wrap the creation in a closure so that the only thing returned is
- // the module (i.e. the underlying array buffer of wasm wire bytes dies).
- var module = (() => {
- var builder = new WasmModuleBuilder();
- var imp = builder.addImport('mod', 'the_name_of_my_import', kSig_i_i);
- builder.addFunction('main', kSig_i_i)
- .addBody([kExprLocalGet, 0, kExprCallFunction, imp])
- .exportAs('main');
- print('module');
- return new WebAssembly.Module(builder.toBuffer());
- })();
-
- gc();
- for (var i = 0; i < 10; i++) {
- print(' instance ' + i);
- var instance =
- new WebAssembly.Instance(module, {'mod': {the_name_of_my_import: f}});
- var g = instance.exports.main;
- assertEquals('function', typeof g);
- for (var j = 0; j < 10; j++) {
- assertEquals(f(j), g(j));
- }
- }
- }
-
- for (var i = 0; i < 3; i++) {
- run(x => (x + 19));
- run(x => (x - 18));
- }
-})();
-
-(function testImportThrowsOnToNumber() {
- print(arguments.callee.name);
- const builder = new WasmModuleBuilder();
- const imp_idx = builder.addImport('mod', 'func', kSig_i_v);
- builder.addFunction('main', kSig_i_v)
- .addBody([kExprCallFunction, imp_idx])
- .exportFunc();
- var num_callback_calls = 0;
- const callback = () => {
- ++num_callback_calls;
- return Symbol()
- };
- var instance = builder.instantiate({mod: {func: callback}});
- // Test that this does not mess up internal state by executing it three times.
- for (var i = 0; i < 3; ++i) {
- var interpreted_before = %WasmNumInterpretedCalls(instance);
- assertThrows(
- () => instance.exports.main(), TypeError,
- 'Cannot convert a Symbol value to a number');
- assertEquals(interpreted_before + 1, %WasmNumInterpretedCalls(instance));
- assertEquals(i + 1, num_callback_calls);
- }
-})();
-
-(function testCallWithMoreReturnsThenParams() {
- print(arguments.callee.name);
- const builder1 = new WasmModuleBuilder();
- builder1.addFunction('exp', kSig_l_v)
- .addBody([kExprI64Const, 23])
- .exportFunc();
- const exp = builder1.instantiate().exports.exp;
- const builder2 = new WasmModuleBuilder();
- const imp_idx = builder2.addImport('imp', 'func', kSig_l_v);
- builder2.addFunction('main', kSig_i_v)
- .addBody([kExprCallFunction, imp_idx, kExprI32ConvertI64])
- .exportFunc();
- const instance = builder2.instantiate({imp: {func: exp}});
- assertEquals(23, instance.exports.main());
-})();
-
-(function testTableCall() {
- print(arguments.callee.name);
- const builder1 = new WasmModuleBuilder();
- builder1.addFunction('func', kSig_v_v).addBody([]).exportFunc();
- const instance1 = builder1.instantiate();
- const table = new WebAssembly.Table({element: 'anyfunc', initial: 2});
-
- const builder2 = new WasmModuleBuilder()
- builder2.addImportedTable('m', 'table');
- const sig = builder2.addType(kSig_v_v);
- builder2.addFunction('call_func', kSig_v_v)
- .addBody([kExprI32Const, 0, kExprCallIndirect, sig, kTableZero])
- .exportFunc();
- const instance2 = builder2.instantiate({m: {table: table}});
- table.set(0, instance1.exports.func);
- instance2.exports.call_func();
-})();
-
-(function testTableCall2() {
- // See crbug.com/787910.
- print(arguments.callee.name);
- const builder1 = new WasmModuleBuilder();
- builder1.addFunction('exp', kSig_i_i)
- .addBody([kExprI32Const, 0])
- .exportFunc();
- const instance1 = builder1.instantiate();
- const builder2 = new WasmModuleBuilder();
- const sig1 = builder2.addType(kSig_i_v);
- const sig2 = builder2.addType(kSig_i_i);
- builder2.addFunction('call2', kSig_i_v)
- .addBody([
- kExprI32Const, 0, kExprI32Const, 0, kExprCallIndirect, sig2, kTableZero
- ])
- .exportAs('call2');
- builder2.addImportedTable('imp', 'table');
- const tab = new WebAssembly.Table({
- element: 'anyfunc',
- initial: 3,
- });
- const instance2 = builder2.instantiate({imp: {table: tab}});
- tab.set(0, instance1.exports.exp);
- instance2.exports.call2();
-})();
-
-(function testTableCall3() {
- // See crbug.com/814562.
- print(arguments.callee.name);
- const builder0 = new WasmModuleBuilder();
- const sig_index = builder0.addType(kSig_i_v);
- builder0.addFunction('main', kSig_i_i)
- .addBody([
- kExprLocalGet, 0, // --
- kExprCallIndirect, sig_index, kTableZero
- ]) // --
- .exportAs('main');
- builder0.setTableBounds(3, 3);
- builder0.addExportOfKind('table', kExternalTable);
- const module0 = new WebAssembly.Module(builder0.toBuffer());
- const instance0 = new WebAssembly.Instance(module0);
-
- const builder1 = new WasmModuleBuilder();
- builder1.addFunction('main', kSig_i_v).addBody([kExprUnreachable]);
- builder1.addImportedTable('z', 'table');
- builder1.addElementSegment(0, 0, false, [0]);
- const module1 = new WebAssembly.Module(builder1.toBuffer());
- const instance1 =
- new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
- assertThrows(
- () => instance0.exports.main(0), WebAssembly.RuntimeError, 'unreachable');
-})();
-
-(function testSerializeInterpreted() {
- print(arguments.callee.name);
- const builder = new WasmModuleBuilder();
- builder.addFunction('main', kSig_i_i)
- .addBody([kExprLocalGet, 0, kExprI32Const, 7, kExprI32Add])
- .exportFunc();
-
- const wire_bytes = builder.toBuffer();
- var module = new WebAssembly.Module(wire_bytes);
- const i1 = new WebAssembly.Instance(module);
-
- assertEquals(11, i1.exports.main(4));
-
- const buff = %SerializeWasmModule(module);
- module = null;
- gc();
-
- module = %DeserializeWasmModule(buff, wire_bytes);
- const i2 = new WebAssembly.Instance(module);
-
- assertEquals(11, i2.exports.main(4));
-})();
diff --git a/deps/v8/test/mjsunit/wasm/multi-value-interpreter.js b/deps/v8/test/mjsunit/wasm/multi-value-interpreter.js
deleted file mode 100644
index 53f5cfbbde..0000000000
--- a/deps/v8/test/mjsunit/wasm/multi-value-interpreter.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-mv --wasm-interpret-all
-
-load("test/mjsunit/wasm/multi-value.js");
diff --git a/deps/v8/test/mjsunit/wasm/nullref-interpreter.js b/deps/v8/test/mjsunit/wasm/nullref-interpreter.js
deleted file mode 100644
index f0411394a7..0000000000
--- a/deps/v8/test/mjsunit/wasm/nullref-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --experimental-wasm-anyref -experimental-wasm-eh
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/nullref.js");
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index 80a41481aa..50f795f770 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -395,5 +395,5 @@ function testErrorPosition(bytes, pos, message) {
]);
let pos = bytes.length - 1 - 1;
- testErrorPositionAsyncOnly(bytes, pos, 'invalid local type');
+ testErrorPositionAsyncOnly(bytes, pos, 'invalid value type');
})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access-interpreter.js b/deps/v8/test/mjsunit/wasm/table-access-interpreter.js
deleted file mode 100644
index 561ac5aca4..0000000000
--- a/deps/v8/test/mjsunit/wasm/table-access-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/table-access.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js b/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js
deleted file mode 100644
index ed9c48b406..0000000000
--- a/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/table-fill.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js
deleted file mode 100644
index 15bbc63a21..0000000000
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --experimental-wasm-anyref
-// Flags: --wasm-interpret-all
-
-// This is just a wrapper for an existing reference types test case that runs
-// with the --wasm-interpret-all flag added. If we ever decide to add a test
-// variant for this, then we can remove this file.
-
-load("test/mjsunit/wasm/table-grow-from-wasm.js");
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 845236cf2e..fc6ce9724c 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -468,15 +468,21 @@ let kExprI64AtomicCompareExchange32U = 0x4e;
// Simd opcodes.
let kExprS128LoadMem = 0x00;
-let kExprS128StoreMem = 0x01;
-let kExprI32x4Splat = 0x0c;
-let kExprF32x4Splat = 0x12;
-let kExprI32x4Eq = 0x2c;
-let kExprS1x8AnyTrue = 0x63;
-let kExprS1x4AllTrue = 0x75;
-let kExprI32x4Add = 0x79;
-let kExprF32x4Min = 0x9e;
-let kExprS8x16LoadSplat = 0xc2;
+let kExprS128StoreMem = 0x0b;
+let kExprS8x16Shuffle = 0x0d;
+let kExprI8x16Splat = 0x0f;
+let kExprI16x8Splat = 0x10;
+let kExprI32x4Splat = 0x11;
+let kExprF32x4Splat = 0x13;
+let kExprI8x16LtU = 0x26;
+let kExprI8x16LeU = 0x2a;
+let kExprI32x4Eq = 0x37;
+let kExprS1x16AnyTrue = 0x62;
+let kExprS1x16AllTrue = 0x63;
+let kExprI8x16Add = 0x6e;
+let kExprI16x8ShrS = [0x8c, 01];
+let kExprS1x4AnyTrue = 0xa2;
+let kExprF32x4Min = 0xe8;
// Compilation hint constants.
let kCompilationHintStrategyDefault = 0x00;
@@ -484,9 +490,8 @@ let kCompilationHintStrategyLazy = 0x01;
let kCompilationHintStrategyEager = 0x02;
let kCompilationHintStrategyLazyBaselineEagerTopTier = 0x03;
let kCompilationHintTierDefault = 0x00;
-let kCompilationHintTierInterpreter = 0x01;
-let kCompilationHintTierBaseline = 0x02;
-let kCompilationHintTierOptimized = 0x03;
+let kCompilationHintTierBaseline = 0x01;
+let kCompilationHintTierOptimized = 0x02;
let kTrapUnreachable = 0;
let kTrapMemOutOfBounds = 1;
diff --git a/deps/v8/test/mjsunit/wasm/worker-interpreter.js b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
deleted file mode 100644
index 276ff079b5..0000000000
--- a/deps/v8/test/mjsunit/wasm/worker-interpreter.js
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-gc
-
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-(function TestPostInterpretedModule() {
- let builder = new WasmModuleBuilder();
- let add = builder.addFunction("add", kSig_i_ii)
- .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
- .exportFunc();
-
- // Trigger a GC to ensure that the underlying native module is not a cached
- // one from a previous run, with functions already redirected to the
- // interpreter. This is not observable from pure JavaScript, but this is
- // observable with the internal runtime functions used in this test.
- gc();
-
- let module = builder.toModule();
- let instance = new WebAssembly.Instance(module);
- let exp = instance.exports;
-
- let workerScript = `
- var instance = null;
- onmessage = function(message) {
- try {
- if (message.command == 'module') {
- instance = new WebAssembly.Instance(message.module);
- postMessage('OK');
- }
- if (message.command == 'call') {
- let result = instance.exports.add(40, 2);
- postMessage(result);
- }
- } catch(e) {
- postMessage('ERROR: ' + e);
- }
- }
- `;
- let worker = new Worker(workerScript, {type: 'string'});
-
- // Call method without using the interpreter.
- var initial_interpreted = %WasmNumInterpretedCalls(instance);
- assertEquals(23, exp.add(20, 3));
- assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
-
- // Send module to the worker, still not interpreting.
- worker.postMessage({ command:'module', module:module });
- assertEquals('OK', worker.getMessage());
- worker.postMessage({ command:'call' });
- assertEquals(42, worker.getMessage());
- assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
-
- // Switch to the interpreter and call method.
- %RedirectToWasmInterpreter(instance, add.index);
- assertEquals(23, exp.add(20, 3));
- assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
-
- // Let worker call interpreted function.
- worker.postMessage({ command:'call' });
- assertEquals(42, worker.getMessage());
- assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
-
- // All done.
- worker.terminate();
-})();
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index cb556331bd..37fb66884f 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -1063,4 +1063,9 @@
'*': [SKIP],
}], # variant == no_wasm_traps
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/test262/BUILD.gn b/deps/v8/test/test262/BUILD.gn
index 093e489df0..c3d71866d2 100644
--- a/deps/v8/test/test262/BUILD.gn
+++ b/deps/v8/test/test262/BUILD.gn
@@ -17,6 +17,7 @@ group("v8_test262") {
"harness-adapt.js",
"harness-adapt-donotevaluate.js",
"harness-agent.js",
+ "harness-ishtmldda.js",
"test262.status",
"testcfg.py",
"local-tests/",
diff --git a/deps/v8/test/test262/harness-ishtmldda.js b/deps/v8/test/test262/harness-ishtmldda.js
new file mode 100644
index 0000000000..bcbadf6dc6
--- /dev/null
+++ b/deps/v8/test/test262/harness-ishtmldda.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+$262.IsHTMLDDA = %GetUndetectable();
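For context: $262.IsHTMLDDA is the test262 host hook for a document.all-style "undetectable" object, which V8 exposes through the %GetUndetectable() native used above. A minimal illustration of the semantics such tests rely on (not part of this commit; assumes d8 with --allow-natives-syntax and the usual mjsunit assert helpers loaded):

// Hypothetical sketch only: an undetectable object reports typeof "undefined"
// and is loosely equal to null/undefined, but not strictly equal to either.
const ddo = %GetUndetectable();
assertEquals('undefined', typeof ddo);
assertTrue(ddo == null);
assertTrue(ddo == undefined);
assertFalse(ddo === null);
assertFalse(ddo === undefined);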
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index c8603bd4fb..6057d3f74c 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -62,16 +62,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4709
'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
- # Intl tests which require flags.
- # https://bugs.chromium.org/p/v8/issues/detail?id=9154
- 'intl402/NumberFormat/numbering-system-options': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/DateTimeFormat/constructor-calendar-numberingSystem-order': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/DateTimeFormat/numbering-system-calendar-options': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/DateTimeFormat/constructor-options-throwing-getters': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/NumberFormat/constructor-options-throwing-getters': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/NumberFormat/constructor-numberingSystem-order': ['--harmony-intl-add-calendar-numbering-system'],
- 'intl402/DateTimeFormat/prototype/formatToParts/pattern-on-calendar': ['--harmony-intl-other-calendars'],
- 'intl402/DateTimeFormat/prototype/formatToParts/related-year': ['--harmony-intl-other-calendars'],
+ # https://github.com/tc39/test262/issues/2591
+ 'intl402/DateTimeFormat/prototype/resolvedOptions/order': ['--no-harmony-intl_dateformat_fractional_second_digits'],
+ 'intl402/DateTimeFormat/prototype/resolvedOptions/order-dayPeriod': ['--no-harmony-intl_dateformat_fractional_second_digits'],
# https://bugs.chromium.org/p/v8/issues/detail?id=9084
'intl402/supportedLocalesOf-consistent-with-resolvedOptions': [FAIL],
@@ -364,11 +357,8 @@
'language/literals/regexp/named-groups/invalid-punctuator-starting-groupspecifier-u': [FAIL_PHASE_ONLY],
'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier': [FAIL_PHASE_ONLY],
'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-u-escape-in-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-u-escape-in-groupspecifier-2': [FAIL_PHASE_ONLY],
'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier': [FAIL_PHASE_ONLY],
'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-dec-esc': [FAIL_PHASE_ONLY],
'language/literals/regexp/u-invalid-class-escape': [FAIL_PHASE_ONLY],
'language/literals/regexp/u-invalid-extended-pattern-char': [FAIL_PHASE_ONLY],
'language/literals/regexp/u-invalid-identity-escape': [FAIL_PHASE_ONLY],
@@ -390,6 +380,9 @@
'language/literals/regexp/u-unicode-esc-non-hex': [FAIL_PHASE_ONLY],
'language/literals/regexp/unicode-escape-nls-err': [FAIL_PHASE_ONLY],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10379
+ 'built-ins/RegExp/named-groups/non-unicode-property-names-valid': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=4628
'language/eval-code/direct/non-definable-function-with-function': [FAIL],
'language/eval-code/direct/non-definable-function-with-variable': [FAIL],
@@ -451,11 +444,6 @@
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7184
- 'annexB/language/expressions/yield/star-iterable-return-emulates-undefined-throws-when-called': [FAIL],
- 'annexB/language/statements/for-await-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
- 'annexB/language/statements/for-of/iterator-close-return-emulates-undefined-throws-when-called': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5690
'language/expressions/call/eval-spread': [FAIL],
'language/expressions/call/eval-spread-empty-leading': [FAIL],
@@ -501,23 +489,27 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=9049
'language/comments/hashbang/use-strict': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=8179
- 'built-ins/FinalizationRegistry/FinalizationRegistryCleanupIteratorPrototype/next-job-not-active-throws': [FAIL],
- 'built-ins/FinalizationRegistry/prototype/cleanupSome/cleanup-throws-in-callback': [FAIL],
- 'built-ins/FinalizationRegistry/prototype/cleanupSome/poisoned-callback-throws': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=10313
- 'built-ins/Date/parse/without-utc-offset': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=9613
'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
'intl402/Intl/getCanonicalLocales/grandfathered': [FAIL],
- 'intl402/Intl/getCanonicalLocales/non-iana-canon': [FAIL],
'intl402/Intl/getCanonicalLocales/preferred-grandfathered': [FAIL],
'intl402/Intl/getCanonicalLocales/preferred-variant': [FAIL],
- 'intl402/Locale/constructor-non-iana-canon': [FAIL],
+ 'intl402/Locale/constructor-apply-options-canonicalizes-twice': [FAIL],
'intl402/Locale/likely-subtags-grandfathered': [FAIL],
+ # http://crbug/v8/10447
+ 'intl402/Intl/getCanonicalLocales/complex-language-subtag-replacement': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/complex-region-subtag-replacement': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/transformed-ext-canonical': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/transformed-ext-invalid': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/unicode-ext-canonicalize-region': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/unicode-ext-canonicalize-subdivision': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/unicode-ext-canonicalize-yes-to-true': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/unicode-ext-key-with-digit': [FAIL],
+
+ # http://crbug/v8/10448
+ 'intl402/Locale/prototype/minimize/removing-likely-subtags-first-adds-likely-subtags': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=9646
'built-ins/ThrowTypeError/name': [FAIL],
'language/expressions/class/name': [FAIL],
@@ -528,34 +520,79 @@
# https://github.com/tc39/test262/pull/2349
'intl402/Locale/constructor-options-region-valid': [FAIL],
+ # http://crbug/v8/10443
+ 'intl402/RelativeTimeFormat/prototype/format/pl-pl-style-long': [FAIL],
+ 'intl402/RelativeTimeFormat/prototype/format/pl-pl-style-narrow': [FAIL],
+ 'intl402/RelativeTimeFormat/prototype/format/pl-pl-style-short': [FAIL],
+ 'intl402/RelativeTimeFormat/prototype/formatToParts/pl-pl-style-long': [FAIL],
+ 'intl402/RelativeTimeFormat/prototype/formatToParts/pl-pl-style-narrow': [FAIL],
+ 'intl402/RelativeTimeFormat/prototype/formatToParts/pl-pl-style-short': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=9818
'built-ins/AsyncFunction/proto-from-ctor-realm': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9808
- 'built-ins/AggregateError/*': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=10111
# super() should evaluate arguments before checking IsConstructable
'language/expressions/super/call-proto-not-ctor': [FAIL],
- # Intl.NumberFormat(..., {signDisplay:'exceptZero'}).format(0.0001)
- # produce +0 due to rounding
- # https://bugs.chromium.org/p/v8/issues/detail?id=9515
- 'intl402/NumberFormat/prototype/format/signDisplay-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-zh-TW': [FAIL],
- 'intl402/NumberFormat/prototype/format/signDisplay-rounding': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-en-US': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-de-DE': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-ja-JP': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-ko-KR': [FAIL],
- 'intl402/NumberFormat/prototype/formatToParts/signDisplay-zh-TW': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=10272
- 'intl402/DateTimeFormat/invalid-numbering-system-calendar-options': [FAIL],
- 'intl402/NumberFormat/invalid-numbering-system-options': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10381
+ 'built-ins/Array/prototype/concat/arg-length-near-integer-limit': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10383
+ 'built-ins/RegExp/prototype/Symbol.replace/fn-invoke-args-empty-result': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=10395
+ 'built-ins/AsyncFromSyncIteratorPrototype/next/absent-value-not-passed': [FAIL],
+ 'built-ins/AsyncFromSyncIteratorPrototype/return/absent-value-not-passed': [FAIL],
+
+ # http://crbug/v8/10449
+ 'built-ins/Atomics/waitAsync/bad-range': [FAIL],
+ 'built-ins/Atomics/waitAsync/descriptor': [FAIL],
+ 'built-ins/Atomics/waitAsync/false-for-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/is-function': [FAIL],
+ 'built-ins/Atomics/waitAsync/length': [FAIL],
+ 'built-ins/Atomics/waitAsync/name': [FAIL],
+ 'built-ins/Atomics/waitAsync/nan-for-timeout-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/negative-index-throws': [FAIL],
+ 'built-ins/Atomics/waitAsync/negative-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/null-for-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/object-for-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/poisoned-object-for-timeout-throws-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/returns-result-object-value-is-promise-resolves-to-ok': [FAIL],
+ 'built-ins/Atomics/waitAsync/returns-result-object-value-is-promise-resolves-to-timed-out': [FAIL],
+ 'built-ins/Atomics/waitAsync/returns-result-object-value-is-string-not-equal': [FAIL],
+ 'built-ins/Atomics/waitAsync/returns-result-object-value-is-string-timed-out': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-index-throws': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-index-throws-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-timeout-throws': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-timeout-throws-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-value-throws': [FAIL],
+ 'built-ins/Atomics/waitAsync/symbol-for-value-throws-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/true-for-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/undefined-for-timeout-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/undefined-for-timeout': [FAIL],
+ 'built-ins/Atomics/waitAsync/undefined-index-defaults-to-zero-agent': [FAIL],
+ 'built-ins/Atomics/waitAsync/value-not-equal': [FAIL],
+ 'built-ins/Atomics/waitAsync/waiterlist-block-indexedposition-wake': [FAIL],
+ 'built-ins/Atomics/waitAsync/was-woken-before-timeout': [FAIL],
+
+ # SKIP the following TIMEOUT tests instead of FAIL
+ 'built-ins/Atomics/waitAsync/false-for-timeout-agent': [SKIP],
+ 'built-ins/Atomics/waitAsync/good-views': [SKIP],
+ 'built-ins/Atomics/waitAsync/negative-timeout-agent': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-no-operation': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-add': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-and': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-compareExchange': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-exchange': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-or': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-store': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-sub': [SKIP],
+ 'built-ins/Atomics/waitAsync/no-spurious-wakeup-on-xor': [SKIP],
+ 'built-ins/Atomics/waitAsync/null-for-timeout-agent': [SKIP],
+ 'built-ins/Atomics/waitAsync/object-for-timeout-agent': [SKIP],
+ 'built-ins/Atomics/waitAsync/true-for-timeout-agent': [SKIP],
+ 'built-ins/Atomics/waitAsync/value-not-equal-agent': [SKIP],
######################## NEEDS INVESTIGATION ###########################
@@ -572,7 +609,10 @@
# https://github.com/tc39/ecma262/pull/889
'annexB/language/function-code/block-decl-func-skip-arguments': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6538
+ # Non-simple assignment targets are runtime errors instead of syntax errors for web compat.
+ 'language/expressions/logical-assignment/lgcl-or-assignment-operator-non-simple-lhs': [FAIL],
+ 'language/expressions/logical-assignment/lgcl-and-assignment-operator-non-simple-lhs': [FAIL],
+ 'language/expressions/logical-assignment/lgcl-nullish-assignment-operator-non-simple-lhs': [FAIL],
############################ INVALID TESTS #############################
@@ -630,6 +670,9 @@
# Unicode regexp case mapping is not available with i18n turned off.
'language/literals/regexp/u-case-mapping': [SKIP],
+ # Unicode in capture group
+ 'built-ins/RegExp/prototype/Symbol.replace/named-groups': [FAIL],
+
# BUG(v8:4437).
'built-ins/String/prototype/normalize/return-normalized-string': [SKIP],
'built-ins/String/prototype/normalize/return-normalized-string-from-coerced-form': [SKIP],
@@ -656,7 +699,18 @@
   # Unicode features unavailable without i18n, i.e. property escapes.
'built-ins/RegExp/property-escapes/*': [SKIP],
'built-ins/RegExp/named-groups/unicode-property-names': [SKIP],
+ 'built-ins/RegExp/named-groups/unicode-property-names-valid': [SKIP],
'built-ins/RegExp/match-indices/indices-array-unicode-property-names': [SKIP],
+
+ # Unicode in identifiers
+ 'language/identifiers/part-unicode-*': [FAIL],
+ 'language/identifiers/start-unicode-1*': [FAIL],
+ 'language/identifiers/start-unicode-5*': [FAIL],
+ 'language/identifiers/start-unicode-6*': [FAIL],
+ 'language/identifiers/start-unicode-7*': [FAIL],
+ 'language/identifiers/start-unicode-8*': [FAIL],
+ 'language/identifiers/start-unicode-9*': [FAIL],
+
}], # no_i18n == True
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
@@ -719,4 +773,9 @@
'intl402/DateTimeFormat/prototype/resolvedOptions/basic': [SKIP],
}], # system == windows
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index e1f1956392..9f1d4b4e4e 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -48,22 +48,23 @@ FEATURE_FLAGS = {
'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
'Intl.DateTimeFormat-quarter': '--harmony-intl-dateformat-quarter',
'Intl.DateTimeFormat-fractionalSecondDigits': '--harmony-intl-dateformat-fractional-second-digits',
- 'Intl.DisplayNames': '--harmony-intl-displaynames',
'String.prototype.replaceAll': '--harmony_string_replaceall',
'Symbol.prototype.description': '--harmony-symbol-description',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
'Promise.allSettled': '--harmony-promise-all-settled',
- 'FinalizationRegistry': '--harmony-weak-refs',
- 'WeakRef': '--harmony-weak-refs',
+ 'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
+ 'WeakRef': '--harmony-weak-refs-with-cleanup-some',
'host-gc-required': '--expose-gc-as=v8GC',
- 'optional-chaining': '--harmony-optional-chaining',
+ 'IsHTMLDDA': '--allow-natives-syntax',
'top-level-await': '--harmony-top-level-await',
'regexp-match-indices': '--harmony-regexp-match-indices',
# https://github.com/tc39/test262/pull/2395
'regexp-named-groups': '--harmony-regexp-match-indices',
'class-methods-private': '--harmony-private-methods',
'class-static-methods-private': '--harmony-private-methods',
- 'coalesce-expression': '--harmony-nullish',
+ 'AggregateError': '--harmony-promise-any',
+ 'logical-assignment-operators': '--harmony-logical-assignment',
+ 'Promise.any': '--harmony-promise-any',
}
SKIPPED_FEATURES = set([])
@@ -206,6 +207,8 @@ class TestCase(testcase.D8TestCase):
list(self.suite.harness) +
([os.path.join(self.suite.root, "harness-agent.js")]
if self.__needs_harness_agent() else []) +
+ ([os.path.join(self.suite.root, "harness-ishtmldda.js")]
+ if "IsHTMLDDA" in self.test_record.get("features", []) else []) +
([os.path.join(self.suite.root, "harness-adapt-donotevaluate.js")]
if self.fail_phase_only and not self._fail_phase_reverse else []) +
self._get_includes() +
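For orientation, FEATURE_FLAGS maps entries from a test's "features:" frontmatter to the d8 flags needed to enable them, and the harness list above prepends harness-ishtmldda.js when a test declares IsHTMLDDA. A simplified, hypothetical sketch of that kind of lookup (the helper name and plumbing are assumptions, not the actual testcfg.py code; test_record.get("features", []) mirrors the usage visible in the hunk above):

def flags_for_features(test_record, feature_flags=FEATURE_FLAGS):
  # Collect the d8 flag for each declared feature that has a mapping;
  # features without a mapping are simply ignored here.
  return [feature_flags[f]
          for f in test_record.get('features', [])
          if f in feature_flags]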
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 0693878cce..8db908bc33 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -2,1226 +2,1231 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Test line comment
+/* Test multiline
+ comment
+*/
+
namespace test {
- macro ElementsKindTestHelper1(kind: constexpr ElementsKind): bool {
- if constexpr (
- (kind == ElementsKind::UINT8_ELEMENTS) ||
- (kind == ElementsKind::UINT16_ELEMENTS)) {
- return true;
- } else {
- return false;
- }
+macro ElementsKindTestHelper1(kind: constexpr ElementsKind): bool {
+ if constexpr (
+ (kind == ElementsKind::UINT8_ELEMENTS) ||
+ (kind == ElementsKind::UINT16_ELEMENTS)) {
+ return true;
+ } else {
+ return false;
}
+}
- macro ElementsKindTestHelper2(kind: constexpr ElementsKind): constexpr bool {
- return (
- (kind == ElementsKind::UINT8_ELEMENTS) ||
- (kind == ElementsKind::UINT16_ELEMENTS));
- }
+macro ElementsKindTestHelper2(kind: constexpr ElementsKind): constexpr bool {
+ return (
+ (kind == ElementsKind::UINT8_ELEMENTS) ||
+ (kind == ElementsKind::UINT16_ELEMENTS));
+}
- macro LabelTestHelper1(): never
- labels Label1 {
- goto Label1;
- }
+macro LabelTestHelper1(): never
+ labels Label1 {
+ goto Label1;
+}
- macro LabelTestHelper2(): never
- labels Label2(Smi) {
- goto Label2(42);
- }
+macro LabelTestHelper2(): never
+ labels Label2(Smi) {
+ goto Label2(42);
+}
- macro LabelTestHelper3(): never
- labels Label3(Oddball, Smi) {
- goto Label3(Null, 7);
- }
+macro LabelTestHelper3(): never
+ labels Label3(Oddball, Smi) {
+ goto Label3(Null, 7);
+}
- @export
- macro TestConstexpr1() {
- check(FromConstexpr<bool>(
- IsFastElementsKind(ElementsKind::PACKED_SMI_ELEMENTS)));
- }
+@export
+macro TestConstexpr1() {
+ check(FromConstexpr<bool>(
+ IsFastElementsKind(ElementsKind::PACKED_SMI_ELEMENTS)));
+}
- @export
- macro TestConstexprIf() {
- check(ElementsKindTestHelper1(ElementsKind::UINT8_ELEMENTS));
- check(ElementsKindTestHelper1(ElementsKind::UINT16_ELEMENTS));
- check(!ElementsKindTestHelper1(ElementsKind::UINT32_ELEMENTS));
- }
+@export
+macro TestConstexprIf() {
+ check(ElementsKindTestHelper1(ElementsKind::UINT8_ELEMENTS));
+ check(ElementsKindTestHelper1(ElementsKind::UINT16_ELEMENTS));
+ check(!ElementsKindTestHelper1(ElementsKind::UINT32_ELEMENTS));
+}
- @export
- macro TestConstexprReturn() {
- check(FromConstexpr<bool>(
- ElementsKindTestHelper2(ElementsKind::UINT8_ELEMENTS)));
- check(FromConstexpr<bool>(
- ElementsKindTestHelper2(ElementsKind::UINT16_ELEMENTS)));
- check(!FromConstexpr<bool>(
- ElementsKindTestHelper2(ElementsKind::UINT32_ELEMENTS)));
- check(FromConstexpr<bool>(
- !ElementsKindTestHelper2(ElementsKind::UINT32_ELEMENTS)));
- }
+@export
+macro TestConstexprReturn() {
+ check(FromConstexpr<bool>(
+ ElementsKindTestHelper2(ElementsKind::UINT8_ELEMENTS)));
+ check(FromConstexpr<bool>(
+ ElementsKindTestHelper2(ElementsKind::UINT16_ELEMENTS)));
+ check(!FromConstexpr<bool>(
+ ElementsKindTestHelper2(ElementsKind::UINT32_ELEMENTS)));
+ check(FromConstexpr<bool>(
+ !ElementsKindTestHelper2(ElementsKind::UINT32_ELEMENTS)));
+}
- @export
- macro TestGotoLabel(): Boolean {
- try {
- LabelTestHelper1() otherwise Label1;
- }
- label Label1 {
- return True;
- }
+@export
+macro TestGotoLabel(): Boolean {
+ try {
+ LabelTestHelper1() otherwise Label1;
+ } label Label1 {
+ return True;
}
+}
- @export
- macro TestGotoLabelWithOneParameter(): Boolean {
- try {
- LabelTestHelper2() otherwise Label2;
- }
- label Label2(smi: Smi) {
- check(smi == 42);
- return True;
- }
+@export
+macro TestGotoLabelWithOneParameter(): Boolean {
+ try {
+ LabelTestHelper2() otherwise Label2;
+ } label Label2(smi: Smi) {
+ check(smi == 42);
+ return True;
}
+}
- @export
- macro TestGotoLabelWithTwoParameters(): Boolean {
- try {
- LabelTestHelper3() otherwise Label3;
- }
- label Label3(o: Oddball, smi: Smi) {
- check(o == Null);
- check(smi == 7);
- return True;
- }
+@export
+macro TestGotoLabelWithTwoParameters(): Boolean {
+ try {
+ LabelTestHelper3() otherwise Label3;
+ } label Label3(o: Oddball, smi: Smi) {
+ check(o == Null);
+ check(smi == 7);
+ return True;
}
+}
- builtin GenericBuiltinTest<T: type>(_c: Context, _param: T): JSAny {
- return Null;
- }
+builtin GenericBuiltinTest<T: type>(_param: T): JSAny {
+ return Null;
+}
- GenericBuiltinTest<JSAny>(_c: Context, param: JSAny): JSAny {
- return param;
- }
+GenericBuiltinTest<JSAny>(param: JSAny): JSAny {
+ return param;
+}
- @export
- macro TestBuiltinSpecialization(c: Context) {
- check(GenericBuiltinTest<Smi>(c, 0) == Null);
- check(GenericBuiltinTest<Smi>(c, 1) == Null);
- check(GenericBuiltinTest<JSAny>(c, Undefined) == Undefined);
- check(GenericBuiltinTest<JSAny>(c, Undefined) == Undefined);
- }
+@export
+macro TestBuiltinSpecialization() {
+ check(GenericBuiltinTest<Smi>(0) == Null);
+ check(GenericBuiltinTest<Smi>(1) == Null);
+ check(GenericBuiltinTest<JSAny>(Undefined) == Undefined);
+ check(GenericBuiltinTest<JSAny>(Undefined) == Undefined);
+}
- macro LabelTestHelper4(flag: constexpr bool): never
- labels Label4, Label5 {
- if constexpr (flag) {
- goto Label4;
- } else {
- goto Label5;
- }
+macro LabelTestHelper4(flag: constexpr bool): never
+ labels Label4, Label5 {
+ if constexpr (flag) {
+ goto Label4;
+ } else {
+ goto Label5;
}
+}
- macro CallLabelTestHelper4(flag: constexpr bool): bool {
- try {
- LabelTestHelper4(flag) otherwise Label4, Label5;
- }
- label Label4 {
- return true;
- }
- label Label5 {
- return false;
- }
+macro CallLabelTestHelper4(flag: constexpr bool): bool {
+ try {
+ LabelTestHelper4(flag) otherwise Label4, Label5;
+ } label Label4 {
+ return true;
+ } label Label5 {
+ return false;
}
+}
- @export
- macro TestPartiallyUnusedLabel(): Boolean {
- const r1: bool = CallLabelTestHelper4(true);
- const r2: bool = CallLabelTestHelper4(false);
+@export
+macro TestPartiallyUnusedLabel(): Boolean {
+ const r1: bool = CallLabelTestHelper4(true);
+ const r2: bool = CallLabelTestHelper4(false);
- if (r1 && !r2) {
- return True;
- } else {
- return False;
- }
+ if (r1 && !r2) {
+ return True;
+ } else {
+ return False;
}
+}
- macro GenericMacroTest<T: type>(_param: T): Object {
- return Undefined;
- }
+macro GenericMacroTest<T: type>(_param: T): Object {
+ return Undefined;
+}
- GenericMacroTest<Object>(param2: Object): Object {
- return param2;
- }
+GenericMacroTest<Object>(param2: Object): Object {
+ return param2;
+}
- macro GenericMacroTestWithLabels<T: type>(_param: T): Object
- labels _X {
- return Undefined;
- }
+macro GenericMacroTestWithLabels<T: type>(_param: T): Object
+labels _X {
+ return Undefined;
+}
- GenericMacroTestWithLabels<Object>(param2: Object): Object
- labels Y {
- return Cast<Smi>(param2) otherwise Y;
- }
+GenericMacroTestWithLabels<Object>(param2: Object): Object
+ labels Y {
+ return Cast<Smi>(param2) otherwise Y;
+}
- @export
- macro TestMacroSpecialization() {
+@export
+macro TestMacroSpecialization() {
+ try {
+ const _smi0: Smi = 0;
+ check(GenericMacroTest<Smi>(0) == Undefined);
+ check(GenericMacroTest<Smi>(1) == Undefined);
+ check(GenericMacroTest<Object>(Null) == Null);
+ check(GenericMacroTest<Object>(False) == False);
+ check(GenericMacroTest<Object>(True) == True);
+ check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
+ check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
try {
- const _smi0: Smi = 0;
- check(GenericMacroTest<Smi>(0) == Undefined);
- check(GenericMacroTest<Smi>(1) == Undefined);
- check(GenericMacroTest<Object>(Null) == Null);
- check(GenericMacroTest<Object>(False) == False);
- check(GenericMacroTest<Object>(True) == True);
- check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
- check((GenericMacroTestWithLabels<Smi>(0) otherwise Fail) == Undefined);
- try {
- GenericMacroTestWithLabels<Object>(False) otherwise Expected;
- }
- label Expected {}
- }
- label Fail {
- unreachable;
- }
+ GenericMacroTestWithLabels<Object>(False) otherwise Expected;
+ } label Expected {}
+ } label Fail {
+ unreachable;
}
+}
- builtin TestHelperPlus1(_context: Context, x: Smi): Smi {
- return x + 1;
- }
- builtin TestHelperPlus2(_context: Context, x: Smi): Smi {
- return x + 2;
- }
+builtin TestHelperPlus1(x: Smi): Smi {
+ return x + 1;
+}
+builtin TestHelperPlus2(x: Smi): Smi {
+ return x + 2;
+}
- @export
- macro TestFunctionPointers(implicit context: Context)(): Boolean {
- let fptr: builtin(Context, Smi) => Smi = TestHelperPlus1;
- check(fptr(context, 42) == 43);
- fptr = TestHelperPlus2;
- check(fptr(context, 42) == 44);
- return True;
- }
+@export
+macro TestFunctionPointers(implicit context: Context)(): Boolean {
+ let fptr: builtin(Smi) => Smi = TestHelperPlus1;
+ check(fptr(42) == 43);
+ fptr = TestHelperPlus2;
+ check(fptr(42) == 44);
+ return True;
+}
- @export
- macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
- let _var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
- let _var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
- return True;
- }
+@export
+macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
+ let _var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
+ let _var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
+ return True;
+}
- @export
- macro TestTernaryOperator(x: Smi): Smi {
- const b: bool = x < 0 ? true : false;
- return b ? x - 10 : x + 100;
- }
+@export
+macro TestTernaryOperator(x: Smi): Smi {
+ const b: bool = x < 0 ? true : false;
+ return b ? x - 10 : x + 100;
+}
- @export
- macro TestFunctionPointerToGeneric(c: Context) {
- const fptr1: builtin(Context, Smi) => JSAny = GenericBuiltinTest<Smi>;
- const fptr2: builtin(Context, JSAny) => JSAny = GenericBuiltinTest<JSAny>;
+@export
+macro TestFunctionPointerToGeneric() {
+ const fptr1: builtin(Smi) => JSAny = GenericBuiltinTest<Smi>;
+ const fptr2: builtin(JSAny) => JSAny = GenericBuiltinTest<JSAny>;
- check(fptr1(c, 0) == Null);
- check(fptr1(c, 1) == Null);
- check(fptr2(c, Undefined) == Undefined);
- check(fptr2(c, Undefined) == Undefined);
- }
+ check(fptr1(0) == Null);
+ check(fptr1(1) == Null);
+ check(fptr2(Undefined) == Undefined);
+ check(fptr2(Undefined) == Undefined);
+}
- type ObjectToObject = builtin(Context, JSAny) => JSAny;
- @export
- macro TestTypeAlias(x: ObjectToObject): BuiltinPtr {
- return x;
- }
+type ObjectToObject = builtin(Context, JSAny) => JSAny;
+@export
+macro TestTypeAlias(x: ObjectToObject): BuiltinPtr {
+ return x;
+}
- @export
- macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
- if (TaggedIsSmi(n)) {
- const m: Smi = UnsafeCast<Smi>(n);
+@export
+macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
+ if (TaggedIsSmi(n)) {
+ const m: Smi = UnsafeCast<Smi>(n);
- check(TestHelperPlus1(context, m) == 11);
- return True;
- }
- return False;
+ check(TestHelperPlus1(m) == 11);
+ return True;
}
+ return False;
+}
- @export
- macro TestHexLiteral() {
- check(Convert<intptr>(0xffff) + 1 == 0x10000);
- check(Convert<intptr>(-0xffff) == -65535);
- }
+@export
+macro TestHexLiteral() {
+ check(Convert<intptr>(0xffff) + 1 == 0x10000);
+ check(Convert<intptr>(-0xffff) == -65535);
+}
- @export
- macro TestLargeIntegerLiterals(implicit c: Context)() {
- let _x: int32 = 0x40000000;
- let _y: int32 = 0x7fffffff;
- }
+@export
+macro TestLargeIntegerLiterals(implicit c: Context)() {
+ let _x: int32 = 0x40000000;
+ let _y: int32 = 0x7fffffff;
+}
- @export
- macro TestMultilineAssert() {
- const someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
- check(
- someVeryLongVariableNameThatWillCauseLineBreaks > 0 &&
- someVeryLongVariableNameThatWillCauseLineBreaks < 10);
- }
+@export
+macro TestMultilineAssert() {
+ const someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
+ check(
+ someVeryLongVariableNameThatWillCauseLineBreaks > 0 &&
+ someVeryLongVariableNameThatWillCauseLineBreaks < 10);
+}
- @export
- macro TestNewlineInString() {
- Print('Hello, World!\n');
- }
+@export
+macro TestNewlineInString() {
+ Print('Hello, World!\n');
+}
- const kConstexprConst: constexpr int31 = 5;
- const kIntptrConst: intptr = 4;
- const kSmiConst: Smi = 3;
+const kConstexprConst: constexpr int31 = 5;
+const kIntptrConst: intptr = 4;
+const kSmiConst: Smi = 3;
- @export
- macro TestModuleConstBindings() {
- check(kConstexprConst == Int32Constant(5));
- check(kIntptrConst == 4);
- check(kSmiConst == 3);
- }
+@export
+macro TestModuleConstBindings() {
+ check(kConstexprConst == Int32Constant(5));
+ check(kIntptrConst == 4);
+ check(kSmiConst == 3);
+}
- @export
- macro TestLocalConstBindings() {
- const x: constexpr int31 = 3;
+@export
+macro TestLocalConstBindings() {
+ const x: constexpr int31 = 3;
+ const xSmi: Smi = x;
+ {
+ const x: Smi = x + FromConstexpr<Smi>(1);
+ check(x == xSmi + 1);
const xSmi: Smi = x;
- {
- const x: Smi = x + FromConstexpr<Smi>(1);
- check(x == xSmi + 1);
- const xSmi: Smi = x;
- check(x == xSmi);
- check(x == 4);
- }
- check(xSmi == 3);
check(x == xSmi);
+ check(x == 4);
}
+ check(xSmi == 3);
+ check(x == xSmi);
+}
- struct TestStructA {
- indexes: FixedArray;
- i: Smi;
- k: Number;
- }
+struct TestStructA {
+ indexes: FixedArray;
+ i: Smi;
+ k: Number;
+}
- struct TestStructB {
- x: TestStructA;
- y: Smi;
- }
+struct TestStructB {
+ x: TestStructA;
+ y: Smi;
+}
- @export
- macro TestStruct1(i: TestStructA): Smi {
- return i.i;
- }
+@export
+macro TestStruct1(i: TestStructA): Smi {
+ return i.i;
+}
- @export
- macro TestStruct2(implicit context: Context)(): TestStructA {
- return TestStructA{
- indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
- i: 27,
- k: 31
- };
- }
+@export
+macro TestStruct2(implicit context: Context)(): TestStructA {
+ return TestStructA{
+ indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
+ i: 27,
+ k: 31
+ };
+}
- @export
- macro TestStruct3(implicit context: Context)(): TestStructA {
- let a: TestStructA =
- TestStructA{indexes: UnsafeCast<FixedArray>(kEmptyFixedArray), i: 13, k: 5};
- let _b: TestStructA = a;
- const c: TestStructA = TestStruct2();
- a.i = TestStruct1(c);
- a.k = a.i;
- let d: TestStructB;
- d.x = a;
- d = TestStructB{x: a, y: 7};
- let _e: TestStructA = d.x;
- let f: Smi = TestStructA{
- indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
- i: 27,
- k: 31
- }.i;
- f = TestStruct2().i;
- return a;
- }
+@export
+macro TestStruct3(implicit context: Context)(): TestStructA {
+ let a: TestStructA =
+ TestStructA{indexes: UnsafeCast<FixedArray>(kEmptyFixedArray), i: 13, k: 5};
+ let _b: TestStructA = a;
+ const c: TestStructA = TestStruct2();
+ a.i = TestStruct1(c);
+ a.k = a.i;
+ let d: TestStructB;
+ d.x = a;
+ d = TestStructB{x: a, y: 7};
+ let _e: TestStructA = d.x;
+ let f: Smi = TestStructA{
+ indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
+ i: 27,
+ k: 31
+ }.i;
+ f = TestStruct2().i;
+ return a;
+}
- struct TestStructC {
- x: TestStructA;
- y: TestStructA;
- }
+struct TestStructC {
+ x: TestStructA;
+ y: TestStructA;
+}
- @export
- macro TestStruct4(implicit context: Context)(): TestStructC {
- return TestStructC{x: TestStruct2(), y: TestStruct2()};
- }
+@export
+macro TestStruct4(implicit context: Context)(): TestStructC {
+ return TestStructC{x: TestStruct2(), y: TestStruct2()};
+}
- macro TestStructInLabel(implicit context: Context)(): never labels
- Foo(TestStructA) {
- goto Foo(TestStruct2());
- }
- @export // Silence unused warning.
- macro CallTestStructInLabel(implicit context: Context)() {
- try {
- TestStructInLabel() otherwise Foo;
+macro TestStructInLabel(implicit context: Context)(): never labels
+Foo(TestStructA) {
+ goto Foo(TestStruct2());
+}
+@export // Silence unused warning.
+macro CallTestStructInLabel(implicit context: Context)() {
+ try {
+ TestStructInLabel() otherwise Foo;
+ } label Foo(_s: TestStructA) {}
+}
+
+// This macro tests different versions of the for-loop where some parts
+// are (not) present.
+@export
+macro TestForLoop() {
+ let sum: Smi = 0;
+ for (let i: Smi = 0; i < 5; ++i) sum += i;
+ check(sum == 10);
+
+ sum = 0;
+ let j: Smi = 0;
+ for (; j < 5; ++j) sum += j;
+ check(sum == 10);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5;) sum += j++;
+ check(sum == 10);
+
+ // Check that break works. No test expression.
+ sum = 0;
+ for (let i: Smi = 0;; ++i) {
+ if (i == 5) break;
+ sum += i;
+ }
+ check(sum == 10);
+
+ sum = 0;
+ j = 0;
+ for (;;) {
+ if (j == 5) break;
+ sum += j;
+ j++;
+ }
+ check(sum == 10);
+
+ // The following tests are the same as above, but use continue to skip
+ // index 3.
+ sum = 0;
+ for (let i: Smi = 0; i < 5; ++i) {
+ if (i == 3) continue;
+ sum += i;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5; ++j) {
+ if (j == 3) continue;
+ sum += j;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5;) {
+ if (j == 3) {
+ j++;
+ continue;
}
- label Foo(_s: TestStructA) {}
+ sum += j;
+ j++;
}
+ check(sum == 7);
- // This macro tests different versions of the for-loop where some parts
- // are (not) present.
- @export
- macro TestForLoop() {
- let sum: Smi = 0;
- for (let i: Smi = 0; i < 5; ++i) sum += i;
- check(sum == 10);
-
- sum = 0;
- let j: Smi = 0;
- for (; j < 5; ++j) sum += j;
- check(sum == 10);
-
- sum = 0;
- j = 0;
- for (; j < 5;) sum += j++;
- check(sum == 10);
-
- // Check that break works. No test expression.
- sum = 0;
- for (let i: Smi = 0;; ++i) {
- if (i == 5) break;
- sum += i;
- }
- check(sum == 10);
+ sum = 0;
+ for (let i: Smi = 0;; ++i) {
+ if (i == 3) continue;
+ if (i == 5) break;
+ sum += i;
+ }
+ check(sum == 7);
- sum = 0;
- j = 0;
- for (;;) {
- if (j == 5) break;
- sum += j;
+ sum = 0;
+ j = 0;
+ for (;;) {
+ if (j == 3) {
j++;
+ continue;
}
- check(sum == 10);
-
- // The following tests are the same as above, but use continue to skip
- // index 3.
- sum = 0;
- for (let i: Smi = 0; i < 5; ++i) {
- if (i == 3) continue;
- sum += i;
- }
- check(sum == 7);
- sum = 0;
- j = 0;
- for (; j < 5; ++j) {
- if (j == 3) continue;
- sum += j;
- }
- check(sum == 7);
-
- sum = 0;
- j = 0;
- for (; j < 5;) {
- if (j == 3) {
- j++;
- continue;
- }
- sum += j;
- j++;
- }
- check(sum == 7);
+ if (j == 5) break;
+ sum += j;
+ j++;
+ }
+ check(sum == 7);
- sum = 0;
- for (let i: Smi = 0;; ++i) {
- if (i == 3) continue;
- if (i == 5) break;
- sum += i;
+ j = 0;
+ try {
+ for (;;) {
+ if (++j == 10) goto Exit;
}
- check(sum == 7);
+ } label Exit {
+ check(j == 10);
+ }
- sum = 0;
- j = 0;
- for (;;) {
- if (j == 3) {
- j++;
- continue;
- }
+ // Test if we can handle uninitialized values on the stack.
+ let _i: Smi;
+ for (let j: Smi = 0; j < 10; ++j) {
+ }
+}
- if (j == 5) break;
- sum += j;
- j++;
- }
- check(sum == 7);
+@export
+macro TestSubtyping(x: Smi) {
+ const _foo: JSAny = x;
+}
- j = 0;
- try {
- for (;;) {
- if (++j == 10) goto Exit;
- }
+macro IncrementIfSmi<A: type>(x: A): A {
+ typeswitch (x) {
+ case (x: Smi): {
+ return x + 1;
}
- label Exit {
- check(j == 10);
+ case (o: A): {
+ return o;
}
+ }
+}
- // Test if we can handle uninitialized values on the stack.
- let _i: Smi;
- for (let j: Smi = 0; j < 10; ++j) {
+type NumberOrFixedArray = Number|FixedArray;
+macro TypeswitchExample(implicit context: Context)(x: NumberOrFixedArray):
+ int32 {
+ let result: int32 = 0;
+ typeswitch (IncrementIfSmi(x)) {
+ case (_x: FixedArray): {
+ result = result + 1;
+ }
+ case (Number): {
+ result = result + 2;
}
}
- @export
- macro TestSubtyping(x: Smi) {
- const _foo: JSAny = x;
- }
+ result = result * 10;
- macro IncrementIfSmi<A: type>(x: A): A {
- typeswitch (x) {
- case (x: Smi): {
- return x + 1;
- }
- case (o: A): {
- return o;
- }
+ typeswitch (IncrementIfSmi(x)) {
+ case (x: Smi): {
+ result = result + Convert<int32>(x);
}
- }
-
- type NumberOrFixedArray = Number|FixedArray;
- macro TypeswitchExample(implicit context: Context)(x: NumberOrFixedArray):
- int32 {
- let result: int32 = 0;
- typeswitch (IncrementIfSmi(x)) {
- case (_x: FixedArray): {
- result = result + 1;
- }
- case (Number): {
- result = result + 2;
- }
+ case (a: FixedArray): {
+ result = result + Convert<int32>(a.length);
}
-
- result = result * 10;
-
- typeswitch (IncrementIfSmi(x)) {
- case (x: Smi): {
- result = result + Convert<int32>(x);
- }
- case (a: FixedArray): {
- result = result + Convert<int32>(a.length);
- }
- case (_x: HeapNumber): {
- result = result + 7;
- }
+ case (_x: HeapNumber): {
+ result = result + 7;
}
-
- return result;
}
- @export
- macro TestTypeswitch(implicit context: Context)() {
- check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
- const a: FixedArray = AllocateZeroedFixedArray(3);
- check(TypeswitchExample(a) == 13);
- check(TypeswitchExample(FromConstexpr<Number>(0.5)) == 27);
- }
+ return result;
+}
- @export
- macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
- typeswitch (obj) {
- case (_o: Smi): {
- }
- case (_o: JSTypedArray): {
- }
- case (_o: JSReceiver): {
- }
- case (_o: HeapObject): {
- }
- }
- }
+@export
+macro TestTypeswitch(implicit context: Context)() {
+ check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
+ const a: FixedArray = AllocateZeroedFixedArray(3);
+ check(TypeswitchExample(a) == 13);
+ check(TypeswitchExample(FromConstexpr<Number>(0.5)) == 27);
+}
- macro ExampleGenericOverload<A: type>(o: Object): A {
- return o;
- }
- macro ExampleGenericOverload<A: type>(o: Smi): A {
- return o + 1;
+@export
+macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
+ typeswitch (obj) {
+ case (_o: Smi): {
+ }
+ case (_o: JSTypedArray): {
+ }
+ case (_o: JSReceiver): {
+ }
+ case (_o: HeapObject): {
+ }
}
+}
- @export
- macro TestGenericOverload(implicit context: Context)() {
- const xSmi: Smi = 5;
- const xObject: Object = xSmi;
- check(ExampleGenericOverload<Smi>(xSmi) == 6);
- check(UnsafeCast<Smi>(ExampleGenericOverload<Object>(xObject)) == 5);
- }
+macro ExampleGenericOverload<A: type>(o: Object): A {
+ return o;
+}
+macro ExampleGenericOverload<A: type>(o: Smi): A {
+ return o + 1;
+}
- @export
- macro TestEquality(implicit context: Context)() {
- const notEqual: bool =
- AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
- check(!notEqual);
- const equal: bool =
- AllocateHeapNumberWithValue(0.5) == AllocateHeapNumberWithValue(0.5);
- check(equal);
- }
+@export
+macro TestGenericOverload(implicit context: Context)() {
+ const xSmi: Smi = 5;
+ const xObject: Object = xSmi;
+ check(ExampleGenericOverload<Smi>(xSmi) == 6);
+ check(UnsafeCast<Smi>(ExampleGenericOverload<Object>(xObject)) == 5);
+}
- @export
- macro TestOrAnd(x: bool, y: bool, z: bool): bool {
- return x || y && z ? true : false;
- }
+@export
+macro TestEquality(implicit context: Context)() {
+ const notEqual: bool =
+ AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
+ check(!notEqual);
+ const equal: bool =
+ AllocateHeapNumberWithValue(0.5) == AllocateHeapNumberWithValue(0.5);
+ check(equal);
+}
- @export
- macro TestAndOr(x: bool, y: bool, z: bool): bool {
- return x && y || z ? true : false;
- }
+@export
+macro TestOrAnd(x: bool, y: bool, z: bool): bool {
+ return x || y && z ? true : false;
+}
- @export
- macro TestLogicalOperators() {
- check(TestAndOr(true, true, true));
- check(TestAndOr(true, true, false));
- check(TestAndOr(true, false, true));
- check(!TestAndOr(true, false, false));
- check(TestAndOr(false, true, true));
- check(!TestAndOr(false, true, false));
- check(TestAndOr(false, false, true));
- check(!TestAndOr(false, false, false));
- check(TestOrAnd(true, true, true));
- check(TestOrAnd(true, true, false));
- check(TestOrAnd(true, false, true));
- check(TestOrAnd(true, false, false));
- check(TestOrAnd(false, true, true));
- check(!TestOrAnd(false, true, false));
- check(!TestOrAnd(false, false, true));
- check(!TestOrAnd(false, false, false));
- }
+@export
+macro TestAndOr(x: bool, y: bool, z: bool): bool {
+ return x && y || z ? true : false;
+}
- @export
- macro TestCall(i: Smi): Smi labels A {
- if (i < 5) return i;
- goto A;
- }
+@export
+macro TestLogicalOperators() {
+ check(TestAndOr(true, true, true));
+ check(TestAndOr(true, true, false));
+ check(TestAndOr(true, false, true));
+ check(!TestAndOr(true, false, false));
+ check(TestAndOr(false, true, true));
+ check(!TestAndOr(false, true, false));
+ check(TestAndOr(false, false, true));
+ check(!TestAndOr(false, false, false));
+ check(TestOrAnd(true, true, true));
+ check(TestOrAnd(true, true, false));
+ check(TestOrAnd(true, false, true));
+ check(TestOrAnd(true, false, false));
+ check(TestOrAnd(false, true, true));
+ check(!TestOrAnd(false, true, false));
+ check(!TestOrAnd(false, false, true));
+ check(!TestOrAnd(false, false, false));
+}
- @export
- macro TestOtherwiseWithCode1() {
- let v: Smi = 0;
- let s: Smi = 1;
- try {
- TestCall(10) otherwise goto B(++s);
- }
- label B(v1: Smi) {
- v = v1;
- }
- assert(v == 2);
- }
+@export
+macro TestCall(i: Smi): Smi labels A {
+ if (i < 5) return i;
+ goto A;
+}
- @export
- macro TestOtherwiseWithCode2() {
- let s: Smi = 0;
- for (let i: Smi = 0; i < 10; ++i) {
- TestCall(i) otherwise break;
- ++s;
- }
- assert(s == 5);
+@export
+macro TestOtherwiseWithCode1() {
+ let v: Smi = 0;
+ let s: Smi = 1;
+ try {
+ TestCall(10) otherwise goto B(++s);
+ } label B(v1: Smi) {
+ v = v1;
}
+ assert(v == 2);
+}
- @export
- macro TestOtherwiseWithCode3() {
- let s: Smi = 0;
- for (let i: Smi = 0; i < 10; ++i) {
- s += TestCall(i) otherwise break;
- }
- assert(s == 10);
+@export
+macro TestOtherwiseWithCode2() {
+ let s: Smi = 0;
+ for (let i: Smi = 0; i < 10; ++i) {
+ TestCall(i) otherwise break;
+ ++s;
}
+ assert(s == 5);
+}
- @export
- macro TestForwardLabel() {
- try {
- goto A;
- }
- label A {
- goto B(5);
- }
- label B(b: Smi) {
- assert(b == 5);
- }
+@export
+macro TestOtherwiseWithCode3() {
+ let s: Smi = 0;
+ for (let i: Smi = 0; i < 10; ++i) {
+ s += TestCall(i) otherwise break;
}
+ assert(s == 10);
+}
- @export
- macro TestQualifiedAccess(implicit context: Context)() {
- const s: Smi = 0;
- check(!Is<JSArray>(s));
+@export
+macro TestForwardLabel() {
+ try {
+ goto A;
+ } label A {
+ goto B(5);
+ } label B(b: Smi) {
+ assert(b == 5);
}
+}
- @export
- macro TestCatch1(implicit context: Context)(): Smi {
- let r: Smi = 0;
- try {
- ThrowTypeError(MessageTemplate::kInvalidArrayLength);
- } catch (_e) {
- r = 1;
- return r;
- }
- }
+@export
+macro TestQualifiedAccess(implicit context: Context)() {
+ const s: Smi = 0;
+ check(!Is<JSArray>(s));
+}
- @export
- macro TestCatch2Wrapper(implicit context: Context)(): never {
+@export
+macro TestCatch1(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+ } catch (_e) {
+ r = 1;
+ return r;
}
+}
- @export
- macro TestCatch2(implicit context: Context)(): Smi {
- let r: Smi = 0;
- try {
- TestCatch2Wrapper();
- } catch (_e) {
- r = 2;
- return r;
- }
- }
+@export
+macro TestCatch2Wrapper(implicit context: Context)(): never {
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+}
- @export
- macro TestCatch3WrapperWithLabel(implicit context: Context)():
- never labels _Abort {
- ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+@export
+macro TestCatch2(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
+ TestCatch2Wrapper();
+ } catch (_e) {
+ r = 2;
+ return r;
}
+}
- @export
- macro TestCatch3(implicit context: Context)(): Smi {
- let r: Smi = 0;
- try {
- TestCatch3WrapperWithLabel() otherwise Abort;
- }
- label Abort {
- return -1;
- }
- catch (_e) {
- r = 2;
- return r;
- }
+@export
+macro TestCatch3WrapperWithLabel(implicit context: Context)():
+ never labels _Abort {
+ ThrowTypeError(MessageTemplate::kInvalidArrayLength);
+}
+
+@export
+macro TestCatch3(implicit context: Context)(): Smi {
+ let r: Smi = 0;
+ try {
+ TestCatch3WrapperWithLabel() otherwise Abort;
+ } catch (_e) {
+ r = 2;
+ return r;
+ } label Abort {
+ return -1;
}
+}
- // This test doesn't actually test the functionality of iterators,
- // it's only purpose is to make sure tha the CSA macros in the
- // IteratorBuiltinsAssembler match the signatures provided in
- // iterator.tq.
- @export
- macro TestIterator(implicit context: Context)(o: JSReceiver, map: Map) {
- try {
- const t1: JSAny = iterator::GetIteratorMethod(o);
- const t2: iterator::IteratorRecord = iterator::GetIterator(o);
+// This test doesn't actually test the functionality of iterators;
+// its only purpose is to make sure that the CSA macros in the
+// IteratorBuiltinsAssembler match the signatures provided in
+// iterator.tq.
+@export
+transitioning macro TestIterator(implicit context: Context)(
+ o: JSReceiver, map: Map) {
+ try {
+ const t1: JSAny = iterator::GetIteratorMethod(o);
+ const t2: iterator::IteratorRecord = iterator::GetIterator(o);
- const _t3: JSAny = iterator::IteratorStep(t2) otherwise Fail;
- const _t4: JSAny = iterator::IteratorStep(t2, map) otherwise Fail;
+ const _t3: JSAny = iterator::IteratorStep(t2) otherwise Fail;
+ const _t4: JSAny = iterator::IteratorStep(t2, map) otherwise Fail;
- const t5: JSAny = iterator::IteratorValue(o);
- const _t6: JSAny = iterator::IteratorValue(o, map);
+ const _t5: JSAny = iterator::IteratorValue(o);
+ const _t6: JSAny = iterator::IteratorValue(o, map);
- const _t7: JSArray = iterator::IterableToList(t1, t1);
+ const _t7: JSArray = iterator::IterableToList(t1, t1);
- iterator::IteratorCloseOnException(t2, t5);
- }
- label Fail {}
- }
+ iterator::IteratorCloseOnException(t2);
+ } label Fail {}
+}
- @export
- macro TestFrame1(implicit context: Context)() {
- const f: Frame = LoadFramePointer();
- const frameType: FrameType =
- Cast<FrameType>(f.context_or_frame_type) otherwise unreachable;
- assert(frameType == STUB_FRAME);
- assert(f.caller == LoadParentFramePointer());
- typeswitch (f) {
- case (_f: StandardFrame): {
- unreachable;
- }
- case (_f: ArgumentsAdaptorFrame): {
- unreachable;
- }
- case (_f: StubFrame): {
- }
+@export
+macro TestFrame1(implicit context: Context)() {
+ const f: Frame = LoadFramePointer();
+ const frameType: FrameType =
+ Cast<FrameType>(f.context_or_frame_type) otherwise unreachable;
+ assert(frameType == STUB_FRAME);
+ assert(f.caller == LoadParentFramePointer());
+ typeswitch (f) {
+ case (_f: StandardFrame): {
+ unreachable;
}
- }
-
- @export
- macro TestNew(implicit context: Context)() {
- const f: JSArray = NewJSArray();
- check(f.IsEmpty());
- f.length = 0;
- }
-
- struct TestInner {
- macro SetX(newValue: int32) {
- this.x = newValue;
+ case (_f: ArgumentsAdaptorFrame): {
+ unreachable;
}
- macro GetX(): int32 {
- return this.x;
+ case (_f: StubFrame): {
}
- x: int32;
- y: int32;
}
+}
- struct TestOuter {
- a: int32;
- b: TestInner;
- c: int32;
- }
+@export
+macro TestNew(implicit context: Context)() {
+ const f: JSArray = NewJSArray();
+ check(f.IsEmpty());
+ f.length = 0;
+}
- @export
- macro TestStructConstructor(implicit context: Context)() {
- // Test default constructor
- let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
- check(a.a == 5);
- check(a.b.x == 6);
- check(a.b.y == 7);
- check(a.c == 8);
- a.b.x = 1;
- check(a.b.x == 1);
- a.b.SetX(2);
- check(a.b.x == 2);
- check(a.b.GetX() == 2);
+struct TestInner {
+ macro SetX(newValue: int32) {
+ this.x = newValue;
}
-
- class InternalClass extends HeapObject {
- macro Flip() labels NotASmi {
- const tmp = Cast<Smi>(this.b) otherwise NotASmi;
- this.b = this.a;
- this.a = tmp;
- }
- a: Smi;
- b: Number;
+ macro GetX(): int32 {
+ return this.x;
}
+ x: int32;
+ y: int32;
+}
- macro NewInternalClass(x: Smi): InternalClass {
- return new InternalClass{a: x, b: x + 1};
- }
+struct TestOuter {
+ a: int32;
+ b: TestInner;
+ c: int32;
+}
- @export
- macro TestInternalClass(implicit context: Context)() {
- const o = NewInternalClass(5);
- o.Flip() otherwise unreachable;
- check(o.a == 6);
- check(o.b == 5);
- }
+@export
+macro TestStructConstructor(implicit context: Context)() {
+ // Test default constructor
+ let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
+ check(a.a == 5);
+ check(a.b.x == 6);
+ check(a.b.y == 7);
+ check(a.c == 8);
+ a.b.x = 1;
+ check(a.b.x == 1);
+ a.b.SetX(2);
+ check(a.b.x == 2);
+ check(a.b.GetX() == 2);
+}
- struct StructWithConst {
- macro TestMethod1(): int32 {
- return this.b;
- }
- macro TestMethod2(): Object {
- return this.a;
- }
- a: Object;
- const b: int32;
+class InternalClass extends HeapObject {
+ macro Flip() labels NotASmi {
+ const tmp = Cast<Smi>(this.b) otherwise NotASmi;
+ this.b = this.a;
+ this.a = tmp;
}
+ a: Smi;
+ b: Number;
+}
- @export
- macro TestConstInStructs() {
- const x = StructWithConst{a: Null, b: 1};
- let y = StructWithConst{a: Null, b: 1};
- y.a = Undefined;
- const _copy = x;
+macro NewInternalClass(x: Smi): InternalClass {
+ return new InternalClass{a: x, b: x + 1};
+}
- check(x.TestMethod1() == 1);
- check(x.TestMethod2() == Null);
- }
+@export
+macro TestInternalClass(implicit context: Context)() {
+ const o = NewInternalClass(5);
+ o.Flip() otherwise unreachable;
+ check(o.a == 6);
+ check(o.b == 5);
+}
- @export
- macro TestParentFrameArguments(implicit context: Context)() {
- const parentFrame = LoadParentFramePointer();
- const castFrame = Cast<StandardFrame>(parentFrame) otherwise unreachable;
- const arguments = GetFrameArguments(castFrame, 1);
- ArgumentsIterator{arguments, current: 0};
+struct StructWithConst {
+ macro TestMethod1(): int32 {
+ return this.b;
}
-
- struct TestIterator {
- macro Next(): Object labels NoMore {
- if (this.count-- == 0) goto NoMore;
- return TheHole;
- }
- count: Smi;
+ macro TestMethod2(): Object {
+ return this.a;
}
+ a: Object;
+ const b: int32;
+}
- @export
- macro TestNewFixedArrayFromSpread(implicit context: Context)(): Object {
- let i = TestIterator{count: 5};
- return new FixedArray{map: kFixedArrayMap, length: 5, objects: ...i};
- }
+@export
+macro TestConstInStructs() {
+ const x = StructWithConst{a: Null, b: 1};
+ let y = StructWithConst{a: Null, b: 1};
+ y.a = Undefined;
+ const _copy = x;
- class SmiPair extends HeapObject {
- macro GetA():&Smi {
- return & this.a;
- }
- a: Smi;
- b: Smi;
- }
+ check(x.TestMethod1() == 1);
+ check(x.TestMethod2() == Null);
+}
- macro Swap<T: type>(a:&T, b:&T) {
- const tmp = * a;
- * a = * b;
- * b = tmp;
- }
+@export
+macro TestParentFrameArguments(implicit context: Context)() {
+ const parentFrame = LoadParentFramePointer();
+ const castFrame = Cast<StandardFrame>(parentFrame) otherwise unreachable;
+ const arguments = GetFrameArguments(castFrame, 1);
+ ArgumentsIterator{arguments, current: 0};
+}
- @export
- macro TestReferences() {
- const array = new SmiPair{a: 7, b: 2};
- const ref:&Smi = & array.a;
- * ref = 3 + * ref;
- -- * ref;
- Swap(& array.b, array.GetA());
- check(array.a == 2);
- check(array.b == 9);
+struct TestIterator {
+ macro Next(): Object labels NoMore {
+ if (this.count-- == 0) goto NoMore;
+ return TheHole;
}
+ count: Smi;
+}
- @export
- macro TestSlices() {
- const it = TestIterator{count: 3};
- const a = new FixedArray{map: kFixedArrayMap, length: 3, objects: ...it};
- check(a.length == 3);
+@export
+macro TestNewFixedArrayFromSpread(implicit context: Context)(): Object {
+ let i = TestIterator{count: 5};
+ return new FixedArray{map: kFixedArrayMap, length: 5, objects: ...i};
+}
- const oneTwoThree = Convert<Smi>(123);
- a.objects[0] = oneTwoThree;
- const firstRef:&Object = & a.objects[0];
- check(TaggedEqual(* firstRef, oneTwoThree));
+class SmiPair extends HeapObject {
+ macro GetA():&Smi {
+ return & this.a;
+ }
+ a: Smi;
+ b: Smi;
+}
- const slice: torque_internal::Slice<Object> = & a.objects;
- const firstRefAgain:&Object = slice.TryAtIndex(0) otherwise unreachable;
- check(TaggedEqual(* firstRefAgain, oneTwoThree));
+macro Swap<T: type>(a:&T, b:&T) {
+ const tmp = * a;
+ * a = * b;
+ * b = tmp;
+}
- const threeTwoOne = Convert<Smi>(321);
- * firstRefAgain = threeTwoOne;
- check(TaggedEqual(a.objects[0], threeTwoOne));
+@export
+macro TestReferences() {
+ const array = new SmiPair{a: 7, b: 2};
+ const ref:&Smi = & array.a;
+ * ref = 3 + * ref;
+ -- * ref;
+ Swap(& array.b, array.GetA());
+ check(array.a == 2);
+ check(array.b == 9);
+}
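+
+// Trace of the checks above, purely for illustration: a starts at 7,
+// `* ref = 3 + * ref` makes it 10, `-- * ref` makes it 9, and the swap then
+// leaves a == 2 and b == 9.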
- // *slice; // error, not allowed
- // a.objects; // error, not allowed
- // a.objects = slice; // error, not allowed
+@export
+macro TestSlices() {
+ const it = TestIterator{count: 3};
+ const a = new FixedArray{map: kFixedArrayMap, length: 3, objects: ...it};
+ check(a.length == 3);
- // TODO(gsps): Currently errors, but should be allowed:
- // const _sameSlice: torque_internal::Slice<Object> = &(*slice);
- // (*slice)[0] : Smi
- }
+ const oneTwoThree = Convert<Smi>(123);
+ a.objects[0] = oneTwoThree;
+ const firstRef:&Object = & a.objects[0];
+ check(TaggedEqual(* firstRef, oneTwoThree));
- @export
- macro TestSliceEnumeration(implicit context: Context)(): Undefined {
- const fixedArray: FixedArray = AllocateZeroedFixedArray(3);
- for (let i: intptr = 0; i < 3; i++) {
- check(UnsafeCast<Smi>(fixedArray.objects[i]) == 0);
- fixedArray.objects[i] = Convert<Smi>(i) + 3;
- }
+ const slice: torque_internal::Slice<Object> = & a.objects;
+ const firstRefAgain:&Object = slice.TryAtIndex(0) otherwise unreachable;
+ check(TaggedEqual(* firstRefAgain, oneTwoThree));
- let slice = & fixedArray.objects;
- for (let i: intptr = 0; i < slice.length; i++) {
- let ref = slice.TryAtIndex(i) otherwise unreachable;
- const value = UnsafeCast<Smi>(* ref);
- check(value == Convert<Smi>(i) + 3);
- * ref = value + 4;
- }
+ const threeTwoOne = Convert<Smi>(321);
+ * firstRefAgain = threeTwoOne;
+ check(TaggedEqual(a.objects[0], threeTwoOne));
- let it = slice.Iterator();
- let count: Smi = 0;
- while (true) {
- const value = UnsafeCast<Smi>(it.Next() otherwise break);
- check(value == count + 7);
- count++;
- }
- check(count == 3);
- check(it.Empty());
+ // *slice; // error, not allowed
+ // a.objects; // error, not allowed
+ // a.objects = slice; // error, not allowed
- return Undefined;
- }
+ // TODO(gsps): Currently errors, but should be allowed:
+ // const _sameSlice: torque_internal::Slice<Object> = &(*slice);
+ // (*slice)[0] : Smi
+}
- @export
- macro TestStaticAssert() {
- StaticAssert(1 + 2 == 3);
+@export
+macro TestSliceEnumeration(implicit context: Context)(): Undefined {
+ const fixedArray: FixedArray = AllocateZeroedFixedArray(3);
+ for (let i: intptr = 0; i < 3; i++) {
+ check(UnsafeCast<Smi>(fixedArray.objects[i]) == 0);
+ fixedArray.objects[i] = Convert<Smi>(i) + 3;
}
- class SmiBox extends HeapObject {
- value: Smi;
- unrelated: Smi;
+ let slice = & fixedArray.objects;
+ for (let i: intptr = 0; i < slice.length; i++) {
+ let ref = slice.TryAtIndex(i) otherwise unreachable;
+ const value = UnsafeCast<Smi>(* ref);
+ check(value == Convert<Smi>(i) + 3);
+ * ref = value + 4;
}
- builtin NewSmiBox(implicit context: Context)(value: Smi): SmiBox {
- return new SmiBox{value, unrelated: 0};
+ let it = slice.Iterator();
+ let count: Smi = 0;
+ while (true) {
+ const value = UnsafeCast<Smi>(it.Next() otherwise break);
+ check(value == count + 7);
+ count++;
}
+ check(count == 3);
+ check(it.Empty());
- @export
- macro TestLoadEliminationFixed(implicit context: Context)() {
- const box = NewSmiBox(123);
- const v1 = box.value;
- box.unrelated = 999;
- const v2 = (box.unrelated == 0) ? box.value : box.value;
- StaticAssert(TaggedEqual(v1, v2));
-
- box.value = 11;
- const v3 = box.value;
- const eleven: Smi = 11;
- StaticAssert(TaggedEqual(v3, eleven));
- }
+ return Undefined;
+}
- @export
- macro TestLoadEliminationVariable(implicit context: Context)() {
- const a = UnsafeCast<FixedArray>(kEmptyFixedArray);
- const box = NewSmiBox(1);
- const v1 = a.objects[box.value];
- const u1 = a.objects[box.value + 2];
- const v2 = a.objects[box.value];
- const u2 = a.objects[box.value + 2];
- StaticAssert(TaggedEqual(v1, v2));
- StaticAssert(TaggedEqual(u1, u2));
- }
+@export
+macro TestStaticAssert() {
+ StaticAssert(1 + 2 == 3);
+}
+
+class SmiBox extends HeapObject {
+ value: Smi;
+ unrelated: Smi;
+}
+
+builtin NewSmiBox(implicit context: Context)(value: Smi): SmiBox {
+ return new SmiBox{value, unrelated: 0};
+}
+
+@export
+macro TestLoadEliminationFixed(implicit context: Context)() {
+ const box = NewSmiBox(123);
+ const v1 = box.value;
+ box.unrelated = 999;
+ const v2 = (box.unrelated == 0) ? box.value : box.value;
+ StaticAssert(TaggedEqual(v1, v2));
+
+ box.value = 11;
+ const v3 = box.value;
+ const eleven: Smi = 11;
+ StaticAssert(TaggedEqual(v3, eleven));
+}
+
+@export
+macro TestLoadEliminationVariable(implicit context: Context)() {
+ const a = UnsafeCast<FixedArray>(kEmptyFixedArray);
+ const box = NewSmiBox(1);
+ const v1 = a.objects[box.value];
+ const u1 = a.objects[box.value + 2];
+ const v2 = a.objects[box.value];
+ const u2 = a.objects[box.value + 2];
+ StaticAssert(TaggedEqual(v1, v2));
+ StaticAssert(TaggedEqual(u1, u2));
+}
- @export
- macro TestRedundantArrayElementCheck(implicit context: Context)(): Smi {
- const a = kEmptyFixedArray;
- for (let i: Smi = 0; i < a.length; i++) {
+@export
+macro TestRedundantArrayElementCheck(implicit context: Context)(): Smi {
+ const a = kEmptyFixedArray;
+ for (let i: Smi = 0; i < a.length; i++) {
+ if (a.objects[i] == TheHole) {
if (a.objects[i] == TheHole) {
- if (a.objects[i] == TheHole) {
- return -1;
- } else {
- StaticAssert(false);
- }
+ return -1;
+ } else {
+ StaticAssert(false);
}
}
- return 1;
}
+ return 1;
+}
- @export
- macro TestRedundantSmiCheck(implicit context: Context)(): Smi {
- const a = kEmptyFixedArray;
- const x = a.objects[1];
- typeswitch (x) {
- case (Smi): {
- Cast<Smi>(x) otherwise VerifiedUnreachable();
- return -1;
- }
- case (Object): {
- }
+@export
+macro TestRedundantSmiCheck(implicit context: Context)(): Smi {
+ const a = kEmptyFixedArray;
+ const x = a.objects[1];
+ typeswitch (x) {
+ case (Smi): {
+ Cast<Smi>(x) otherwise VerifiedUnreachable();
+ return -1;
+ }
+ case (Object): {
}
- return 1;
}
+ return 1;
+}
- struct SBox<T: type> {
- value: T;
- }
+struct SBox<T: type> {
+ value: T;
+}
- @export
- macro TestGenericStruct1(): intptr {
- const i: intptr = 123;
- let box = SBox{value: i};
- let boxbox: SBox<SBox<intptr>> = SBox{value: box};
- check(box.value == 123);
- boxbox.value.value *= 2;
- check(boxbox.value.value == 246);
- return boxbox.value.value;
- }
+@export
+macro TestGenericStruct1(): intptr {
+ const i: intptr = 123;
+ let box = SBox{value: i};
+ let boxbox: SBox<SBox<intptr>> = SBox{value: box};
+ check(box.value == 123);
+ boxbox.value.value *= 2;
+ check(boxbox.value.value == 246);
+ return boxbox.value.value;
+}
- struct TestTuple<T1: type, T2: type> {
- const fst: T1;
- const snd: T2;
- }
+struct TestTuple<T1: type, T2: type> {
+ const fst: T1;
+ const snd: T2;
+}
- macro TupleSwap<T1: type, T2: type>(tuple: TestTuple<T1, T2>):
- TestTuple<T2, T1> {
- return TestTuple{fst: tuple.snd, snd: tuple.fst};
- }
+macro TupleSwap<T1: type, T2: type>(tuple: TestTuple<T1, T2>):
+ TestTuple<T2, T1> {
+ return TestTuple{fst: tuple.snd, snd: tuple.fst};
+}
- @export
- macro TestGenericStruct2():
- TestTuple<TestTuple<intptr, Smi>, TestTuple<Smi, intptr>> {
- const intptrAndSmi = TestTuple<intptr, Smi>{fst: 1, snd: 2};
- const smiAndIntptr = TupleSwap(intptrAndSmi);
- check(intptrAndSmi.fst == smiAndIntptr.snd);
- check(intptrAndSmi.snd == smiAndIntptr.fst);
- const tupleTuple =
- TestTuple<TestTuple<intptr, Smi>>{fst: intptrAndSmi, snd: smiAndIntptr};
- return tupleTuple;
- }
+@export
+macro TestGenericStruct2():
+ TestTuple<TestTuple<intptr, Smi>, TestTuple<Smi, intptr>> {
+ const intptrAndSmi = TestTuple<intptr, Smi>{fst: 1, snd: 2};
+ const smiAndIntptr = TupleSwap(intptrAndSmi);
+ check(intptrAndSmi.fst == smiAndIntptr.snd);
+ check(intptrAndSmi.snd == smiAndIntptr.fst);
+ const tupleTuple =
+ TestTuple<TestTuple<intptr, Smi>>{fst: intptrAndSmi, snd: smiAndIntptr};
+ return tupleTuple;
+}
- macro BranchAndWriteResult(x: Smi, box: SmiBox): bool {
- if (x > 5 || x < 0) {
- box.value = 1;
- return true;
- } else {
- box.value = 2;
- return false;
- }
+macro BranchAndWriteResult(x: Smi, box: SmiBox): bool {
+ if (x > 5 || x < 0) {
+ box.value = 1;
+ return true;
+ } else {
+ box.value = 2;
+ return false;
}
+}
- @export
- macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi) {
- const box = NewSmiBox(1);
- // If the two branches get combined into one, we should be able to determine
- // the value of {box} statically.
- if (BranchAndWriteResult(input, box)) {
- StaticAssert(box.value == 1);
- } else {
- StaticAssert(box.value == 2);
- }
+@export
+macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi) {
+ const box = NewSmiBox(1);
+ // If the two branches get combined into one, we should be able to determine
+ // the value of {box} statically.
+ if (BranchAndWriteResult(input, box)) {
+ StaticAssert(box.value == 1);
+ } else {
+ StaticAssert(box.value == 2);
}
+}
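+
+// A worked instance of the comment above, values chosen only for
+// illustration: input == 7 takes the x > 5 arm of BranchAndWriteResult, so
+// box.value is 1 and the call returns true, matching the true branch's
+// StaticAssert; input == 3 takes the else arm, writing 2 and returning false.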
- bitfield struct TestBitFieldStruct extends uint8 {
- a: bool: 1 bit;
- b: uint16: 3 bit;
- c: uint32: 3 bit;
- d: bool: 1 bit;
- }
+bitfield struct TestBitFieldStruct extends uint8 {
+ a: bool: 1 bit;
+ b: uint16: 3 bit;
+ c: uint32: 3 bit;
+ d: bool: 1 bit;
+}
- @export
- macro TestBitFieldLoad(
- val: TestBitFieldStruct, expectedA: bool, expectedB: uint16,
- expectedC: uint32, expectedD: bool) {
- check(val.a == expectedA);
- check(val.b == expectedB);
- check(val.c == expectedC);
- check(val.d == expectedD);
- }
+@export
+macro TestBitFieldLoad(
+ val: TestBitFieldStruct, expectedA: bool, expectedB: uint16,
+ expectedC: uint32, expectedD: bool) {
+ check(val.a == expectedA);
+ check(val.b == expectedB);
+ check(val.c == expectedC);
+ check(val.d == expectedD);
+}
- @export
- macro TestBitFieldStore(val: TestBitFieldStruct) {
- let val: TestBitFieldStruct = val; // Get a mutable local copy.
- const a: bool = val.a;
- const b: uint16 = val.b;
- let c: uint32 = val.c;
- const d: bool = val.d;
+@export
+macro TestBitFieldStore(val: TestBitFieldStruct) {
+ let val: TestBitFieldStruct = val; // Get a mutable local copy.
+ const a: bool = val.a;
+ const b: uint16 = val.b;
+ let c: uint32 = val.c;
+ const d: bool = val.d;
- val.a = !a;
- TestBitFieldLoad(val, !a, b, c, d);
+ val.a = !a;
+ TestBitFieldLoad(val, !a, b, c, d);
- c = Unsigned(7 - Signed(val.c));
- val.c = c;
- TestBitFieldLoad(val, !a, b, c, d);
+ c = Unsigned(7 - Signed(val.c));
+ val.c = c;
+ TestBitFieldLoad(val, !a, b, c, d);
- val.d = val.b == val.c;
- TestBitFieldLoad(val, !a, b, c, b == c);
- }
+ val.d = val.b == val.c;
+ TestBitFieldLoad(val, !a, b, c, b == c);
+}
- // Some other bitfield structs, to verify getting uintptr values out of word32
- // structs and vice versa.
- bitfield struct TestBitFieldStruct2 extends uint32 {
- a: uintptr: 5 bit;
- b: uintptr: 6 bit;
- }
- bitfield struct TestBitFieldStruct3 extends uintptr {
- c: bool: 1 bit;
- d: uint32: 9 bit;
- e: uintptr: 17 bit;
- }
+@export
+macro TestBitFieldInit(a: bool, b: uint16, c: uint32, d: bool) {
+ const val: TestBitFieldStruct = TestBitFieldStruct{a: a, b: b, c: c, d: d};
+ TestBitFieldLoad(val, a, b, c, d);
+}
- @export
- macro TestBitFieldUintptrOps(
- val2: TestBitFieldStruct2, val3: TestBitFieldStruct3) {
- let val2: TestBitFieldStruct2 = val2; // Get a mutable local copy.
- let val3: TestBitFieldStruct3 = val3; // Get a mutable local copy.
-
- // Caller is expected to provide these exact values, so we can verify
- // reading values before starting to write anything.
- check(val2.a == 3);
- check(val2.b == 61);
- check(val3.c);
- check(val3.d == 500);
- check(val3.e == 0x1cc);
-
- val2.b = 16;
- check(val2.a == 3);
- check(val2.b == 16);
-
- val2.b++;
- check(val2.a == 3);
- check(val2.b == 17);
-
- val3.d = 99;
- val3.e = 1234;
- check(val3.c);
- check(val3.d == 99);
- check(val3.e == 1234);
- }
+// Some other bitfield structs, to verify getting uintptr values out of word32
+// structs and vice versa.
+bitfield struct TestBitFieldStruct2 extends uint32 {
+ a: uintptr: 5 bit;
+ b: uintptr: 6 bit;
+}
+bitfield struct TestBitFieldStruct3 extends uintptr {
+ c: bool: 1 bit;
+ d: uint32: 9 bit;
+ e: uintptr: 17 bit;
+}
- @export
- class ExportedSubClass extends ExportedSubClassBase {
- c_field: int32;
- d_field: int32;
- e_field: Smi;
- }
+@export
+macro TestBitFieldUintptrOps(
+ val2: TestBitFieldStruct2, val3: TestBitFieldStruct3) {
+ let val2: TestBitFieldStruct2 = val2; // Get a mutable local copy.
+ let val3: TestBitFieldStruct3 = val3; // Get a mutable local copy.
+
+ // Caller is expected to provide these exact values, so we can verify
+ // reading values before starting to write anything.
+ check(val2.a == 3);
+ check(val2.b == 61);
+ check(val3.c);
+ check(val3.d == 500);
+ check(val3.e == 0x1cc);
+
+ val2.b = 16;
+ check(val2.a == 3);
+ check(val2.b == 16);
+
+ val2.b++;
+ check(val2.a == 3);
+ check(val2.b == 17);
+
+ val3.d = 99;
+ val3.e = 1234;
+ check(val3.c);
+ check(val3.d == 99);
+ check(val3.e == 1234);
+}
- @export
- class ExportedSubClassBase extends HeapObject {
- a: HeapObject;
- b: HeapObject;
- }
+@export
+class ExportedSubClass extends ExportedSubClassBase {
+ c_field: int32;
+ d_field: int32;
+ e_field: Smi;
+}
- class InternalClassWithSmiElements extends FixedArrayBase {
- data: Smi;
- object: Oddball;
- entries[length]: Smi;
- }
+@export
+class ExportedSubClassBase extends HeapObject {
+ a: HeapObject;
+ b: HeapObject;
+}
- struct InternalClassStructElement {
- a: Smi;
- b: Smi;
- }
+@abstract
+class AbstractInternalClass extends HeapObject {
+}
- class InternalClassWithStructElements extends HeapObject {
- dummy1: int32;
- dummy2: int32;
- const count: Smi;
- data: Smi;
- object: Object;
- entries[count]: Smi;
- more_entries[count]: InternalClassStructElement;
- }
+class AbstractInternalClassSubclass1 extends AbstractInternalClass {}
- struct SmiGeneratorIterator {
- macro Next(): Smi labels _NoMore {
- return this.value++;
- }
- value: Smi;
- }
+class AbstractInternalClassSubclass2 extends AbstractInternalClass {}
- struct InternalClassStructElementGeneratorIterator {
- macro Next(): InternalClassStructElement labels _NoMore {
- return InternalClassStructElement{a: this.value++, b: this.value++};
- }
- value: Smi;
- }
+class InternalClassWithSmiElements extends FixedArrayBase {
+ data: Smi;
+ object: Oddball;
+ entries[length]: Smi;
+}
- @export
- macro TestFullyGeneratedClassWithElements() {
- // Test creation, initialization and access of a fully generated class with
- // simple (Smi) elements
- const length: Smi = Convert<Smi>(3);
- const object1 = new InternalClassWithSmiElements{
- length,
- data: 0,
- object: Undefined,
- entries: ...SmiGeneratorIterator {
- value: 11
- }
- };
- assert(object1.length == 3);
- assert(object1.data == 0);
- assert(object1.object == Undefined);
- assert(object1.entries[0] == 11);
- assert(object1.entries[1] == 12);
- assert(object1.entries[2] == 13);
-
- // Test creation, initialization and access of a fully generated class
- // with elements that are a struct.
- const object2 = new InternalClassWithStructElements{
- dummy1: 44,
- dummy2: 45,
- count: length,
- data: 55,
- object: Undefined,
- entries: ...SmiGeneratorIterator{value: 3},
- more_entries: ...InternalClassStructElementGeneratorIterator {
- value: 1
- }
- };
-
- assert(object2.dummy1 == 44);
- assert(object2.dummy2 == 45);
- assert(object2.count == 3);
- assert(object2.data == 55);
- assert(object2.object == Undefined);
- assert(object2.entries[0] == 3);
- assert(object2.entries[1] == 4);
- assert(object2.entries[2] == 5);
- assert(object2.more_entries[0].a == 1);
- assert(object2.more_entries[0].b == 2);
- assert(object2.more_entries[1].a == 3);
- assert(object2.more_entries[1].b == 4);
- assert(object2.more_entries[2].a == 5);
- assert(object2.more_entries[2].b == 6);
+struct InternalClassStructElement {
+ a: Smi;
+ b: Smi;
+}
+
+class InternalClassWithStructElements extends HeapObject {
+ dummy1: int32;
+ dummy2: int32;
+ const count: Smi;
+ data: Smi;
+ object: Object;
+ entries[count]: Smi;
+ more_entries[count]: InternalClassStructElement;
+}
+
+struct SmiGeneratorIterator {
+ macro Next(): Smi labels _NoMore {
+ return this.value++;
}
+ value: Smi;
+}
- @export
- macro TestFullyGeneratedClassFromCpp(): ExportedSubClass {
- return new
- ExportedSubClass{a: Null, b: Null, c_field: 7, d_field: 8, e_field: 9};
+struct InternalClassStructElementGeneratorIterator {
+ macro Next(): InternalClassStructElement labels _NoMore {
+ return InternalClassStructElement{a: this.value++, b: this.value++};
}
+ value: Smi;
+}
+
+@export
+macro TestFullyGeneratedClassWithElements() {
+ // Test creation, initialization and access of a fully generated class with
+  // simple (Smi) elements.
+ const length: Smi = Convert<Smi>(3);
+ const object1 = new InternalClassWithSmiElements{
+ length,
+ data: 0,
+ object: Undefined,
+ entries: ...SmiGeneratorIterator {
+ value: 11
+ }
+ };
+ assert(object1.length == 3);
+ assert(object1.data == 0);
+ assert(object1.object == Undefined);
+ assert(object1.entries[0] == 11);
+ assert(object1.entries[1] == 12);
+ assert(object1.entries[2] == 13);
+
+  // Test creation, initialization and access of a fully generated class
+  // whose elements are structs.
+ const object2 = new InternalClassWithStructElements{
+ dummy1: 44,
+ dummy2: 45,
+ count: length,
+ data: 55,
+ object: Undefined,
+ entries: ...SmiGeneratorIterator{value: 3},
+ more_entries: ...InternalClassStructElementGeneratorIterator {
+ value: 1
+ }
+ };
+
+ assert(object2.dummy1 == 44);
+ assert(object2.dummy2 == 45);
+ assert(object2.count == 3);
+ assert(object2.data == 55);
+ assert(object2.object == Undefined);
+ assert(object2.entries[0] == 3);
+ assert(object2.entries[1] == 4);
+ assert(object2.entries[2] == 5);
+ assert(object2.more_entries[0].a == 1);
+ assert(object2.more_entries[0].b == 2);
+ assert(object2.more_entries[1].a == 3);
+ assert(object2.more_entries[1].b == 4);
+ assert(object2.more_entries[2].a == 5);
+ assert(object2.more_entries[2].b == 6);
+}
+
+@export
+macro TestFullyGeneratedClassFromCpp(): ExportedSubClass {
+ return new
+ ExportedSubClass{a: Null, b: Null, c_field: 7, d_field: 8, e_field: 9};
+}
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index f3b060e5da..ae69c24821 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -44,19 +44,35 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
- "heap/cppgc/allocation_unittest.cc",
- "heap/cppgc/finalizer-trait_unittest.cc",
- "heap/cppgc/garbage-collected_unittest.cc",
- "heap/cppgc/gc-info_unittest.cc",
- "heap/cppgc/heap-object-header_unittest.cc",
- "heap/cppgc/stack_unittest.cc",
+ "heap/cppgc/custom-spaces-unittest.cc",
+ "heap/cppgc/finalizer-trait-unittest.cc",
+ "heap/cppgc/free-list-unittest.cc",
+ "heap/cppgc/garbage-collected-unittest.cc",
+ "heap/cppgc/gc-info-unittest.cc",
+ "heap/cppgc/heap-object-header-unittest.cc",
+ "heap/cppgc/heap-page-unittest.cc",
+ "heap/cppgc/heap-unittest.cc",
+ "heap/cppgc/logging-unittest.cc",
+ "heap/cppgc/marker-unittest.cc",
+ "heap/cppgc/marking-visitor-unittest.cc",
+ "heap/cppgc/member-unittest.cc",
+ "heap/cppgc/object-start-bitmap-unittest.cc",
+ "heap/cppgc/page-memory-unittest.cc",
+ "heap/cppgc/persistent-unittest.cc",
+ "heap/cppgc/prefinalizer-unittest.cc",
+ "heap/cppgc/source-location-unittest.cc",
+ "heap/cppgc/stack-unittest.cc",
+ "heap/cppgc/sweeper-unittest.cc",
"heap/cppgc/tests.cc",
"heap/cppgc/tests.h",
+ "heap/cppgc/visitor-unittest.cc",
+ "heap/cppgc/worklist-unittest.cc",
]
configs = [
"../..:external_config",
"../..:internal_config_base",
+ "../..:cppgc_base_config",
]
deps = [
@@ -124,7 +140,6 @@ v8_source_set("unittests_sources") {
"base/functional-unittest.cc",
"base/ieee754-unittest.cc",
"base/iterator-unittest.cc",
- "base/list-unittest.cc",
"base/logging-unittest.cc",
"base/macros-unittest.cc",
"base/ostreams-unittest.cc",
@@ -217,6 +232,7 @@ v8_source_set("unittests_sources") {
"heap/heap-controller-unittest.cc",
"heap/heap-unittest.cc",
"heap/item-parallel-job-unittest.cc",
+ "heap/list-unittest.cc",
"heap/local-heap-unittest.cc",
"heap/marking-unittest.cc",
"heap/marking-worklist-unittest.cc",
@@ -243,6 +259,7 @@ v8_source_set("unittests_sources") {
"interpreter/constant-array-builder-unittest.cc",
"interpreter/interpreter-assembler-unittest.cc",
"interpreter/interpreter-assembler-unittest.h",
+ "libplatform/default-job-unittest.cc",
"libplatform/default-platform-unittest.cc",
"libplatform/default-worker-threads-task-runner-unittest.cc",
"libplatform/task-queue-unittest.cc",
@@ -293,12 +310,15 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-module-sourcemap-unittest.cc",
- "wasm/wasm-opcodes-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
]
+ if (v8_enable_wasm_gdb_remote_debugging) {
+ sources += [ "wasm/wasm-gdbserver-unittest.cc" ]
+ }
+
if (v8_current_cpu == "arm") {
sources += [
"assembler/turbo-assembler-arm-unittest.cc",
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 5fcc78bbe1..a73db835a4 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -98,22 +98,5 @@ TEST_F(RemoteObjectTest, TypeOfRemoteObject) {
EXPECT_STREQ("object", *result);
}
-TEST_F(RemoteObjectTest, ClassOf) {
- Local<FunctionTemplate> constructor_template =
- FunctionTemplate::New(isolate(), Constructor);
- constructor_template->InstanceTemplate()->SetAccessCheckCallbackAndHandler(
- AccessCheck, NamedPropertyHandlerConfiguration(NamedGetter),
- IndexedPropertyHandlerConfiguration());
- constructor_template->SetClassName(
- String::NewFromUtf8Literal(isolate(), "test_class"));
-
- Local<Object> remote_object =
- constructor_template->NewRemoteInstance().ToLocalChecked();
- Local<String> class_name = Utils::ToLocal(
- i::handle(Utils::OpenHandle(*remote_object)->class_name(), i_isolate()));
- String::Utf8Value result(isolate(), class_name);
- EXPECT_STREQ("test_class", *result);
-}
-
} // namespace remote_object_unittest
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 0990f8c63b..aab8c7b91d 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -69,8 +69,9 @@ class CompilerDispatcherTest : public TestWithNativeContext {
static base::Optional<CompilerDispatcher::JobId> EnqueueUnoptimizedCompileJob(
CompilerDispatcher* dispatcher, Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
+ UnoptimizedCompileState state(isolate);
std::unique_ptr<ParseInfo> outer_parse_info =
- test::OuterParseInfoForShared(isolate, shared);
+ test::OuterParseInfoForShared(isolate, shared, &state);
AstValueFactory* ast_value_factory =
outer_parse_info->GetOrCreateAstValueFactory();
AstNodeFactory ast_node_factory(ast_value_factory,
@@ -139,6 +140,11 @@ class MockPlatform : public v8::Platform {
bool IdleTasksEnabled(v8::Isolate* isolate) override { return true; }
+ std::unique_ptr<JobHandle> PostJob(
+ TaskPriority priority, std::unique_ptr<JobTask> job_state) override {
+ UNREACHABLE();
+ }
+
double MonotonicallyIncreasingTime() override {
time_ += time_step_;
return time_;
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index a779da083a..6c9c6321cc 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -2043,9 +2043,9 @@ struct MulDPInst {
Node* (RawMachineAssembler::*mul_constructor)(Node*, Node*);
Node* (RawMachineAssembler::*add_constructor)(Node*, Node*);
Node* (RawMachineAssembler::*sub_constructor)(Node*, Node*);
- ArchOpcode add_arch_opcode;
- ArchOpcode sub_arch_opcode;
- ArchOpcode neg_arch_opcode;
+ ArchOpcode multiply_add_arch_opcode;
+ ArchOpcode multiply_sub_arch_opcode;
+ ArchOpcode multiply_neg_arch_opcode;
MachineType machine_type;
};
@@ -2077,7 +2077,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
m.Return((m.*mdpi.add_constructor)(m.Parameter(0), n));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2087,7 +2087,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
m.Return((m.*mdpi.add_constructor)(n, m.Parameter(2)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(mdpi.add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2103,7 +2103,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, SubWithMul) {
m.Return((m.*mdpi.sub_constructor)(m.Parameter(0), n));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(mdpi.sub_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.multiply_sub_arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2120,7 +2120,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
m.Return((m.*mdpi.mul_constructor)(n, m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.multiply_neg_arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2131,7 +2131,7 @@ TEST_P(InstructionSelectorIntDPWithIntMulTest, NegativeMul) {
m.Return((m.*mdpi.mul_constructor)(m.Parameter(0), n));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(mdpi.neg_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.multiply_neg_arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2141,6 +2141,85 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorIntDPWithIntMulTest,
::testing::ValuesIn(kMulDPInstructions));
+namespace {
+
+struct SIMDMulDPInst {
+ const char* mul_constructor_name;
+ const Operator* (MachineOperatorBuilder::*mul_operator)(void);
+ const Operator* (MachineOperatorBuilder::*add_operator)(void);
+ const Operator* (MachineOperatorBuilder::*sub_operator)(void);
+ ArchOpcode multiply_add_arch_opcode;
+ ArchOpcode multiply_sub_arch_opcode;
+ MachineType machine_type;
+};
+
+std::ostream& operator<<(std::ostream& os, const SIMDMulDPInst& inst) {
+ return os << inst.mul_constructor_name;
+}
+
+} // namespace
+
+static const SIMDMulDPInst kSIMDMulDPInstructions[] = {
+ {"I32x4Mul", &MachineOperatorBuilder::I32x4Mul,
+ &MachineOperatorBuilder::I32x4Add, &MachineOperatorBuilder::I32x4Sub,
+ kArm64I32x4Mla, kArm64I32x4Mls, MachineType::Simd128()},
+ {"I16x8Mul", &MachineOperatorBuilder::I16x8Mul,
+ &MachineOperatorBuilder::I16x8Add, &MachineOperatorBuilder::I16x8Sub,
+ kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()},
+ {"I8x16Mul", &MachineOperatorBuilder::I8x16Mul,
+ &MachineOperatorBuilder::I8x16Add, &MachineOperatorBuilder::I8x16Sub,
+ kArm64I8x16Mla, kArm64I8x16Mls, MachineType::Simd128()}};
+
+using InstructionSelectorSIMDDPWithSIMDMulTest =
+ InstructionSelectorTestWithParam<SIMDMulDPInst>;
+
+TEST_P(InstructionSelectorSIMDDPWithSIMDMulTest, AddWithMul) {
+ const SIMDMulDPInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = m.AddNode((m.machine()->*mdpi.mul_operator)(), m.Parameter(1),
+ m.Parameter(2));
+ m.Return(m.AddNode((m.machine()->*mdpi.add_operator)(), m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = m.AddNode((m.machine()->*mdpi.mul_operator)(), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode((m.machine()->*mdpi.add_operator)(), n, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorSIMDDPWithSIMDMulTest, SubWithMul) {
+ const SIMDMulDPInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = m.AddNode((m.machine()->*mdpi.mul_operator)(), m.Parameter(1),
+ m.Parameter(2));
+ m.Return(m.AddNode((m.machine()->*mdpi.sub_operator)(), m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.multiply_sub_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDDPWithSIMDMulTest,
+ ::testing::ValuesIn(kSIMDMulDPInstructions));
+
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 925ae9b5e7..53b7e3a85c 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -292,6 +292,241 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
IsBranch(cond2, control2)))))));
}
+TEST_F(EffectControlLinearizerTest, UnreachableThenBranch) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* unreachable = graph()->NewNode(common()->Unreachable(),
+ graph()->start(), graph()->start());
+ Node* branch =
+ graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* true_throw = graph()->NewNode(common()->Throw(), unreachable, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* false_throw =
+ graph()->NewNode(common()->Throw(), unreachable, if_false);
+
+ graph()->SetEnd(graph()->NewNode(common()->End(0)));
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, unreachable);
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddThrow(tblock, true_throw);
+ NodeProperties::MergeControlToEnd(graph(), common(), true_throw);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddThrow(fblock, false_throw);
+ NodeProperties::MergeControlToEnd(graph(), common(), false_throw);
+
+ ASSERT_THAT(end(), IsEnd(IsThrow(), IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 2);
+
+  // Run the effect control linearizer, maintaining the schedule.
+ LinearizeEffectControl(
+ jsgraph(), &schedule, zone(), source_positions(), node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+
+ // Initial block with the unreachable should be connected directly to end
+  // without any of the subsequent blocks.
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+ ASSERT_THAT(schedule.start()->SuccessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorAt(0), start);
+}
+
+TEST_F(EffectControlLinearizerTest, UnreachableThenDiamond) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* unreachable = graph()->NewNode(common()->Unreachable(),
+ graph()->start(), graph()->start());
+ Node* branch =
+ graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* throw_node = graph()->NewNode(common()->Throw(), unreachable, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(0)));
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* mblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, unreachable);
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddGoto(tblock, mblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddGoto(fblock, mblock);
+
+ schedule.AddNode(mblock, merge);
+ schedule.AddThrow(mblock, throw_node);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+
+  // Run the effect control linearizer, maintaining the schedule.
+ LinearizeEffectControl(
+ jsgraph(), &schedule, zone(), source_positions(), node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+
+ // Initial block with the unreachable should be connected directly to end
+  // without any of the subsequent blocks.
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+ ASSERT_THAT(schedule.start()->SuccessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorAt(0), start);
+}
+
+TEST_F(EffectControlLinearizerTest, UnreachableThenLoop) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* unreachable = graph()->NewNode(common()->Unreachable(),
+ graph()->start(), graph()->start());
+ Node* loop = graph()->NewNode(common()->Loop(1), graph()->start());
+
+ Node* cond = Int32Constant(0);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ loop->AppendInput(zone(), if_false);
+ NodeProperties::ChangeOp(loop, common()->Loop(2));
+
+ Node* throw_node = graph()->NewNode(common()->Throw(), unreachable, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(0)));
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* lblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, unreachable);
+ schedule.AddGoto(start, lblock);
+
+ schedule.AddNode(lblock, loop);
+ schedule.AddNode(lblock, cond);
+ schedule.AddBranch(lblock, branch, tblock, fblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddGoto(fblock, lblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddThrow(tblock, throw_node);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+
+  // Run the effect control linearizer, maintaining the schedule.
+ LinearizeEffectControl(
+ jsgraph(), &schedule, zone(), source_positions(), node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+
+ // Initial block with the unreachable should be connected directly to end
+  // without any of the subsequent blocks.
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+ ASSERT_THAT(schedule.start()->SuccessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorAt(0), start);
+}
+
+TEST_F(EffectControlLinearizerTest, UnreachableInChangedBlockThenBranch) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* truncate = graph()->NewNode(simplified()->TruncateTaggedToWord32(),
+ NumberConstant(1.1));
+ Node* unreachable = graph()->NewNode(common()->Unreachable(),
+ graph()->start(), graph()->start());
+ Node* branch =
+ graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* true_throw = graph()->NewNode(common()->Throw(), unreachable, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* false_throw =
+ graph()->NewNode(common()->Throw(), unreachable, if_false);
+
+ graph()->SetEnd(graph()->NewNode(common()->End(0)));
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, truncate);
+ schedule.AddNode(start, unreachable);
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddThrow(tblock, true_throw);
+ NodeProperties::MergeControlToEnd(graph(), common(), true_throw);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddThrow(fblock, false_throw);
+ NodeProperties::MergeControlToEnd(graph(), common(), false_throw);
+
+ ASSERT_THAT(end(), IsEnd(IsThrow(), IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 2);
+
+  // Run the effect control linearizer, maintaining the schedule.
+ LinearizeEffectControl(
+ jsgraph(), &schedule, zone(), source_positions(), node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+
+ // Start block now branches due to the lowering of TruncateTaggedToWord32, but
+ // then re-merges and the unreachable should be connected directly to end
+  // without any of the subsequent blocks.
+ ASSERT_THAT(end(), IsEnd(IsThrow()));
+ ASSERT_THAT(end()->op()->ControlInputCount(), 1);
+ ASSERT_THAT(schedule.end()->PredecessorCount(), 1);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 30e24b0aa4..e6660b7823 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -816,10 +816,8 @@ TEST_F(Int64LoweringTest, I64Ror) {
Matcher<Node*> shift_matcher =
IsWord32And(IsParameter(0), IsInt32Constant(0x1F));
- Matcher<Node*> bit_mask_matcher = IsWord32Shl(
- IsWord32Sar(IsInt32Constant(std::numeric_limits<int32_t>::min()),
- shift_matcher),
- IsInt32Constant(1));
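+  // The expected bit mask has only the topmost `shift` bits set, i.e.
+  // ~(0xFFFFFFFFu >> shift).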
+ Matcher<Node*> bit_mask_matcher = IsWord32Xor(
+ IsWord32Shr(IsInt32Constant(-1), shift_matcher), IsInt32Constant(-1));
Matcher<Node*> inv_mask_matcher =
IsWord32Xor(bit_mask_matcher, IsInt32Constant(-1));
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index c3659032cf..a430967bc8 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator-reducer.h"
+#include <limits>
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
@@ -317,6 +318,7 @@ const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
// Avoid undefined behavior on signed integer overflow.
int32_t Shl(int32_t x, int32_t y) { return static_cast<uint32_t>(x) << y; }
+int64_t Shl(int64_t x, int64_t y) { return static_cast<uint64_t>(x) << y; }
} // namespace
@@ -763,6 +765,44 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithComparisonAndConstantOne) {
}
}
+// -----------------------------------------------------------------------------
+// Word32Or
+
+TEST_F(MachineOperatorReducerTest, Word32OrWithWord32And) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, m, kUint32Values) {
+ TRACED_FOREACH(int32_t, rhs, kUint32Values) {
+ // To get better coverage of interesting cases, run this test twice:
+ // once with the mask from kUint32Values, and once with its inverse.
+ for (int32_t mask : {m, ~m}) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Word32Or(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
+ Int32Constant(rhs)));
+ switch (rhs) {
+ case 0: // x | 0 => x
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32And(p0, IsInt32Constant(mask)));
+ break;
+ case -1: // x | -1 => -1
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(-1));
+ break;
+ default: // (x & K1) | K2 => x | K2, if K1 | K2 == -1
+ if ((mask | rhs) == -1) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsWord32Or(p0, IsInt32Constant(rhs)));
+ } else {
+ ASSERT_TRUE(!r.Changed());
+ }
+ break;
+ }
+ }
+ }
+ }
+}
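+
+// A worked instance of the (x & K1) | K2 => x | K2 rule exercised above, for
+// illustration: with K1 = 0x0000FFFF and K2 = 0xFFFF0000, K1 | K2 == -1, so
+// (x & 0x0000FFFF) | 0xFFFF0000 equals x | 0xFFFF0000 for every x.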
// -----------------------------------------------------------------------------
// Word32Xor
@@ -1053,11 +1093,123 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
Int32Constant(x));
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- int32_t m = static_cast<int32_t>(~((1U << x) - 1U));
+ int32_t m = static_cast<int32_t>(std::numeric_limits<uint32_t>::max() << x);
EXPECT_THAT(r.replacement(), IsWord32And(p0, IsInt32Constant(m)));
}
}
+TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32SarShiftOutZeros) {
+ Node* p = Parameter(0);
+ TRACED_FORRANGE(int32_t, x, 1, 31) {
+ TRACED_FORRANGE(int32_t, y, 0, 31) {
+ Node* node = graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Sar(ShiftKind::kShiftOutZeros), p,
+ Int32Constant(x)),
+ Int32Constant(y));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ if (x == y) {
+ // (p >> x) << y => p
+ EXPECT_THAT(r.replacement(), p);
+ } else if (x < y) {
+ // (p >> x) << y => p << (y - x)
+ EXPECT_THAT(r.replacement(), IsWord32Shl(p, IsInt32Constant(y - x)));
+ } else {
+ // (p >> x) << y => p >> (x - y)
+ EXPECT_THAT(r.replacement(), IsWord32Sar(p, IsInt32Constant(x - y)));
+ }
+ }
+ }
+}
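+
+// Numeric illustration of the three cases above (illustrative values only):
+// for p = 48 (low four bits known zero), x = 4, y = 2 gives
+// (48 >> 4) << 2 == 12 == 48 >> 2, matching the x > y rewrite; x = 2, y = 4
+// gives (48 >> 2) << 4 == 192 == 48 << 2, matching the x < y rewrite.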
+
+// -----------------------------------------------------------------------------
+// Word64Shl
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithZeroShift) {
+ Node* p0 = Parameter(0);
+ Node* node = graph()->NewNode(machine()->Word64Shl(), p0, Int64Constant(0));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Sar) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int64_t, x, 1, 63) {
+ Node* node = graph()->NewNode(
+ machine()->Word64Shl(),
+ graph()->NewNode(machine()->Word64Sar(), p0, Int64Constant(x)),
+ Int64Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int64_t m = static_cast<int64_t>(~((uint64_t{1} << x) - 1));
+ EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest,
+ Word64ShlWithWord64SarAndInt64AddAndConstant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int64_t, k, kInt64Values) {
+ TRACED_FORRANGE(int64_t, l, 1, 63) {
+ if (Shl(k, l) == 0) continue;
+ // (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Word64Shl(),
+ graph()->NewNode(machine()->Word64Sar(),
+ graph()->NewNode(machine()->Int64Add(), p0,
+ Int64Constant(Shl(k, l))),
+ Int64Constant(l)),
+ Int64Constant(l)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsInt64Add(IsWord64And(p0, IsInt64Constant(Shl(int64_t{-1}, l))),
+ IsInt64Constant(Shl(k, l))));
+ }
+ }
+}
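+
+// Worked numbers for the rewrite commented above, for illustration: with
+// L = 4 and K = 3 (so K << L == 48) and x == 100,
+// (100 + 48) >> 4 << 4 == 144, and (100 & (-1 << 4)) + 48 == 96 + 48 == 144.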
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64Shr) {
+ Node* p0 = Parameter(0);
+ TRACED_FORRANGE(int64_t, x, 1, 63) {
+ Node* node = graph()->NewNode(
+ machine()->Word64Shl(),
+ graph()->NewNode(machine()->Word64Shr(), p0, Int64Constant(x)),
+ Int64Constant(x));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ int64_t m = static_cast<int64_t>(std::numeric_limits<uint64_t>::max() << x);
+ EXPECT_THAT(r.replacement(), IsWord64And(p0, IsInt64Constant(m)));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Word64ShlWithWord64SarShiftOutZeros) {
+ Node* p = Parameter(0);
+ TRACED_FORRANGE(int64_t, x, 1, 63) {
+ TRACED_FORRANGE(int64_t, y, 0, 63) {
+ Node* node = graph()->NewNode(
+ machine()->Word64Shl(),
+ graph()->NewNode(machine()->Word64Sar(ShiftKind::kShiftOutZeros), p,
+ Int64Constant(x)),
+ Int64Constant(y));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ if (x == y) {
+ // (p >> x) << y => p
+ EXPECT_THAT(r.replacement(), p);
+ } else if (x < y) {
+ // (p >> x) << y => p << (y - x)
+ EXPECT_THAT(r.replacement(), IsWord64Shl(p, IsInt64Constant(y - x)));
+ } else {
+ // (p >> x) << y => p >> (x - y)
+ EXPECT_THAT(r.replacement(), IsWord64Sar(p, IsInt64Constant(x - y)));
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Word32Equal
@@ -1853,6 +2005,29 @@ TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
}
}
+TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32SarShiftOutZeros) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FORRANGE(int32_t, shift0, 1, 3) {
+ TRACED_FORRANGE(int32_t, shift1, 1, 3) {
+ Node* const node =
+ graph()->NewNode(machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+ p0, Int32Constant(shift0)),
+ graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+ p1, Int32Constant(shift1)));
+
+ Reduction r = Reduce(node);
+ if (shift0 == shift1) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32LessThan(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Uint32LessThan
@@ -1873,6 +2048,80 @@ TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
}
}
+TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32SarShiftOutZeros) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FORRANGE(int32_t, shift0, 1, 3) {
+ TRACED_FORRANGE(int32_t, shift1, 1, 3) {
+ Node* const node =
+ graph()->NewNode(machine()->Uint32LessThan(),
+ graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+ p0, Int32Constant(shift0)),
+ graph()->NewNode(machine()->Word32SarShiftOutZeros(),
+ p1, Int32Constant(shift1)));
+
+ Reduction r = Reduce(node);
+ if (shift0 == shift1) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsUint32LessThan(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Uint64LessThan
+
+TEST_F(MachineOperatorReducerTest, Uint64LessThanWithWord64SarShiftOutZeros) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FORRANGE(int64_t, shift0, 1, 3) {
+ TRACED_FORRANGE(int64_t, shift1, 1, 3) {
+ Node* const node =
+ graph()->NewNode(machine()->Uint64LessThan(),
+ graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+ p0, Int64Constant(shift0)),
+ graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+ p1, Int64Constant(shift1)));
+
+ Reduction r = Reduce(node);
+ if (shift0 == shift1) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsUint64LessThan(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Int64LessThan
+
+TEST_F(MachineOperatorReducerTest, Int64LessThanWithWord64SarShiftOutZeros) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FORRANGE(int64_t, shift0, 1, 3) {
+ TRACED_FORRANGE(int64_t, shift1, 1, 3) {
+ Node* const node =
+ graph()->NewNode(machine()->Int64LessThan(),
+ graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+ p0, Int64Constant(shift0)),
+ graph()->NewNode(machine()->Word64SarShiftOutZeros(),
+ p1, Int64Constant(shift1)));
+
+ Reduction r = Reduce(node);
+ if (shift0 == shift1) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt64LessThan(p0, p1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+ }
+}
// -----------------------------------------------------------------------------
// Float64Mul
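The ShiftOutZeros reductions exercised above rest on a plain arithmetic identity: when the operand of the arithmetic right shift is known to have at least x trailing zero bits (exactly what the kShiftOutZeros annotation guarantees), (p >> x) << y folds to p, to p << (y - x), or to p >> (x - y). A minimal standalone sketch of that identity, separate from the compiler tests and illustrative only:

#include <cassert>
#include <cstdint>

// Folds (p >> x) << y under the precondition that the low x bits of p are 0.
int64_t FoldSarShl(int64_t p, int x, int y) {
  if (x == y) return p;            // (p >> x) << y => p
  if (x < y) return p << (y - x);  // (p >> x) << y => p << (y - x)
  return p >> (x - y);             // (p >> x) << y => p >> (x - y)
}

int main() {
  for (int x = 1; x <= 16; ++x) {
    for (int y = 0; y <= 16; ++y) {
      const int64_t p = int64_t{5} << x;  // has at least x trailing zero bits
      assert(((p >> x) << y) == FoldSarShl(p, x, y));
    }
  }
  return 0;
}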
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index bddcea5743..aeceabeffa 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1489,6 +1489,18 @@ Matcher<Node*> IsDead() {
return MakeMatcher(new TestNodeMatcher(IrOpcode::kDead));
}
+Matcher<Node*> IsUnreachable() {
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kUnreachable));
+}
+
+Matcher<Node*> IsThrow() {
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kThrow));
+}
+
+Matcher<Node*> IsStart() {
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kStart));
+}
+
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher) {
return MakeMatcher(new IsControl1Matcher(IrOpcode::kEnd, control0_matcher));
}
@@ -2175,6 +2187,8 @@ IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Div)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Int64Mul)
+IS_BINOP_MATCHER(Int64LessThan)
+IS_BINOP_MATCHER(Uint64LessThan)
IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(JSParseInt)
IS_BINOP_MATCHER(Float32Equal)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index bf21427a5c..42d6db82cf 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -37,6 +37,9 @@ class Node;
using ::testing::Matcher;
Matcher<Node*> IsDead();
+Matcher<Node*> IsUnreachable();
+Matcher<Node*> IsThrow();
+Matcher<Node*> IsStart();
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher);
@@ -412,6 +415,10 @@ Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Div(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint64LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSParseInt(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 9dda52ed8e..6af2969166 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -670,18 +670,16 @@ TEST_F(RedundancyEliminationTest, CheckedUint32Bounds) {
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* check1 = effect = graph()->NewNode(
- simplified()->CheckedUint32Bounds(
- feedback1, CheckBoundsParameters::kDeoptOnOutOfBounds),
- index, length, effect, control);
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint32Bounds(feedback1, {}),
+ index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
- Node* check2 = effect = graph()->NewNode(
- simplified()->CheckedUint32Bounds(
- feedback2, CheckBoundsParameters::kDeoptOnOutOfBounds),
- index, length, effect, control);
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint32Bounds(feedback2, {}),
+ index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
@@ -754,18 +752,16 @@ TEST_F(RedundancyEliminationTest, CheckedUint64Bounds) {
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* check1 = effect = graph()->NewNode(
- simplified()->CheckedUint64Bounds(
- feedback1, CheckBoundsParameters::kDeoptOnOutOfBounds),
- index, length, effect, control);
+ Node* check1 = effect =
+ graph()->NewNode(simplified()->CheckedUint64Bounds(feedback1, {}),
+ index, length, effect, control);
Reduction r1 = Reduce(check1);
ASSERT_TRUE(r1.Changed());
EXPECT_EQ(r1.replacement(), check1);
- Node* check2 = effect = graph()->NewNode(
- simplified()->CheckedUint64Bounds(
- feedback2, CheckBoundsParameters::kDeoptOnOutOfBounds),
- index, length, effect, control);
+ Node* check2 = effect =
+ graph()->NewNode(simplified()->CheckedUint64Bounds(feedback2, {}),
+ index, length, effect, control);
Reduction r2 = Reduce(check2);
ASSERT_TRUE(r2.Changed());
EXPECT_EQ(r2.replacement(), check1);
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 8ecee3f8a1..4745f6c4b4 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -600,32 +600,38 @@ TEST_F(TyperTest, Manual_Operation_NumberMax) {
Type b = t(Type::MinusZero(), zero_or_minuszero);
CHECK(Type::MinusZero().Is(b));
CHECK(zero.Is(b));
- CHECK(a.Is(b));
+ CHECK(a.Is(b)); // Monotonicity.
Type c = t(zero_or_minuszero, Type::MinusZero());
CHECK(Type::MinusZero().Is(c));
CHECK(zero.Is(c));
- CHECK(a.Is(c));
+ CHECK(a.Is(c)); // Monotonicity.
Type d = t(zero_or_minuszero, zero_or_minuszero);
CHECK(Type::MinusZero().Is(d));
CHECK(zero.Is(d));
- CHECK(b.Is(d));
- CHECK(c.Is(d));
+ CHECK(b.Is(d)); // Monotonicity.
+ CHECK(c.Is(d)); // Monotonicity.
Type e =
t(Type::MinusZero(), Type::Union(Type::MinusZero(), dot_five, zone()));
CHECK(Type::MinusZero().Is(e));
CHECK(dot_five.Is(e));
- CHECK(a.Is(e));
+ CHECK(a.Is(e)); // Monotonicity.
Type f = t(Type::MinusZero(), zero);
CHECK(zero.Is(f));
- CHECK(f.Is(b));
+ CHECK(f.Is(b)); // Monotonicity.
Type g = t(zero, Type::MinusZero());
CHECK(zero.Is(g));
- CHECK(g.Is(c));
+ CHECK(g.Is(c)); // Monotonicity.
+
+ Type h = t(Type::Signed32(), Type::MinusZero());
+ CHECK(Type::MinusZero().Is(h));
+
+ Type i = t(Type::Signed32(), zero_or_minuszero);
+ CHECK(h.Is(i)); // Monotonicity.
}
TEST_F(TyperTest, Manual_Operation_NumberMin) {
@@ -644,35 +650,41 @@ TEST_F(TyperTest, Manual_Operation_NumberMin) {
Type b = t(Type::MinusZero(), zero_or_minuszero);
CHECK(Type::MinusZero().Is(b));
CHECK(zero.Is(b));
- CHECK(a.Is(b));
+ CHECK(a.Is(b)); // Monotonicity.
Type c = t(zero_or_minuszero, Type::MinusZero());
CHECK(Type::MinusZero().Is(c));
CHECK(zero.Is(c));
- CHECK(a.Is(c));
+ CHECK(a.Is(c)); // Monotonicity.
Type d = t(zero_or_minuszero, zero_or_minuszero);
CHECK(Type::MinusZero().Is(d));
CHECK(zero.Is(d));
- CHECK(b.Is(d));
- CHECK(c.Is(d));
+ CHECK(b.Is(d)); // Monotonicity.
+ CHECK(c.Is(d)); // Monotonicity.
Type e = t(Type::MinusZero(),
Type::Union(Type::MinusZero(), minus_dot_five, zone()));
CHECK(Type::MinusZero().Is(e));
CHECK(minus_dot_five.Is(e));
- CHECK(a.Is(e));
+ CHECK(a.Is(e)); // Monotonicity.
Type f = t(Type::MinusZero(), zero);
CHECK(Type::MinusZero().Is(f));
- CHECK(f.Is(b));
+ CHECK(f.Is(b)); // Monotonicity.
Type g = t(zero, Type::MinusZero());
CHECK(Type::MinusZero().Is(g));
- CHECK(g.Is(c));
+ CHECK(g.Is(c)); // Monotonicity.
Type h = t(one, Type::MinusZero());
CHECK(Type::MinusZero().Is(h));
+
+ Type i = t(Type::Signed32(), Type::MinusZero());
+ CHECK(Type::MinusZero().Is(i));
+
+ Type j = t(Type::Signed32(), zero_or_minuszero);
+ CHECK(i.Is(j)); // Monotonicity.
}
} // namespace compiler
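The "// Monotonicity." annotations added throughout the NumberMax/NumberMin tests all check the same lattice property of the typer, restated here informally: for a binary typing rule t and input types a ⊆ a' and b ⊆ b',

    t(a, b) ⊆ t(a', b')

For example, since Type::MinusZero() is a subtype of zero_or_minuszero, the result h = t(Type::Signed32(), Type::MinusZero()) must be a subtype of i = t(Type::Signed32(), zero_or_minuszero), which is exactly what the new CHECK(h.Is(i)) asserts (and CHECK(i.Is(j)) in the NumberMin test).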
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 3b59b7cf35..a19cad7953 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -65,7 +65,7 @@ using TestWithNativeContextAndFinalizationRegistry = //
WithContextMixin< //
WithFinalizationRegistryMixin< //
WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+ WithIsolateMixin< //
::testing::Test>>>>>;
namespace {
@@ -89,7 +89,7 @@ class MicrotaskQueueTest : public TestWithNativeContextAndFinalizationRegistry,
void SetUp() override {
microtask_queue_ = MicrotaskQueue::New(isolate());
- native_context()->set_microtask_queue(microtask_queue());
+ native_context()->set_microtask_queue(isolate(), microtask_queue());
if (GetParam()) {
// Use a PromiseHook to switch the implementation to ResolvePromise
@@ -254,9 +254,9 @@ TEST_P(MicrotaskQueueTest, PromiseHandlerContext) {
Handle<Context> context2 = Utils::OpenHandle(*v8_context2, isolate());
Handle<Context> context3 = Utils::OpenHandle(*v8_context3, isolate());
Handle<Context> context4 = Utils::OpenHandle(*v8_context3, isolate());
- context2->native_context().set_microtask_queue(microtask_queue());
- context3->native_context().set_microtask_queue(microtask_queue());
- context4->native_context().set_microtask_queue(microtask_queue());
+ context2->native_context().set_microtask_queue(isolate(), microtask_queue());
+ context3->native_context().set_microtask_queue(isolate(), microtask_queue());
+ context4->native_context().set_microtask_queue(isolate(), microtask_queue());
Handle<JSFunction> handler;
Handle<JSProxy> proxy;
@@ -587,7 +587,7 @@ TEST_P(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
Utils::OpenHandle(*sub_context)
->native_context()
- .set_microtask_queue(microtask_queue());
+ .set_microtask_queue(isolate(), microtask_queue());
Handle<JSArray> result;
Handle<JSFunction> stale_handler;
diff --git a/deps/v8/test/unittests/heap/bitmap-unittest.cc b/deps/v8/test/unittests/heap/bitmap-unittest.cc
index 393f5ea303..8729d8acb0 100644
--- a/deps/v8/test/unittests/heap/bitmap-unittest.cc
+++ b/deps/v8/test/unittests/heap/bitmap-unittest.cc
@@ -41,7 +41,7 @@ TEST_F(NonAtomicBitmapTest, Cells) {
}
TEST_F(NonAtomicBitmapTest, CellsCount) {
- int last_cell_index = bitmap()->CellsCount() - 1;
+ size_t last_cell_index = bitmap()->CellsCount() - 1;
bitmap()->cells()[last_cell_index] = kBlackCell;
// Manually verify on raw memory.
uint8_t* raw = raw_bitmap();
diff --git a/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc b/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc
deleted file mode 100644
index 3a02ae1721..0000000000
--- a/deps/v8/test/unittests/heap/cppgc/allocation_unittest.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/cppgc/allocation.h"
-
-#include <memory>
-
-#include "src/heap/cppgc/heap.h"
-#include "test/unittests/heap/cppgc/tests.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace cppgc {
-
-TEST(GCBasicHeapTest, CreateAndDestroyHeap) {
- std::unique_ptr<Heap> heap{Heap::Create()};
-}
-
-namespace {
-
-class Foo : public GarbageCollected<Foo> {
- public:
- static size_t destructor_callcount;
-
- Foo() { destructor_callcount = 0; }
- ~Foo() { destructor_callcount++; }
-};
-
-size_t Foo::destructor_callcount;
-
-class GCAllocationTest : public testing::TestWithHeap {};
-
-} // namespace
-
-TEST_F(GCAllocationTest, MakeGarbageCollectedAndReclaim) {
- MakeGarbageCollected<Foo>(GetHeap());
- EXPECT_EQ(0u, Foo::destructor_callcount);
- internal::Heap::From(GetHeap())->CollectGarbage();
- EXPECT_EQ(1u, Foo::destructor_callcount);
-}
-
-} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc b/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc
new file mode 100644
index 0000000000..3fb0b13705
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/custom-space.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+
+namespace cppgc {
+
+class CustomSpace1 : public CustomSpace<CustomSpace1> {
+ public:
+ static constexpr size_t kSpaceIndex = 0;
+};
+
+class CustomSpace2 : public CustomSpace<CustomSpace2> {
+ public:
+ static constexpr size_t kSpaceIndex = 1;
+};
+
+namespace internal {
+
+namespace {
+
+size_t g_destructor_callcount;
+
+class TestWithHeapWithCustomSpaces : public testing::TestWithPlatform {
+ protected:
+ TestWithHeapWithCustomSpaces() {
+ Heap::HeapOptions options;
+ options.custom_spaces.emplace_back(std::make_unique<CustomSpace1>());
+ options.custom_spaces.emplace_back(std::make_unique<CustomSpace2>());
+ heap_ = Heap::Create(std::move(options));
+ g_destructor_callcount = 0;
+ }
+
+ void PreciseGC() {
+ heap_->ForceGarbageCollectionSlow(
+ "TestWithHeapWithCustomSpaces", "Testing",
+ Heap::GCConfig::StackState::kNoHeapPointers);
+ }
+
+ cppgc::Heap* GetHeap() const { return heap_.get(); }
+
+ private:
+ std::unique_ptr<cppgc::Heap> heap_;
+};
+
+class RegularGCed final : public GarbageCollected<RegularGCed> {};
+
+class CustomGCed1 final : public GarbageCollected<CustomGCed1> {
+ public:
+ ~CustomGCed1() { g_destructor_callcount++; }
+};
+class CustomGCed2 final : public GarbageCollected<CustomGCed2> {
+ public:
+ ~CustomGCed2() { g_destructor_callcount++; }
+};
+
+class CustomGCedBase : public GarbageCollected<CustomGCedBase> {};
+class CustomGCedFinal1 final : public CustomGCedBase {
+ public:
+ ~CustomGCedFinal1() { g_destructor_callcount++; }
+};
+class CustomGCedFinal2 final : public CustomGCedBase {
+ public:
+ ~CustomGCedFinal2() { g_destructor_callcount++; }
+};
+
+} // namespace
+
+} // namespace internal
+
+template <>
+struct SpaceTrait<internal::CustomGCed1> {
+ using Space = CustomSpace1;
+};
+
+template <>
+struct SpaceTrait<internal::CustomGCed2> {
+ using Space = CustomSpace2;
+};
+
+template <typename T>
+struct SpaceTrait<
+ T, std::enable_if_t<std::is_base_of<internal::CustomGCedBase, T>::value>> {
+ using Space = CustomSpace1;
+};
+
+namespace internal {
+
+TEST_F(TestWithHeapWithCustomSpaces, AllocateOnCustomSpaces) {
+ auto* regular = MakeGarbageCollected<RegularGCed>(GetHeap());
+ auto* custom1 = MakeGarbageCollected<CustomGCed1>(GetHeap());
+ auto* custom2 = MakeGarbageCollected<CustomGCed2>(GetHeap());
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces,
+ NormalPage::FromPayload(custom1)->space()->index());
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces + 1,
+ NormalPage::FromPayload(custom2)->space()->index());
+ EXPECT_EQ(static_cast<size_t>(RawHeap::RegularSpaceType::kNormal1),
+ NormalPage::FromPayload(regular)->space()->index());
+}
+
+TEST_F(TestWithHeapWithCustomSpaces,
+ AllocateOnCustomSpacesSpecifiedThroughBase) {
+ auto* regular = MakeGarbageCollected<RegularGCed>(GetHeap());
+ auto* custom1 = MakeGarbageCollected<CustomGCedFinal1>(GetHeap());
+ auto* custom2 = MakeGarbageCollected<CustomGCedFinal2>(GetHeap());
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces,
+ NormalPage::FromPayload(custom1)->space()->index());
+ EXPECT_EQ(RawHeap::kNumberOfRegularSpaces,
+ NormalPage::FromPayload(custom2)->space()->index());
+ EXPECT_EQ(static_cast<size_t>(RawHeap::RegularSpaceType::kNormal1),
+ NormalPage::FromPayload(regular)->space()->index());
+}
+
+TEST_F(TestWithHeapWithCustomSpaces, SweepCustomSpace) {
+ MakeGarbageCollected<CustomGCedFinal1>(GetHeap());
+ MakeGarbageCollected<CustomGCedFinal2>(GetHeap());
+ MakeGarbageCollected<CustomGCed1>(GetHeap());
+ MakeGarbageCollected<CustomGCed2>(GetHeap());
+ EXPECT_EQ(0u, g_destructor_callcount);
+ PreciseGC();
+ EXPECT_EQ(4u, g_destructor_callcount);
+}
+
+} // namespace internal
+} // namespace cppgc
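A hedged user-side sketch of the machinery this new test covers: a custom space derives from CustomSpace<> with a unique kSpaceIndex, is registered via Heap::HeapOptions at heap creation, and is bound to a type by specializing SpaceTrait. The CompactableSpace/Node names and the public include paths below are illustrative assumptions, not taken from the patch:

#include <memory>

#include "cppgc/allocation.h"
#include "cppgc/custom-space.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/heap.h"

// Illustrative custom space; kSpaceIndex must be unique among custom spaces.
class CompactableSpace : public cppgc::CustomSpace<CompactableSpace> {
 public:
  static constexpr size_t kSpaceIndex = 0;
};

// Illustrative garbage-collected type.
class Node : public cppgc::GarbageCollected<Node> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// Route all Node allocations into CompactableSpace.
namespace cppgc {
template <>
struct SpaceTrait<Node> {
  using Space = CompactableSpace;
};
}  // namespace cppgc

// Registration mirrors the test fixture above:
//   cppgc::Heap::HeapOptions options;
//   options.custom_spaces.emplace_back(std::make_unique<CompactableSpace>());
//   auto heap = cppgc::Heap::Create(std::move(options));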
diff --git a/deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc b/deps/v8/test/unittests/heap/cppgc/finalizer-trait-unittest.cc
index 91a255e727..23da432730 100644
--- a/deps/v8/test/unittests/heap/cppgc/finalizer-trait_unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/finalizer-trait-unittest.cc
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/finalizer-trait.h"
+#include "include/cppgc/internal/finalizer-trait.h"
+
#include <type_traits>
+
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
diff --git a/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
new file mode 100644
index 0000000000..e059734cf9
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
@@ -0,0 +1,187 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/free-list.h"
+
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "src/base/bits.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+namespace {
+
+class Block {
+ public:
+ Block() = default;
+ explicit Block(size_t size) : address_(calloc(1, size)), size_(size) {}
+
+ Block(Block&& other) V8_NOEXCEPT : address_(other.address_),
+ size_(other.size_) {
+ other.address_ = nullptr;
+ other.size_ = 0;
+ }
+
+ Block& operator=(Block&& other) V8_NOEXCEPT {
+ address_ = other.address_;
+ size_ = other.size_;
+ other.address_ = nullptr;
+ other.size_ = 0;
+ return *this;
+ }
+
+ ~Block() { free(address_); }
+
+ void* Address() const { return address_; }
+ size_t Size() const { return size_; }
+
+ private:
+ void* address_ = nullptr;
+ size_t size_ = 0;
+};
+
+std::vector<Block> CreateEntries() {
+ static constexpr size_t kFreeListEntrySizeLog2 =
+ v8::base::bits::WhichPowerOfTwo(kFreeListEntrySize);
+ std::vector<Block> vector;
+ vector.reserve(kPageSizeLog2);
+ for (size_t i = kFreeListEntrySizeLog2; i < kPageSizeLog2; ++i) {
+ vector.emplace_back(static_cast<size_t>(1u) << i);
+ }
+ return vector;
+}
+
+FreeList CreatePopulatedFreeList(const std::vector<Block>& blocks) {
+ FreeList list;
+ for (const auto& block : blocks) {
+ list.Add({block.Address(), block.Size()});
+ }
+ return list;
+}
+
+} // namespace
+
+TEST(FreeListTest, Empty) {
+ FreeList list;
+ EXPECT_TRUE(list.IsEmpty());
+ EXPECT_EQ(0u, list.Size());
+
+ auto block = list.Allocate(16);
+ EXPECT_EQ(nullptr, block.address);
+ EXPECT_EQ(0u, block.size);
+}
+
+TEST(FreeListTest, Add) {
+ auto blocks = CreateEntries();
+ FreeList list = CreatePopulatedFreeList(blocks);
+ EXPECT_FALSE(list.IsEmpty());
+ const size_t allocated_size = std::accumulate(
+ blocks.cbegin(), blocks.cend(), 0u,
+ [](size_t acc, const Block& b) { return acc + b.Size(); });
+ EXPECT_EQ(allocated_size, list.Size());
+}
+
+TEST(FreeListTest, AddWasted) {
+ FreeList list;
+ alignas(HeapObjectHeader) uint8_t buffer[sizeof(HeapObjectHeader)];
+ list.Add({buffer, sizeof(buffer)});
+ EXPECT_EQ(0u, list.Size());
+ EXPECT_TRUE(list.IsEmpty());
+}
+
+TEST(FreeListTest, Clear) {
+ auto blocks = CreateEntries();
+ FreeList list = CreatePopulatedFreeList(blocks);
+ list.Clear();
+ EXPECT_EQ(0u, list.Size());
+ EXPECT_TRUE(list.IsEmpty());
+}
+
+TEST(FreeListTest, Move) {
+ {
+ auto blocks = CreateEntries();
+ FreeList list1 = CreatePopulatedFreeList(blocks);
+ const size_t expected_size = list1.Size();
+ FreeList list2 = std::move(list1);
+ EXPECT_EQ(expected_size, list2.Size());
+ EXPECT_FALSE(list2.IsEmpty());
+ EXPECT_EQ(0u, list1.Size());
+ EXPECT_TRUE(list1.IsEmpty());
+ }
+ {
+ auto blocks1 = CreateEntries();
+ FreeList list1 = CreatePopulatedFreeList(blocks1);
+ const size_t expected_size = list1.Size();
+
+ auto blocks2 = CreateEntries();
+ FreeList list2 = CreatePopulatedFreeList(blocks2);
+
+ list2 = std::move(list1);
+ EXPECT_EQ(expected_size, list2.Size());
+ EXPECT_FALSE(list2.IsEmpty());
+ EXPECT_EQ(0u, list1.Size());
+ EXPECT_TRUE(list1.IsEmpty());
+ }
+}
+
+TEST(FreeListTest, Append) {
+ auto blocks1 = CreateEntries();
+ FreeList list1 = CreatePopulatedFreeList(blocks1);
+ const size_t list1_size = list1.Size();
+
+ auto blocks2 = CreateEntries();
+ FreeList list2 = CreatePopulatedFreeList(blocks2);
+  const size_t list2_size = list2.Size();
+
+ list2.Append(std::move(list1));
+ EXPECT_EQ(list1_size + list2_size, list2.Size());
+ EXPECT_FALSE(list2.IsEmpty());
+ EXPECT_EQ(0u, list1.Size());
+ EXPECT_TRUE(list1.IsEmpty());
+}
+
+TEST(FreeListTest, Contains) {
+ auto blocks = CreateEntries();
+ FreeList list = CreatePopulatedFreeList(blocks);
+
+ for (const auto& block : blocks) {
+ EXPECT_TRUE(list.Contains({block.Address(), block.Size()}));
+ }
+}
+
+TEST(FreeListTest, Allocate) {
+ static constexpr size_t kFreeListEntrySizeLog2 =
+ v8::base::bits::WhichPowerOfTwo(kFreeListEntrySize);
+
+ std::vector<Block> blocks;
+ blocks.reserve(kPageSizeLog2);
+ for (size_t i = kFreeListEntrySizeLog2; i < kPageSizeLog2; ++i) {
+ blocks.emplace_back(static_cast<size_t>(1u) << i);
+ }
+
+ FreeList list = CreatePopulatedFreeList(blocks);
+
+  // Try to allocate from the biggest block.
+ for (auto it = blocks.rbegin(); it < blocks.rend(); ++it) {
+ const auto result = list.Allocate(it->Size());
+ EXPECT_EQ(it->Address(), result.address);
+ EXPECT_EQ(it->Size(), result.size);
+ }
+
+ EXPECT_EQ(0u, list.Size());
+ EXPECT_TRUE(list.IsEmpty());
+
+  // Check that allocation fails for an empty list:
+ const auto empty_block = list.Allocate(8);
+ EXPECT_EQ(nullptr, empty_block.address);
+ EXPECT_EQ(0u, empty_block.size);
+}
+
+} // namespace internal
+} // namespace cppgc
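A hedged sketch of the FreeList interface the new test drives, reduced to its essentials; the backing allocation and sizes are illustrative, and blocks too small to hold a free-list entry are dropped rather than added, as the AddWasted test above shows:

#include <cstdlib>

#include "src/heap/cppgc/free-list.h"

void FreeListSketch() {
  cppgc::internal::FreeList list;
  void* mem = calloc(1, 1024);      // illustrative backing memory
  list.Add({mem, 1024});            // hand the block to the free list
  auto block = list.Allocate(512);  // {address, size}, or {nullptr, 0} if nothing fits
  // ... carve objects out of block.address / block.size ...
  list.Clear();
  free(mem);
}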
diff --git a/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
new file mode 100644
index 0000000000..aadd3aab59
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/garbage-collected-unittest.cc
@@ -0,0 +1,143 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/garbage-collected.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/type-traits.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class GCed : public GarbageCollected<GCed> {};
+class NotGCed {};
+class Mixin : public GarbageCollectedMixin {};
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
+ USING_GARBAGE_COLLECTED_MIXIN();
+};
+class OtherMixin : public GarbageCollectedMixin {};
+class MergedMixins : public Mixin, public OtherMixin {
+ MERGE_GARBAGE_COLLECTED_MIXINS();
+
+ public:
+ void Trace(cppgc::Visitor* visitor) const override {
+ Mixin::Trace(visitor);
+ OtherMixin::Trace(visitor);
+ }
+};
+class GCWithMergedMixins : public GCed, public MergedMixins {
+ USING_GARBAGE_COLLECTED_MIXIN();
+
+ public:
+ void Trace(cppgc::Visitor* visitor) const override {
+ MergedMixins::Trace(visitor);
+ }
+};
+
+class GarbageCollectedTestWithHeap
+ : public testing::TestSupportingAllocationOnly {};
+
+} // namespace
+
+TEST(GarbageCollectedTest, GarbageCollectedTrait) {
+ STATIC_ASSERT(!IsGarbageCollectedType<int>::value);
+ STATIC_ASSERT(!IsGarbageCollectedType<NotGCed>::value);
+ STATIC_ASSERT(IsGarbageCollectedType<GCed>::value);
+ STATIC_ASSERT(IsGarbageCollectedType<Mixin>::value);
+ STATIC_ASSERT(IsGarbageCollectedType<GCedWithMixin>::value);
+ STATIC_ASSERT(IsGarbageCollectedType<MergedMixins>::value);
+ STATIC_ASSERT(IsGarbageCollectedType<GCWithMergedMixins>::value);
+}
+
+TEST(GarbageCollectedTest, GarbageCollectedMixinTrait) {
+ STATIC_ASSERT(!IsGarbageCollectedMixinType<int>::value);
+ STATIC_ASSERT(!IsGarbageCollectedMixinType<GCed>::value);
+ STATIC_ASSERT(!IsGarbageCollectedMixinType<NotGCed>::value);
+ STATIC_ASSERT(IsGarbageCollectedMixinType<Mixin>::value);
+ STATIC_ASSERT(IsGarbageCollectedMixinType<GCedWithMixin>::value);
+ STATIC_ASSERT(IsGarbageCollectedMixinType<MergedMixins>::value);
+ STATIC_ASSERT(IsGarbageCollectedMixinType<GCWithMergedMixins>::value);
+}
+
+TEST_F(GarbageCollectedTestWithHeap, GetObjectStartReturnsCurrentAddress) {
+ GCed* gced = MakeGarbageCollected<GCed>(GetHeap());
+ GCedWithMixin* gced_with_mixin =
+ MakeGarbageCollected<GCedWithMixin>(GetHeap());
+ EXPECT_EQ(gced_with_mixin, static_cast<Mixin*>(gced_with_mixin)
+ ->GetTraceDescriptor()
+ .base_object_payload);
+ EXPECT_NE(gced, static_cast<Mixin*>(gced_with_mixin)
+ ->GetTraceDescriptor()
+ .base_object_payload);
+}
+
+namespace {
+
+class GCedWithPostConstructionCallback final : public GCed {
+ public:
+ static size_t cb_callcount;
+ GCedWithPostConstructionCallback() { cb_callcount = 0; }
+};
+size_t GCedWithPostConstructionCallback::cb_callcount;
+
+class MixinWithPostConstructionCallback {
+ public:
+ static size_t cb_callcount;
+ MixinWithPostConstructionCallback() { cb_callcount = 0; }
+ using MarkerForMixinWithPostConstructionCallback = int;
+};
+size_t MixinWithPostConstructionCallback::cb_callcount;
+
+class GCedWithMixinWithPostConstructionCallback final
+ : public GCed,
+ public MixinWithPostConstructionCallback {};
+
+} // namespace
+} // namespace internal
+
+template <>
+struct PostConstructionCallbackTrait<
+ internal::GCedWithPostConstructionCallback> {
+ static void Call(internal::GCedWithPostConstructionCallback* object) {
+ EXPECT_FALSE(
+ internal::HeapObjectHeader::FromPayload(object).IsInConstruction());
+ internal::GCedWithPostConstructionCallback::cb_callcount++;
+ }
+};
+
+template <typename T>
+struct PostConstructionCallbackTrait<
+ T,
+ internal::void_t<typename T::MarkerForMixinWithPostConstructionCallback>> {
+ // The parameter could just be T*.
+ static void Call(
+ internal::GCedWithMixinWithPostConstructionCallback* object) {
+ EXPECT_FALSE(
+ internal::HeapObjectHeader::FromPayload(object).IsInConstruction());
+ internal::GCedWithMixinWithPostConstructionCallback::cb_callcount++;
+ }
+};
+
+namespace internal {
+
+TEST_F(GarbageCollectedTestWithHeap, PostConstructionCallback) {
+ EXPECT_EQ(0u, GCedWithPostConstructionCallback::cb_callcount);
+ MakeGarbageCollected<GCedWithPostConstructionCallback>(GetHeap());
+ EXPECT_EQ(1u, GCedWithPostConstructionCallback::cb_callcount);
+}
+
+TEST_F(GarbageCollectedTestWithHeap, PostConstructionCallbackForMixin) {
+ EXPECT_EQ(0u, MixinWithPostConstructionCallback::cb_callcount);
+ MakeGarbageCollected<GCedWithMixinWithPostConstructionCallback>(GetHeap());
+ EXPECT_EQ(1u, MixinWithPostConstructionCallback::cb_callcount);
+}
+
+} // namespace internal
+} // namespace cppgc
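A hedged sketch of the extension point covered by the two new PostConstructionCallback tests: specializing cppgc::PostConstructionCallbackTrait for a type makes MakeGarbageCollected invoke Call() once the object is fully constructed and no longer "in construction". The type name, include paths, and counter below are illustrative only:

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"

class Widget : public cppgc::GarbageCollected<Widget> {
 public:
  static size_t post_construction_calls;  // illustrative bookkeeping
  void Trace(cppgc::Visitor*) const {}
};
size_t Widget::post_construction_calls = 0;

namespace cppgc {
template <>
struct PostConstructionCallbackTrait<Widget> {
  static void Call(Widget*) {
    // Runs after the Widget constructor has finished; the object header is
    // guaranteed to no longer be in the "in construction" state here.
    ++Widget::post_construction_calls;
  }
};
}  // namespace cppgc

// MakeGarbageCollected<Widget>(heap) constructs the object and then bumps
// post_construction_calls to 1.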
diff --git a/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc b/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc
deleted file mode 100644
index 5098bdf48e..0000000000
--- a/deps/v8/test/unittests/heap/cppgc/garbage-collected_unittest.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/cppgc/garbage-collected.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace cppgc {
-namespace internal {
-
-namespace {
-
-class GCed : public GarbageCollected<GCed> {};
-class NotGCed {};
-
-} // namespace
-
-TEST(GarbageCollectedTest, GarbageCollectedTrait) {
- EXPECT_FALSE(IsGarbageCollectedType<int>::value);
- EXPECT_FALSE(IsGarbageCollectedType<NotGCed>::value);
- EXPECT_TRUE(IsGarbageCollectedType<GCed>::value);
-}
-
-} // namespace internal
-} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
index e7bfb5a7fe..199b42daca 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-info_unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
index b062489cb3..b062489cb3 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-object-header_unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
new file mode 100644
index 0000000000..fa8897128d
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
@@ -0,0 +1,274 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-page.h"
+
+#include <algorithm>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/internal/accessors.h"
+#include "include/cppgc/persistent.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class PageTest : public testing::TestWithHeap {
+ public:
+ RawHeap& GetRawHeap() { return Heap::From(GetHeap())->raw_heap(); }
+};
+
+template <size_t Size>
+class GCed : public GarbageCollected<GCed<Size>> {
+ public:
+ virtual void Trace(cppgc::Visitor*) const {}
+ char array[Size];
+};
+
+} // namespace
+
+TEST_F(PageTest, GetHeapForAllocatedObject) {
+ auto* gced = MakeGarbageCollected<GCed<1>>(GetHeap());
+ EXPECT_EQ(GetHeap(), GetHeapFromPayload(gced));
+}
+
+TEST_F(PageTest, SpaceIndexing) {
+ RawHeap& heap = GetRawHeap();
+ size_t space = 0u;
+ for (const auto& ptr : heap) {
+ EXPECT_EQ(&heap, ptr.get()->raw_heap());
+ EXPECT_EQ(space, ptr->index());
+ ++space;
+ }
+ EXPECT_GE(space, RawHeap::kNumberOfRegularSpaces);
+}
+
+TEST_F(PageTest, PredefinedSpaces) {
+ using SpaceType = RawHeap::RegularSpaceType;
+ RawHeap& heap = GetRawHeap();
+ {
+ auto* gced = MakeGarbageCollected<GCed<1>>(GetHeap());
+ BaseSpace* space = NormalPage::FromPayload(gced)->space();
+ EXPECT_EQ(heap.Space(SpaceType::kNormal1), space);
+ EXPECT_EQ(0u, space->index());
+ EXPECT_FALSE(space->is_large());
+ }
+ {
+ auto* gced = MakeGarbageCollected<GCed<32>>(GetHeap());
+ BaseSpace* space = NormalPage::FromPayload(gced)->space();
+ EXPECT_EQ(heap.Space(SpaceType::kNormal2), space);
+ EXPECT_EQ(1u, space->index());
+ EXPECT_FALSE(space->is_large());
+ }
+ {
+ auto* gced = MakeGarbageCollected<GCed<64>>(GetHeap());
+ BaseSpace* space = NormalPage::FromPayload(gced)->space();
+ EXPECT_EQ(heap.Space(SpaceType::kNormal3), space);
+ EXPECT_EQ(2u, space->index());
+ EXPECT_FALSE(space->is_large());
+ }
+ {
+ auto* gced = MakeGarbageCollected<GCed<128>>(GetHeap());
+ BaseSpace* space = NormalPage::FromPayload(gced)->space();
+ EXPECT_EQ(heap.Space(SpaceType::kNormal4), space);
+ EXPECT_EQ(3u, space->index());
+ EXPECT_FALSE(space->is_large());
+ }
+ {
+ auto* gced =
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+ BaseSpace* space = NormalPage::FromPayload(gced)->space();
+ EXPECT_EQ(heap.Space(SpaceType::kLarge), space);
+ EXPECT_EQ(4u, space->index());
+ EXPECT_TRUE(space->is_large());
+ }
+}
+
+TEST_F(PageTest, NormalPageIndexing) {
+ using SpaceType = RawHeap::RegularSpaceType;
+ constexpr size_t kExpectedNumberOfPages = 10u;
+ constexpr size_t kObjectSize = 8u;
+ using Type = GCed<kObjectSize>;
+ static const size_t kNumberOfObjects =
+ (kExpectedNumberOfPages * NormalPage::PayloadSize() /
+ (sizeof(Type) + sizeof(HeapObjectHeader))) -
+ kExpectedNumberOfPages;
+
+ std::vector<Persistent<Type>> persistents(kNumberOfObjects);
+ for (auto& p : persistents) {
+ p = MakeGarbageCollected<Type>(GetHeap());
+ }
+
+ const RawHeap& heap = GetRawHeap();
+ const BaseSpace* space = heap.Space(SpaceType::kNormal1);
+ EXPECT_EQ(kExpectedNumberOfPages, space->size());
+
+ size_t page_n = 0;
+ for (const BasePage* page : *space) {
+ EXPECT_FALSE(page->is_large());
+ EXPECT_EQ(space, page->space());
+ ++page_n;
+ }
+ EXPECT_EQ(page_n, space->size());
+}
+
+TEST_F(PageTest, LargePageIndexing) {
+ using SpaceType = RawHeap::RegularSpaceType;
+ constexpr size_t kExpectedNumberOfPages = 10u;
+ constexpr size_t kObjectSize = 2 * kLargeObjectSizeThreshold;
+ using Type = GCed<kObjectSize>;
+ const size_t kNumberOfObjects = kExpectedNumberOfPages;
+
+ std::vector<Persistent<Type>> persistents(kNumberOfObjects);
+ for (auto& p : persistents) {
+ p = MakeGarbageCollected<Type>(GetHeap());
+ }
+
+ const RawHeap& heap = GetRawHeap();
+ const BaseSpace* space = heap.Space(SpaceType::kLarge);
+ EXPECT_EQ(kExpectedNumberOfPages, space->size());
+
+ size_t page_n = 0;
+ for (const BasePage* page : *space) {
+ EXPECT_TRUE(page->is_large());
+ ++page_n;
+ }
+ EXPECT_EQ(page_n, space->size());
+}
+
+TEST_F(PageTest, HeapObjectHeaderOnBasePageIndexing) {
+ constexpr size_t kObjectSize = 8;
+ using Type = GCed<kObjectSize>;
+ const size_t kNumberOfObjects =
+ NormalPage::PayloadSize() / (sizeof(Type) + sizeof(HeapObjectHeader));
+ const size_t kLeftSpace =
+ NormalPage::PayloadSize() % (sizeof(Type) + sizeof(HeapObjectHeader));
+
+ std::vector<Persistent<Type>> persistents(kNumberOfObjects);
+ for (auto& p : persistents) {
+ p = MakeGarbageCollected<Type>(GetHeap());
+ }
+
+ const auto* page =
+ static_cast<NormalPage*>(BasePage::FromPayload(persistents[0].Get()));
+ size_t size = 0;
+ size_t num = 0;
+ for (const HeapObjectHeader& header : *page) {
+ EXPECT_EQ(reinterpret_cast<Address>(persistents[num].Get()),
+ header.Payload());
+ size += header.GetSize();
+ ++num;
+ }
+ EXPECT_EQ(num, persistents.size());
+ EXPECT_EQ(size + kLeftSpace, NormalPage::PayloadSize());
+}
+
+TEST_F(PageTest, HeapObjectHeaderOnLargePageIndexing) {
+ constexpr size_t kObjectSize = 2 * kLargeObjectSizeThreshold;
+ using Type = GCed<kObjectSize>;
+ auto* gced = MakeGarbageCollected<Type>(GetHeap());
+
+ const auto* page = static_cast<LargePage*>(BasePage::FromPayload(gced));
+ const size_t expected_payload_size =
+ RoundUp(sizeof(Type) + sizeof(HeapObjectHeader), kAllocationGranularity);
+ EXPECT_EQ(expected_payload_size, page->PayloadSize());
+
+ const HeapObjectHeader* header = page->ObjectHeader();
+ EXPECT_EQ(reinterpret_cast<Address>(gced), header->Payload());
+}
+
+TEST_F(PageTest, NormalPageCreationDestruction) {
+ RawHeap& heap = GetRawHeap();
+ const PageBackend* backend = Heap::From(GetHeap())->page_backend();
+ auto* space = static_cast<NormalPageSpace*>(
+ heap.Space(RawHeap::RegularSpaceType::kNormal1));
+ auto* page = NormalPage::Create(space);
+ EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
+ EXPECT_TRUE(
+ space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ EXPECT_NE(nullptr, backend->Lookup(page->PayloadStart()));
+
+ space->free_list().Clear();
+ EXPECT_FALSE(
+ space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ space->RemovePage(page);
+ EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ NormalPage::Destroy(page);
+ EXPECT_EQ(nullptr, backend->Lookup(page->PayloadStart()));
+}
+
+TEST_F(PageTest, LargePageCreationDestruction) {
+ constexpr size_t kObjectSize = 2 * kLargeObjectSizeThreshold;
+ RawHeap& heap = GetRawHeap();
+ const PageBackend* backend = Heap::From(GetHeap())->page_backend();
+ auto* space = static_cast<LargePageSpace*>(
+ heap.Space(RawHeap::RegularSpaceType::kLarge));
+ auto* page = LargePage::Create(space, kObjectSize);
+ EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
+ EXPECT_NE(nullptr, backend->Lookup(page->PayloadStart()));
+
+ space->RemovePage(page);
+ EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ LargePage::Destroy(page);
+ EXPECT_EQ(nullptr, backend->Lookup(page->PayloadStart()));
+}
+
+#if DEBUG
+TEST_F(PageTest, UnsweptPageDestruction) {
+ RawHeap& heap = GetRawHeap();
+ {
+ auto* space = static_cast<NormalPageSpace*>(
+ heap.Space(RawHeap::RegularSpaceType::kNormal1));
+ auto* page = NormalPage::Create(space);
+ EXPECT_DEATH_IF_SUPPORTED(NormalPage::Destroy(page), "");
+ }
+ {
+ auto* space = static_cast<LargePageSpace*>(
+ heap.Space(RawHeap::RegularSpaceType::kLarge));
+ auto* page = LargePage::Create(space, 2 * kLargeObjectSizeThreshold);
+ EXPECT_DEATH_IF_SUPPORTED(LargePage::Destroy(page), "");
+    // Detach the page and actually destroy it in the parent process so that
+    // the sweeper doesn't consider it.
+ space->RemovePage(page);
+ LargePage::Destroy(page);
+ }
+}
+#endif
+
+TEST_F(PageTest, ObjectHeaderFromInnerAddress) {
+ {
+ auto* object = MakeGarbageCollected<GCed<64>>(GetHeap());
+ const HeapObjectHeader& expected = HeapObjectHeader::FromPayload(object);
+
+ for (auto* inner_ptr = reinterpret_cast<ConstAddress>(object);
+ inner_ptr < reinterpret_cast<ConstAddress>(object + 1); ++inner_ptr) {
+ const HeapObjectHeader* hoh =
+ BasePage::FromPayload(object)->ObjectHeaderFromInnerAddress(
+ inner_ptr);
+ EXPECT_EQ(&expected, hoh);
+ }
+ }
+ {
+ auto* object =
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+ const HeapObjectHeader& expected = HeapObjectHeader::FromPayload(object);
+
+ const HeapObjectHeader* hoh =
+ BasePage::FromPayload(object)->ObjectHeaderFromInnerAddress(
+ reinterpret_cast<ConstAddress>(object) + kLargeObjectSizeThreshold);
+ EXPECT_EQ(&expected, hoh);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
new file mode 100644
index 0000000000..ca8e225dbc
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap.h"
+
+#include <algorithm>
+#include <iterator>
+#include <numeric>
+
+#include "include/cppgc/allocation.h"
+#include "src/heap/cppgc/globals.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class GCHeapTest : public testing::TestWithHeap {
+ public:
+ void ConservativeGC() {
+ internal::Heap::From(GetHeap())->CollectGarbage(
+ {Heap::GCConfig::StackState::kMayContainHeapPointers});
+ }
+ void PreciseGC() {
+ internal::Heap::From(GetHeap())->CollectGarbage(
+ {Heap::GCConfig::StackState::kNoHeapPointers});
+ }
+};
+
+class Foo : public GarbageCollected<Foo> {
+ public:
+ static size_t destructor_callcount;
+
+ Foo() { destructor_callcount = 0; }
+ ~Foo() { destructor_callcount++; }
+};
+
+size_t Foo::destructor_callcount;
+
+template <size_t Size>
+class GCed : public GarbageCollected<GCed<Size>> {
+ public:
+  void Trace(cppgc::Visitor*) const {}
+ char buf[Size];
+};
+
+} // namespace
+
+TEST_F(GCHeapTest, PreciseGCReclaimsObjectOnStack) {
+ Foo* volatile do_not_access = MakeGarbageCollected<Foo>(GetHeap());
+ USE(do_not_access);
+ EXPECT_EQ(0u, Foo::destructor_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+}
+
+namespace {
+
+const void* ConservativeGCReturningObject(cppgc::Heap* heap,
+ const void* volatile object) {
+ internal::Heap::From(heap)->CollectGarbage(
+ {Heap::GCConfig::StackState::kMayContainHeapPointers});
+ return object;
+}
+
+} // namespace
+
+TEST_F(GCHeapTest, ConservativeGCRetainsObjectOnStack) {
+ Foo* volatile object = MakeGarbageCollected<Foo>(GetHeap());
+ EXPECT_EQ(0u, Foo::destructor_callcount);
+ EXPECT_EQ(object, ConservativeGCReturningObject(GetHeap(), object));
+ EXPECT_EQ(0u, Foo::destructor_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Foo::destructor_callcount);
+}
+
+TEST_F(GCHeapTest, ObjectPayloadSize) {
+ static constexpr size_t kNumberOfObjectsPerArena = 16;
+ static constexpr size_t kObjectSizes[] = {1, 32, 64, 128,
+ 2 * kLargeObjectSizeThreshold};
+
+ Heap::From(GetHeap())->CollectGarbage();
+
+ for (size_t k = 0; k < kNumberOfObjectsPerArena; ++k) {
+ MakeGarbageCollected<GCed<kObjectSizes[0]>>(GetHeap());
+ MakeGarbageCollected<GCed<kObjectSizes[1]>>(GetHeap());
+ MakeGarbageCollected<GCed<kObjectSizes[2]>>(GetHeap());
+ MakeGarbageCollected<GCed<kObjectSizes[3]>>(GetHeap());
+ MakeGarbageCollected<GCed<kObjectSizes[4]>>(GetHeap());
+ }
+
+ size_t aligned_object_sizes[arraysize(kObjectSizes)];
+ std::transform(std::cbegin(kObjectSizes), std::cend(kObjectSizes),
+ std::begin(aligned_object_sizes), [](size_t size) {
+ return RoundUp(size, kAllocationGranularity);
+ });
+ const size_t expected_size = std::accumulate(
+ std::cbegin(aligned_object_sizes), std::cend(aligned_object_sizes), 0u,
+ [](size_t acc, size_t size) {
+ return acc + kNumberOfObjectsPerArena * size;
+ });
+ // TODO(chromium:1056170): Change to EXPECT_EQ when proper sweeping is
+ // implemented.
+ EXPECT_LE(expected_size, Heap::From(GetHeap())->ObjectPayloadSize());
+}
+
+} // namespace internal
+} // namespace cppgc
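Hedged summary of the two helpers defined in GCHeapTest above: both funnel into internal::Heap::CollectGarbage and differ only in the stack-state hint, which decides whether objects reachable only from the stack (compare PreciseGCReclaimsObjectOnStack with ConservativeGCRetainsObjectOnStack) are reclaimed or retained. As written inside namespace cppgc::internal, like the test:

// Precise GC: the stack is assumed to hold no heap pointers, so stack-only
// objects are reclaimed.
internal::Heap::From(heap)->CollectGarbage(
    {Heap::GCConfig::StackState::kNoHeapPointers});

// Conservative GC: the stack is scanned for values that may be heap pointers,
// so the same objects survive.
internal::Heap::From(heap)->CollectGarbage(
    {Heap::GCConfig::StackState::kMayContainHeapPointers});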
diff --git a/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
new file mode 100644
index 0000000000..d9ff910686
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/logging-unittest.cc
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/logging.h"
+
+#include <string>
+
+#include "include/cppgc/source-location.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+// GCC < 9 has a bug due to which calling non-constexpr functions is not
+// allowed even on a constexpr path:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67026.
+#if !defined(__GNUC__) || defined(__clang__)
+constexpr int CheckInConstexpr(int a) {
+ CPPGC_DCHECK(a > 0);
+ CPPGC_CHECK(a > 0);
+ return a;
+}
+#endif
+} // namespace
+
+TEST(LoggingTest, Pass) {
+ CPPGC_DCHECK(true);
+ CPPGC_CHECK(true);
+}
+
+TEST(LoggingTest, Fail) {
+#if DEBUG
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_DCHECK(false), "");
+#endif
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_CHECK(false), "");
+}
+
+TEST(LoggingTest, DontReportUnused) {
+ int a = 1;
+ CPPGC_DCHECK(a);
+}
+
+#if !defined(__GNUC__) || defined(__clang__)
+TEST(LoggingTest, ConstexprContext) {
+ constexpr int a = CheckInConstexpr(1);
+ CPPGC_DCHECK(a);
+}
+#endif
+
+#if DEBUG && !defined(OFFICIAL_BUILD)
+TEST(LoggingTest, Message) {
+ using ::testing::ContainsRegex;
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_DCHECK(5 == 7),
+ ContainsRegex("failed.*5 == 7"));
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_CHECK(5 == 7),
+ ContainsRegex("failed.*5 == 7"));
+}
+
+#if CPPGC_SUPPORTS_SOURCE_LOCATION
+TEST(LoggingTest, SourceLocation) {
+ using ::testing::AllOf;
+ using ::testing::HasSubstr;
+ constexpr auto loc = SourceLocation::Current();
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_DCHECK(false),
+ AllOf(HasSubstr(loc.FileName()),
+ HasSubstr(std::to_string(loc.Line() + 3))));
+ EXPECT_DEATH_IF_SUPPORTED(CPPGC_CHECK(false),
+ AllOf(HasSubstr(loc.FileName()),
+ HasSubstr(std::to_string(loc.Line() + 6))));
+}
+#endif // CPPGC_SUPPORTS_SOURCE_LOCATION
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
new file mode 100644
index 0000000000..76df1dbf58
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -0,0 +1,188 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marker.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/member.h"
+#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class MarkerTest : public testing::TestWithHeap {
+ public:
+ using MarkingConfig = Marker::MarkingConfig;
+
+ void DoMarking(MarkingConfig config) {
+ Marker marker(Heap::From(GetHeap()));
+ marker.StartMarking(config);
+ marker.FinishMarking();
+ marker.ProcessWeakness();
+ }
+};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void SetChild(GCed* child) { child_ = child; }
+ void SetWeakChild(GCed* child) { weak_child_ = child; }
+ GCed* child() const { return child_.Get(); }
+ GCed* weak_child() const { return weak_child_.Get(); }
+ void Trace(cppgc::Visitor* visitor) const {
+ visitor->Trace(child_);
+ visitor->Trace(weak_child_);
+ }
+
+ private:
+ Member<GCed> child_;
+ WeakMember<GCed> weak_child_;
+};
+
+template <typename T>
+V8_NOINLINE T access(volatile const T& t) {
+ return t;
+}
+
+} // namespace
+
+TEST_F(MarkerTest, PersistentIsMarked) {
+ Persistent<GCed> object = MakeGarbageCollected<GCed>(GetHeap());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+ EXPECT_FALSE(header.IsMarked());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST_F(MarkerTest, ReachableMemberIsMarked) {
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ parent->SetChild(MakeGarbageCollected<GCed>(GetHeap()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(parent->child());
+ EXPECT_FALSE(header.IsMarked());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST_F(MarkerTest, UnreachableMemberIsNotMarked) {
+ Member<GCed> object = MakeGarbageCollected<GCed>(GetHeap());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+ EXPECT_FALSE(header.IsMarked());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST_F(MarkerTest, ObjectReachableFromStackIsMarked) {
+ GCed* object = MakeGarbageCollected<GCed>(GetHeap());
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(object).IsMarked());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kMayContainHeapPointers));
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(object).IsMarked());
+ access(object);
+}
+
+TEST_F(MarkerTest, ObjectReachableOnlyFromStackIsNotMarkedIfStackIsEmpty) {
+ GCed* object = MakeGarbageCollected<GCed>(GetHeap());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+ EXPECT_FALSE(header.IsMarked());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_FALSE(header.IsMarked());
+ access(object);
+}
+
+TEST_F(MarkerTest, WeakReferenceToUnreachableObjectIsCleared) {
+ {
+ WeakPersistent<GCed> weak_object = MakeGarbageCollected<GCed>(GetHeap());
+ EXPECT_TRUE(weak_object);
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_FALSE(weak_object);
+ }
+ {
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ parent->SetWeakChild(MakeGarbageCollected<GCed>(GetHeap()));
+ EXPECT_TRUE(parent->weak_child());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_FALSE(parent->weak_child());
+ }
+}
+
+TEST_F(MarkerTest, WeakReferenceToReachableObjectIsNotCleared) {
+ // Reachable from Persistent
+ {
+ Persistent<GCed> object = MakeGarbageCollected<GCed>(GetHeap());
+ WeakPersistent<GCed> weak_object(object);
+ EXPECT_TRUE(weak_object);
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(weak_object);
+ }
+ {
+ Persistent<GCed> object = MakeGarbageCollected<GCed>(GetHeap());
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ parent->SetWeakChild(object);
+ EXPECT_TRUE(parent->weak_child());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(parent->weak_child());
+ }
+ // Reachable from Member
+ {
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ WeakPersistent<GCed> weak_object(MakeGarbageCollected<GCed>(GetHeap()));
+ parent->SetChild(weak_object);
+ EXPECT_TRUE(weak_object);
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(weak_object);
+ }
+ {
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ parent->SetChild(MakeGarbageCollected<GCed>(GetHeap()));
+ parent->SetWeakChild(parent->child());
+ EXPECT_TRUE(parent->weak_child());
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(parent->weak_child());
+ }
+ // Reachable from stack
+ {
+ GCed* object = MakeGarbageCollected<GCed>(GetHeap());
+ WeakPersistent<GCed> weak_object(object);
+ EXPECT_TRUE(weak_object);
+ DoMarking(
+ MarkingConfig(MarkingConfig::StackState::kMayContainHeapPointers));
+ EXPECT_TRUE(weak_object);
+ access(object);
+ }
+ {
+ GCed* object = MakeGarbageCollected<GCed>(GetHeap());
+ Persistent<GCed> parent = MakeGarbageCollected<GCed>(GetHeap());
+ parent->SetWeakChild(object);
+ EXPECT_TRUE(parent->weak_child());
+ DoMarking(
+ MarkingConfig(MarkingConfig::StackState::kMayContainHeapPointers));
+ EXPECT_TRUE(parent->weak_child());
+ access(object);
+ }
+}
+
+TEST_F(MarkerTest, DeepHierarchyIsMarked) {
+ static constexpr int kHierarchyDepth = 10;
+ Persistent<GCed> root = MakeGarbageCollected<GCed>(GetHeap());
+ GCed* parent = root;
+ for (int i = 0; i < kHierarchyDepth; ++i) {
+ parent->SetChild(MakeGarbageCollected<GCed>(GetHeap()));
+ parent->SetWeakChild(parent->child());
+ parent = parent->child();
+ }
+ DoMarking(MarkingConfig(MarkingConfig::StackState::kNoHeapPointers));
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(root).IsMarked());
+ parent = root;
+ for (int i = 0; i < kHierarchyDepth; ++i) {
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(parent->child()).IsMarked());
+ EXPECT_TRUE(parent->weak_child());
+ parent = parent->child();
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
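Hedged summary of the marking driver that MarkerTest::DoMarking wraps above: one cycle consists of three calls, and the weakness pass is what the WeakReference* tests observe (weak references to objects left unmarked are cleared, references to marked targets are kept). As written inside the test fixture:

Marker marker(Heap::From(GetHeap()));
marker.StartMarking(config);   // begin a cycle with the given stack-state config
marker.FinishMarking();        // run marking to completion
marker.ProcessWeakness();      // clear weak references whose targets are unmarked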
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
new file mode 100644
index 0000000000..2ff04ee89c
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
@@ -0,0 +1,285 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marking-visitor.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/member.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/source-location.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/marker.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class MarkingVisitorTest : public testing::TestWithHeap {
+ public:
+ MarkingVisitorTest()
+ : marker_(std::make_unique<Marker>(Heap::From(GetHeap()))) {}
+ ~MarkingVisitorTest() { marker_->ClearAllWorklistsForTesting(); }
+
+ Marker* GetMarker() { return marker_.get(); }
+
+ private:
+ std::unique_ptr<Marker> marker_;
+};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(cppgc::Visitor*) const {}
+};
+
+class Mixin : public GarbageCollectedMixin {};
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
+ USING_GARBAGE_COLLECTED_MIXIN();
+
+ public:
+ void Trace(cppgc::Visitor*) const override {}
+};
+
+} // namespace
+
+// Strong references are marked.
+
+TEST_F(MarkingVisitorTest, MarkMember) {
+ Member<GCed> object(MakeGarbageCollected<GCed>(GetHeap()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.Trace(object);
+
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, MarkMemberMixin) {
+ GCedWithMixin* object(MakeGarbageCollected<GCedWithMixin>(GetHeap()));
+ Member<Mixin> mixin(object);
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.Trace(mixin);
+
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, MarkPersistent) {
+ Persistent<GCed> object(MakeGarbageCollected<GCed>(GetHeap()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.TraceRoot(object, SourceLocation::Current());
+
+ EXPECT_TRUE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, MarkPersistentMixin) {
+ GCedWithMixin* object(MakeGarbageCollected<GCedWithMixin>(GetHeap()));
+ Persistent<Mixin> mixin(object);
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.TraceRoot(mixin, SourceLocation::Current());
+
+ EXPECT_TRUE(header.IsMarked());
+}
+
+// Weak references are not marked.
+
+TEST_F(MarkingVisitorTest, DontMarkWeakMember) {
+ WeakMember<GCed> object(MakeGarbageCollected<GCed>(GetHeap()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.Trace(object);
+
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakMemberMixin) {
+ GCedWithMixin* object(MakeGarbageCollected<GCedWithMixin>(GetHeap()));
+ WeakMember<Mixin> mixin(object);
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.Trace(mixin);
+
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakPersistent) {
+ WeakPersistent<GCed> object(MakeGarbageCollected<GCed>(GetHeap()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.TraceRoot(object, SourceLocation::Current());
+
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakPersistentMixin) {
+ GCedWithMixin* object(MakeGarbageCollected<GCedWithMixin>(GetHeap()));
+ WeakPersistent<Mixin> mixin(object);
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.TraceRoot(mixin, SourceLocation::Current());
+
+ EXPECT_FALSE(header.IsMarked());
+}
+
+// In-construction objects are not marked.
+
+namespace {
+
+class GCedWithInConstructionCallback
+ : public GarbageCollected<GCedWithInConstructionCallback> {
+ public:
+ template <typename Callback>
+ explicit GCedWithInConstructionCallback(Callback callback) {
+ callback(this);
+ }
+ void Trace(cppgc::Visitor*) const {}
+};
+
+class MixinWithInConstructionCallback : public GarbageCollectedMixin {
+ public:
+ template <typename Callback>
+ explicit MixinWithInConstructionCallback(Callback callback) {
+ callback(this);
+ }
+};
+class GCedWithMixinWithInConstructionCallback
+ : public GarbageCollected<GCedWithMixinWithInConstructionCallback>,
+ public MixinWithInConstructionCallback {
+ USING_GARBAGE_COLLECTED_MIXIN();
+
+ public:
+ template <typename Callback>
+ explicit GCedWithMixinWithInConstructionCallback(Callback callback)
+ : MixinWithInConstructionCallback(callback) {}
+ void Trace(cppgc::Visitor*) const override {}
+};
+
+} // namespace
+
+TEST_F(MarkingVisitorTest, DontMarkMemberInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithInConstructionCallback>(
+ GetHeap(), [&visitor](GCedWithInConstructionCallback* obj) {
+ Member<GCedWithInConstructionCallback> object(obj);
+ visitor.Trace(object);
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkMemberMixinInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithMixinWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithMixinWithInConstructionCallback>(
+ GetHeap(), [&visitor](MixinWithInConstructionCallback* obj) {
+ Member<MixinWithInConstructionCallback> mixin(obj);
+ visitor.Trace(mixin);
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakMemberInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithInConstructionCallback>(
+ GetHeap(), [&visitor](GCedWithInConstructionCallback* obj) {
+ WeakMember<GCedWithInConstructionCallback> object(obj);
+ visitor.Trace(object);
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakMemberMixinInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithMixinWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithMixinWithInConstructionCallback>(
+ GetHeap(), [&visitor](MixinWithInConstructionCallback* obj) {
+ WeakMember<MixinWithInConstructionCallback> mixin(obj);
+ visitor.Trace(mixin);
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkPersistentInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithInConstructionCallback>(
+ GetHeap(), [&visitor](GCedWithInConstructionCallback* obj) {
+ Persistent<GCedWithInConstructionCallback> object(obj);
+ visitor.TraceRoot(object, SourceLocation::Current());
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkPersistentMixinInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithMixinWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithMixinWithInConstructionCallback>(
+ GetHeap(), [&visitor](MixinWithInConstructionCallback* obj) {
+ Persistent<MixinWithInConstructionCallback> mixin(obj);
+ visitor.TraceRoot(mixin, SourceLocation::Current());
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakPersistentInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithInConstructionCallback>(
+ GetHeap(), [&visitor](GCedWithInConstructionCallback* obj) {
+ WeakPersistent<GCedWithInConstructionCallback> object(obj);
+ visitor.TraceRoot(object, SourceLocation::Current());
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, DontMarkWeakPersistentMixinInConstruction) {
+ MutatorThreadMarkingVisitor visitor(GetMarker());
+ GCedWithMixinWithInConstructionCallback* gced =
+ MakeGarbageCollected<GCedWithMixinWithInConstructionCallback>(
+ GetHeap(), [&visitor](MixinWithInConstructionCallback* obj) {
+ WeakPersistent<MixinWithInConstructionCallback> mixin(obj);
+ visitor.TraceRoot(mixin, SourceLocation::Current());
+ });
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/member-unittest.cc b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
new file mode 100644
index 0000000000..1741498649
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/member-unittest.cc
@@ -0,0 +1,304 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/member.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/type-traits.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+struct GCed : GarbageCollected<GCed> {
+ virtual void Trace(cppgc::Visitor*) const {}
+};
+struct DerivedGCed : GCed {
+ void Trace(cppgc::Visitor* v) const override { GCed::Trace(v); }
+};
+
+// Compile tests.
+static_assert(!IsWeakV<Member<GCed>>, "Member is always strong.");
+static_assert(IsWeakV<WeakMember<GCed>>, "WeakMember is always weak.");
+
+struct CustomWriteBarrierPolicy {
+ static size_t InitializingWriteBarriersTriggered;
+ static size_t AssigningWriteBarriersTriggered;
+ static void InitializingBarrier(const void* slot, const void* value) {
+ ++InitializingWriteBarriersTriggered;
+ }
+ static void AssigningBarrier(const void* slot, const void* value) {
+ ++AssigningWriteBarriersTriggered;
+ }
+};
+size_t CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered = 0;
+size_t CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered = 0;
+
+using MemberWithCustomBarrier =
+ BasicMember<GCed, StrongMemberTag, CustomWriteBarrierPolicy>;
+
+struct CustomCheckingPolicy {
+ static std::vector<GCed*> Cached;
+ static size_t ChecksTriggered;
+ void CheckPointer(const void* ptr) {
+ EXPECT_NE(Cached.cend(), std::find(Cached.cbegin(), Cached.cend(), ptr));
+ ++ChecksTriggered;
+ }
+};
+std::vector<GCed*> CustomCheckingPolicy::Cached;
+size_t CustomCheckingPolicy::ChecksTriggered = 0;
+
+using MemberWithCustomChecking =
+ BasicMember<GCed, StrongMemberTag, DijkstraWriteBarrierPolicy,
+ CustomCheckingPolicy>;
+
+class MemberTest : public testing::TestSupportingAllocationOnly {};
+
+} // namespace
+
+template <template <typename> class MemberType>
+void EmptyTest() {
+ {
+ MemberType<GCed> empty;
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ }
+ {
+ MemberType<GCed> empty = nullptr;
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ }
+}
+
+TEST_F(MemberTest, Empty) {
+ EmptyTest<Member>();
+ EmptyTest<WeakMember>();
+ EmptyTest<UntracedMember>();
+}
+
+template <template <typename> class MemberType>
+void ClearTest(cppgc::Heap* heap) {
+ MemberType<GCed> member = MakeGarbageCollected<GCed>(heap);
+ EXPECT_NE(nullptr, member.Get());
+ member.Clear();
+ EXPECT_EQ(nullptr, member.Get());
+}
+
+TEST_F(MemberTest, Clear) {
+ cppgc::Heap* heap = GetHeap();
+ ClearTest<Member>(heap);
+ ClearTest<WeakMember>(heap);
+ ClearTest<UntracedMember>(heap);
+}
+
+template <template <typename> class MemberType>
+void ReleaseTest(cppgc::Heap* heap) {
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ MemberType<GCed> member = gced;
+ EXPECT_NE(nullptr, member.Get());
+ GCed* raw = member.Release();
+ EXPECT_EQ(gced, raw);
+ EXPECT_EQ(nullptr, member.Get());
+}
+
+TEST_F(MemberTest, Release) {
+ cppgc::Heap* heap = GetHeap();
+ ReleaseTest<Member>(heap);
+ ReleaseTest<WeakMember>(heap);
+ ReleaseTest<UntracedMember>(heap);
+}
+
+template <template <typename> class MemberType1,
+ template <typename> class MemberType2>
+void SwapTest(cppgc::Heap* heap) {
+ GCed* gced1 = MakeGarbageCollected<GCed>(heap);
+ GCed* gced2 = MakeGarbageCollected<GCed>(heap);
+ MemberType1<GCed> member1 = gced1;
+ MemberType2<GCed> member2 = gced2;
+ EXPECT_EQ(gced1, member1.Get());
+ EXPECT_EQ(gced2, member2.Get());
+ member1.Swap(member2);
+ EXPECT_EQ(gced2, member1.Get());
+ EXPECT_EQ(gced1, member2.Get());
+}
+
+TEST_F(MemberTest, Swap) {
+ cppgc::Heap* heap = GetHeap();
+ SwapTest<Member, Member>(heap);
+ SwapTest<Member, WeakMember>(heap);
+ SwapTest<Member, UntracedMember>(heap);
+ SwapTest<WeakMember, Member>(heap);
+ SwapTest<WeakMember, WeakMember>(heap);
+ SwapTest<WeakMember, UntracedMember>(heap);
+ SwapTest<UntracedMember, Member>(heap);
+ SwapTest<UntracedMember, WeakMember>(heap);
+ SwapTest<UntracedMember, UntracedMember>(heap);
+}
+
+template <template <typename> class MemberType1,
+ template <typename> class MemberType2>
+void HeterogeneousConversionTest(cppgc::Heap* heap) {
+ {
+ MemberType1<GCed> member1 = MakeGarbageCollected<GCed>(heap);
+ MemberType2<GCed> member2 = member1;
+ EXPECT_EQ(member1.Get(), member2.Get());
+ }
+ {
+ MemberType1<DerivedGCed> member1 = MakeGarbageCollected<DerivedGCed>(heap);
+ MemberType2<GCed> member2 = member1;
+ EXPECT_EQ(member1.Get(), member2.Get());
+ }
+ {
+ MemberType1<GCed> member1 = MakeGarbageCollected<GCed>(heap);
+ MemberType2<GCed> member2;
+ member2 = member1;
+ EXPECT_EQ(member1.Get(), member2.Get());
+ }
+ {
+ MemberType1<DerivedGCed> member1 = MakeGarbageCollected<DerivedGCed>(heap);
+ MemberType2<GCed> member2;
+ member2 = member1;
+ EXPECT_EQ(member1.Get(), member2.Get());
+ }
+}
+
+TEST_F(MemberTest, HeterogeneousInterface) {
+ cppgc::Heap* heap = GetHeap();
+ HeterogeneousConversionTest<Member, Member>(heap);
+ HeterogeneousConversionTest<Member, WeakMember>(heap);
+ HeterogeneousConversionTest<Member, UntracedMember>(heap);
+ HeterogeneousConversionTest<WeakMember, Member>(heap);
+ HeterogeneousConversionTest<WeakMember, WeakMember>(heap);
+ HeterogeneousConversionTest<WeakMember, UntracedMember>(heap);
+ HeterogeneousConversionTest<UntracedMember, Member>(heap);
+ HeterogeneousConversionTest<UntracedMember, WeakMember>(heap);
+ HeterogeneousConversionTest<UntracedMember, UntracedMember>(heap);
+}
+
+template <template <typename> class MemberType,
+ template <typename> class PersistentType>
+void PersistentConversionTest(cppgc::Heap* heap) {
+ {
+ PersistentType<GCed> persistent = MakeGarbageCollected<GCed>(heap);
+ MemberType<GCed> member = persistent;
+ EXPECT_EQ(persistent.Get(), member.Get());
+ }
+ {
+ PersistentType<DerivedGCed> persistent =
+ MakeGarbageCollected<DerivedGCed>(heap);
+ MemberType<GCed> member = persistent;
+ EXPECT_EQ(persistent.Get(), member.Get());
+ }
+ {
+ PersistentType<GCed> persistent = MakeGarbageCollected<GCed>(heap);
+ MemberType<GCed> member;
+ member = persistent;
+ EXPECT_EQ(persistent.Get(), member.Get());
+ }
+ {
+ PersistentType<DerivedGCed> persistent =
+ MakeGarbageCollected<DerivedGCed>(heap);
+ MemberType<GCed> member;
+ member = persistent;
+ EXPECT_EQ(persistent.Get(), member.Get());
+ }
+}
+
+TEST_F(MemberTest, PersistentConversion) {
+ cppgc::Heap* heap = GetHeap();
+ PersistentConversionTest<Member, Persistent>(heap);
+ PersistentConversionTest<Member, WeakPersistent>(heap);
+ PersistentConversionTest<WeakMember, Persistent>(heap);
+ PersistentConversionTest<WeakMember, WeakPersistent>(heap);
+ PersistentConversionTest<UntracedMember, Persistent>(heap);
+ PersistentConversionTest<UntracedMember, WeakPersistent>(heap);
+}
+
+template <template <typename> class MemberType1,
+ template <typename> class MemberType2>
+void EqualityTest(cppgc::Heap* heap) {
+ {
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ MemberType1<GCed> member1 = gced;
+ MemberType2<GCed> member2 = gced;
+ EXPECT_TRUE(member1 == member2);
+ EXPECT_FALSE(member1 != member2);
+ member2 = member1;
+ EXPECT_TRUE(member1 == member2);
+ EXPECT_FALSE(member1 != member2);
+ }
+ {
+ MemberType1<GCed> member1 = MakeGarbageCollected<GCed>(heap);
+ MemberType2<GCed> member2 = MakeGarbageCollected<GCed>(heap);
+ EXPECT_TRUE(member1 != member2);
+ EXPECT_FALSE(member1 == member2);
+ }
+}
+
+TEST_F(MemberTest, EqualityTest) {
+ cppgc::Heap* heap = GetHeap();
+ EqualityTest<Member, Member>(heap);
+ EqualityTest<Member, WeakMember>(heap);
+ EqualityTest<Member, UntracedMember>(heap);
+ EqualityTest<WeakMember, Member>(heap);
+ EqualityTest<WeakMember, WeakMember>(heap);
+ EqualityTest<WeakMember, UntracedMember>(heap);
+ EqualityTest<UntracedMember, Member>(heap);
+ EqualityTest<UntracedMember, WeakMember>(heap);
+ EqualityTest<UntracedMember, UntracedMember>(heap);
+}
+
+TEST_F(MemberTest, WriteBarrierTriggered) {
+ CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered = 0;
+ CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered = 0;
+ GCed* gced = MakeGarbageCollected<GCed>(GetHeap());
+ MemberWithCustomBarrier member1 = gced;
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered);
+ EXPECT_EQ(0u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+ member1 = gced;
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered);
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+ member1 = nullptr;
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered);
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+ MemberWithCustomBarrier member2 = nullptr;
+ // No initializing barriers for std::nullptr_t.
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered);
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+ member2 = kSentinelPointer;
+ EXPECT_EQ(kSentinelPointer, member2.Get());
+ EXPECT_EQ(kSentinelPointer, member2);
+ // No initializing barriers for pointer sentinel.
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::InitializingWriteBarriersTriggered);
+ EXPECT_EQ(1u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+ member2.Swap(member1);
+ EXPECT_EQ(3u, CustomWriteBarrierPolicy::AssigningWriteBarriersTriggered);
+}
+
+TEST_F(MemberTest, CheckingPolicy) {
+ static constexpr size_t kElements = 64u;
+ CustomCheckingPolicy::ChecksTriggered = 0u;
+
+ for (std::size_t i = 0; i < kElements; ++i) {
+ CustomCheckingPolicy::Cached.push_back(
+ MakeGarbageCollected<GCed>(GetHeap()));
+ }
+
+ MemberWithCustomChecking member;
+ for (GCed* item : CustomCheckingPolicy::Cached) {
+ member = item;
+ }
+ EXPECT_EQ(CustomCheckingPolicy::Cached.size(),
+ CustomCheckingPolicy::ChecksTriggered);
+}
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
new file mode 100644
index 0000000000..2425889c1c
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/object-start-bitmap-unittest.cc
@@ -0,0 +1,189 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/object-start-bitmap.h"
+
+#include "include/cppgc/allocation.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+bool IsEmpty(const ObjectStartBitmap& bitmap) {
+ size_t count = 0;
+ bitmap.Iterate([&count](Address) { count++; });
+ return count == 0;
+}
+
+// Abstraction for objects that hides ObjectStartBitmap::kGranularity and
+// the base address, as getting either of them wrong will result in failed DCHECKs.
+class Object {
+ public:
+ static Address kBaseOffset;
+
+ explicit Object(size_t number) : number_(number) {
+ const size_t max_entries = ObjectStartBitmap::MaxEntries();
+ EXPECT_GE(max_entries, number_);
+ }
+
+ Address address() const {
+ return kBaseOffset + ObjectStartBitmap::Granularity() * number_;
+ }
+
+ HeapObjectHeader* header() const {
+ return reinterpret_cast<HeapObjectHeader*>(address());
+ }
+
+ // Allow implicitly converting Object to Address.
+ operator Address() const { return address(); } // NOLINT
+
+ private:
+ const size_t number_;
+};
+
+Address Object::kBaseOffset = reinterpret_cast<Address>(0x4000);
+
+} // namespace
+
+TEST(ObjectStartBitmapTest, MoreThanZeroEntriesPossible) {
+ const size_t max_entries = ObjectStartBitmap::MaxEntries();
+ EXPECT_LT(0u, max_entries);
+}
+
+TEST(ObjectStartBitmapTest, InitialEmpty) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ EXPECT_TRUE(IsEmpty(bitmap));
+}
+
+TEST(ObjectStartBitmapTest, SetBitImpliesNonEmpty) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ bitmap.SetBit(Object(0));
+ EXPECT_FALSE(IsEmpty(bitmap));
+}
+
+TEST(ObjectStartBitmapTest, SetBitCheckBit) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object(7);
+ bitmap.SetBit(object);
+ EXPECT_TRUE(bitmap.CheckBit(object));
+}
+
+TEST(ObjectStartBitmapTest, SetBitClearbitCheckBit) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object(77);
+ bitmap.SetBit(object);
+ bitmap.ClearBit(object);
+ EXPECT_FALSE(bitmap.CheckBit(object));
+}
+
+TEST(ObjectStartBitmapTest, SetBitClearBitImpliesEmpty) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object(123);
+ bitmap.SetBit(object);
+ bitmap.ClearBit(object);
+ EXPECT_TRUE(IsEmpty(bitmap));
+}
+
+TEST(ObjectStartBitmapTest, AdjacentObjectsAtBegin) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object0(0);
+ Object object1(1);
+ bitmap.SetBit(object0);
+ bitmap.SetBit(object1);
+ EXPECT_FALSE(bitmap.CheckBit(Object(3)));
+ size_t count = 0;
+ bitmap.Iterate([&count, object0, object1](Address current) {
+ if (count == 0) {
+ EXPECT_EQ(object0.address(), current);
+ } else if (count == 1) {
+ EXPECT_EQ(object1.address(), current);
+ }
+ count++;
+ });
+ EXPECT_EQ(2u, count);
+}
+
+#if defined(V8_CC_MSVC)
+#define MAYBE_AdjacentObjectsAtEnd DISABLED_AdjacentObjectsAtEnd
+#else // !defined(V8_CC_MSVC)
+#define MAYBE_AdjacentObjectsAtEnd AdjacentObjectsAtEnd
+#endif // !defined(V8_CC_MSVC)
+TEST(ObjectStartBitmapTest, MAYBE_AdjacentObjectsAtEnd) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ const size_t last_entry_index = ObjectStartBitmap::MaxEntries() - 1;
+ Object object0(last_entry_index - 1);
+ Object object1(last_entry_index);
+ bitmap.SetBit(object0);
+ bitmap.SetBit(object1);
+ EXPECT_FALSE(bitmap.CheckBit(Object(last_entry_index - 2)));
+ size_t count = 0;
+ bitmap.Iterate([&count, object0, object1](Address current) {
+ if (count == 0) {
+ EXPECT_EQ(object0.address(), current);
+ } else if (count == 1) {
+ EXPECT_EQ(object1.address(), current);
+ }
+ count++;
+ });
+ EXPECT_EQ(2u, count);
+}
+
+TEST(ObjectStartBitmapTest, FindHeaderExact) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object(654);
+ bitmap.SetBit(object);
+ EXPECT_EQ(object.header(), bitmap.FindHeader(object.address()));
+}
+
+TEST(ObjectStartBitmapTest, FindHeaderApproximate) {
+ static const size_t kInternalDelta = 37;
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object(654);
+ bitmap.SetBit(object);
+ EXPECT_EQ(object.header(),
+ bitmap.FindHeader(object.address() + kInternalDelta));
+}
+
+TEST(ObjectStartBitmapTest, FindHeaderIteratingWholeBitmap) {
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object_to_find(Object(0));
+ Address hint_index = Object(ObjectStartBitmap::MaxEntries() - 1);
+ bitmap.SetBit(object_to_find);
+ EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint_index));
+}
+
+TEST(ObjectStartBitmapTest, FindHeaderNextCell) {
+ // This white box test makes use of the fact that cells are of type uint8_t.
+ const size_t kCellSize = sizeof(uint8_t);
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object_to_find(Object(kCellSize - 1));
+ Address hint = Object(kCellSize);
+ bitmap.SetBit(Object(0));
+ bitmap.SetBit(object_to_find);
+ EXPECT_EQ(object_to_find.header(), bitmap.FindHeader(hint));
+}
+
+TEST(ObjectStartBitmapTest, FindHeaderSameCell) {
+ // This white box test makes use of the fact that cells are of type uint8_t.
+ const size_t kCellSize = sizeof(uint8_t);
+ ObjectStartBitmap bitmap(Object::kBaseOffset);
+ Object object_to_find(Object(kCellSize - 1));
+ bitmap.SetBit(Object(0));
+ bitmap.SetBit(object_to_find);
+ EXPECT_EQ(object_to_find.header(),
+ bitmap.FindHeader(object_to_find.address()));
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
new file mode 100644
index 0000000000..1265d6994f
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
@@ -0,0 +1,308 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/page-memory.h"
+
+#include "src/base/page-allocator.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+TEST(MemoryRegionTest, Construct) {
+ constexpr size_t kSize = 17;
+ uint8_t dummy[kSize];
+ const MemoryRegion region(dummy, kSize);
+ EXPECT_EQ(dummy, region.base());
+ EXPECT_EQ(kSize, region.size());
+ EXPECT_EQ(dummy + kSize, region.end());
+}
+
+namespace {
+
+Address AtOffset(uint8_t* base, intptr_t offset) {
+ return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(base) + offset);
+}
+
+} // namespace
+
+TEST(MemoryRegionTest, ContainsAddress) {
+ constexpr size_t kSize = 7;
+ uint8_t dummy[kSize];
+ const MemoryRegion region(dummy, kSize);
+ EXPECT_FALSE(region.Contains(AtOffset(dummy, -1)));
+ EXPECT_TRUE(region.Contains(dummy));
+ EXPECT_TRUE(region.Contains(dummy + kSize - 1));
+ EXPECT_FALSE(region.Contains(AtOffset(dummy, kSize)));
+}
+
+TEST(MemoryRegionTest, ContainsMemoryRegion) {
+ constexpr size_t kSize = 7;
+ uint8_t dummy[kSize];
+ const MemoryRegion region(dummy, kSize);
+ const MemoryRegion contained_region1(dummy, kSize - 1);
+ EXPECT_TRUE(region.Contains(contained_region1));
+ const MemoryRegion contained_region2(dummy + 1, kSize - 1);
+ EXPECT_TRUE(region.Contains(contained_region2));
+ const MemoryRegion not_contained_region1(AtOffset(dummy, -1), kSize);
+ EXPECT_FALSE(region.Contains(not_contained_region1));
+ const MemoryRegion not_contained_region2(AtOffset(dummy, kSize), 1);
+ EXPECT_FALSE(region.Contains(not_contained_region2));
+}
+
+TEST(PageMemoryTest, Construct) {
+ constexpr size_t kOverallSize = 17;
+ uint8_t dummy[kOverallSize];
+ const MemoryRegion overall_region(dummy, kOverallSize);
+ const MemoryRegion writeable_region(dummy + 1, kOverallSize - 2);
+ const PageMemory page_memory(overall_region, writeable_region);
+ EXPECT_EQ(dummy, page_memory.overall_region().base());
+ EXPECT_EQ(dummy + kOverallSize, page_memory.overall_region().end());
+ EXPECT_EQ(dummy + 1, page_memory.writeable_region().base());
+ EXPECT_EQ(dummy + kOverallSize - 1, page_memory.writeable_region().end());
+}
+
+#if DEBUG
+
+TEST(PageMemoryDeathTest, ConstructNonContainedRegions) {
+ constexpr size_t kOverallSize = 17;
+ uint8_t dummy[kOverallSize];
+ const MemoryRegion overall_region(dummy, kOverallSize);
+ const MemoryRegion writeable_region(dummy + 1, kOverallSize);
+ EXPECT_DEATH_IF_SUPPORTED(PageMemory(overall_region, writeable_region), "");
+}
+
+#endif // DEBUG
+
+TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ pmr->UnprotectForTesting();
+ MemoryRegion prev_overall;
+ for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
+ const PageMemory pm = pmr->GetPageMemory(i);
+ // Previous PageMemory aligns with the current one.
+ if (prev_overall.base()) {
+ EXPECT_EQ(prev_overall.end(), pm.overall_region().base());
+ }
+ prev_overall =
+ MemoryRegion(pm.overall_region().base(), pm.overall_region().size());
+ // Writeable region is contained in overall region.
+ EXPECT_TRUE(pm.overall_region().Contains(pm.writeable_region()));
+ EXPECT_EQ(0u, pm.writeable_region().base()[0]);
+ EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
+ // Front guard page.
+ EXPECT_EQ(pm.writeable_region().base(),
+ pm.overall_region().base() + kGuardPageSize);
+ // Back guard page.
+ EXPECT_EQ(pm.overall_region().end(),
+ pm.writeable_region().end() + kGuardPageSize);
+ }
+}
+
+TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+ pmr->UnprotectForTesting();
+ const PageMemory pm = pmr->GetPageMemory();
+ EXPECT_LE(1024u, pm.writeable_region().size());
+ EXPECT_EQ(0u, pm.writeable_region().base()[0]);
+ EXPECT_EQ(0u, pm.writeable_region().end()[-1]);
+}
+
+TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
+ // This tests that the testing allocator actually uses protected guard
+ // regions.
+ v8::base::PageAllocator allocator;
+#if defined(V8_HOST_ARCH_PPC64) && !defined(_AIX)
+ EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+#else // !V8_HOST_ARCH_PPC64
+ EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+#endif // !V8_HOST_ARCH_PPC64
+}
+
+namespace {
+
+V8_NOINLINE uint8_t access(volatile const uint8_t& u) { return u; }
+
+} // namespace
+
+TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
+  // The full sequence is part of the death test macro because otherwise the
+  // macro may expand to statements that re-purpose the previously freed
+  // memory and thus not crash.
+ EXPECT_DEATH_IF_SUPPORTED(
+ v8::base::PageAllocator allocator; Address base; {
+ auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+ base = pmr->reserved_region().base();
+ } access(base[0]);
+ , "");
+}
+
+TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ if (SupportsCommittingGuardPages(&allocator)) {
+ EXPECT_DEATH_IF_SUPPORTED(
+ access(pmr->GetPageMemory(0).overall_region().base()[0]), "");
+ }
+}
+
+TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ if (SupportsCommittingGuardPages(&allocator)) {
+ EXPECT_DEATH_IF_SUPPORTED(
+ access(pmr->GetPageMemory(0).writeable_region().end()[0]), "");
+ }
+}
+
+TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ PageMemoryRegionTree tree;
+ tree.Add(pmr.get());
+ ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
+ ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().end() - 1));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base() - 1));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end()));
+ tree.Remove(pmr.get());
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base()));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end() - 1));
+}
+
+TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
+ v8::base::PageAllocator allocator;
+ constexpr size_t kLargeSize = 5012;
+ auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+ PageMemoryRegionTree tree;
+ tree.Add(pmr.get());
+ ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
+ ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().end() - 1));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base() - 1));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end()));
+ tree.Remove(pmr.get());
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().base()));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr->reserved_region().end() - 1));
+}
+
+TEST(PageMemoryRegionTreeTest, AddLookupRemoveMultiple) {
+ v8::base::PageAllocator allocator;
+ auto pmr1 = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ constexpr size_t kLargeSize = 3127;
+ auto pmr2 = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+ PageMemoryRegionTree tree;
+ tree.Add(pmr1.get());
+ tree.Add(pmr2.get());
+ ASSERT_EQ(pmr1.get(), tree.Lookup(pmr1->reserved_region().base()));
+ ASSERT_EQ(pmr1.get(), tree.Lookup(pmr1->reserved_region().end() - 1));
+ ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().base()));
+ ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().end() - 1));
+ tree.Remove(pmr1.get());
+ ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().base()));
+ ASSERT_EQ(pmr2.get(), tree.Lookup(pmr2->reserved_region().end() - 1));
+ tree.Remove(pmr2.get());
+ ASSERT_EQ(nullptr, tree.Lookup(pmr2->reserved_region().base()));
+ ASSERT_EQ(nullptr, tree.Lookup(pmr2->reserved_region().end() - 1));
+}
+
+TEST(NormalPageMemoryPool, ConstructorEmpty) {
+ v8::base::PageAllocator allocator;
+ NormalPageMemoryPool pool;
+ constexpr size_t kBucket = 0;
+ EXPECT_EQ(NormalPageMemoryPool::Result(nullptr, nullptr), pool.Take(kBucket));
+}
+
+TEST(NormalPageMemoryPool, AddTakeSameBucket) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ const PageMemory pm = pmr->GetPageMemory(0);
+ NormalPageMemoryPool pool;
+ constexpr size_t kBucket = 0;
+ pool.Add(kBucket, pmr.get(), pm.writeable_region().base());
+ EXPECT_EQ(
+ NormalPageMemoryPool::Result(pmr.get(), pm.writeable_region().base()),
+ pool.Take(kBucket));
+}
+
+TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
+ v8::base::PageAllocator allocator;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ const PageMemory pm = pmr->GetPageMemory(0);
+ NormalPageMemoryPool pool;
+ constexpr size_t kFirstBucket = 0;
+ constexpr size_t kSecondBucket = 1;
+ pool.Add(kFirstBucket, pmr.get(), pm.writeable_region().base());
+ EXPECT_EQ(NormalPageMemoryPool::Result(nullptr, nullptr),
+ pool.Take(kSecondBucket));
+ EXPECT_EQ(
+ NormalPageMemoryPool::Result(pmr.get(), pm.writeable_region().base()),
+ pool.Take(kFirstBucket));
+}
+
+TEST(PageBackendTest, AllocateNormalUsesPool) {
+ v8::base::PageAllocator allocator;
+ PageBackend backend(&allocator);
+ constexpr size_t kBucket = 0;
+ Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
+ EXPECT_NE(nullptr, writeable_base1);
+ backend.FreeNormalPageMemory(kBucket, writeable_base1);
+ Address writeable_base2 = backend.AllocateNormalPageMemory(kBucket);
+ EXPECT_NE(nullptr, writeable_base2);
+ EXPECT_EQ(writeable_base1, writeable_base2);
+}
+
+TEST(PageBackendTest, AllocateLarge) {
+ v8::base::PageAllocator allocator;
+ PageBackend backend(&allocator);
+ Address writeable_base1 = backend.AllocateLargePageMemory(13731);
+ EXPECT_NE(nullptr, writeable_base1);
+ Address writeable_base2 = backend.AllocateLargePageMemory(9478);
+ EXPECT_NE(nullptr, writeable_base2);
+ EXPECT_NE(writeable_base1, writeable_base2);
+ backend.FreeLargePageMemory(writeable_base1);
+ backend.FreeLargePageMemory(writeable_base2);
+}
+
+TEST(PageBackendTest, LookupNormal) {
+ v8::base::PageAllocator allocator;
+ PageBackend backend(&allocator);
+ constexpr size_t kBucket = 0;
+ Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
+ EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
+ EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kPageSize -
+ 2 * kGuardPageSize - 1));
+ EXPECT_EQ(nullptr,
+ backend.Lookup(writeable_base + kPageSize - 2 * kGuardPageSize));
+ EXPECT_EQ(nullptr,
+ backend.Lookup(writeable_base - kGuardPageSize + kPageSize - 1));
+}
+
+TEST(PageBackendTest, LookupLarge) {
+ v8::base::PageAllocator allocator;
+ PageBackend backend(&allocator);
+ constexpr size_t kSize = 7934;
+ Address writeable_base = backend.AllocateLargePageMemory(kSize);
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
+ EXPECT_EQ(nullptr, backend.Lookup(writeable_base - 1));
+ EXPECT_EQ(writeable_base, backend.Lookup(writeable_base));
+ EXPECT_EQ(writeable_base, backend.Lookup(writeable_base + kSize - 1));
+}
+
+TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
+ v8::base::PageAllocator allocator;
+ Address base;
+ {
+ PageBackend backend(&allocator);
+ constexpr size_t kBucket = 0;
+ base = backend.AllocateNormalPageMemory(kBucket);
+ }
+ EXPECT_DEATH_IF_SUPPORTED(access(base[0]), "");
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc b/deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc
new file mode 100644
index 0000000000..e18872ebce
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc
@@ -0,0 +1,658 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/persistent.h"
+
+#include <vector>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/member.h"
+#include "include/cppgc/type-traits.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/visitor.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+struct GCed : GarbageCollected<GCed> {
+ static size_t trace_call_count;
+ virtual void Trace(cppgc::Visitor*) const { ++trace_call_count; }
+};
+size_t GCed::trace_call_count = 0;
+
+struct DerivedGCed : GCed {
+ void Trace(cppgc::Visitor* v) const override { GCed::Trace(v); }
+};
+
+template <template <typename> class PersistentType>
+PersistentRegion& GetRegion(cppgc::Heap* heap) {
+ auto* heap_impl = internal::Heap::From(heap);
+ return IsWeak<PersistentType<GCed>>::value
+ ? heap_impl->GetWeakPersistentRegion()
+ : heap_impl->GetStrongPersistentRegion();
+}
+
+template <typename T>
+using LocalizedPersistent =
+ internal::BasicPersistent<T, internal::StrongPersistentPolicy,
+ internal::KeepLocationPolicy,
+ internal::DefaultCheckingPolicy>;
+
+class RootVisitor final : public VisitorBase {
+ public:
+ RootVisitor() = default;
+
+ const auto& WeakCallbacks() const { return weak_callbacks_; }
+
+ void ProcessWeakCallbacks() {
+ const auto info = LivenessBrokerFactory::Create();
+ for (const auto& cb : weak_callbacks_) {
+ cb.first(info, cb.second);
+ }
+ weak_callbacks_.clear();
+ }
+
+ protected:
+ void VisitRoot(const void* t, TraceDescriptor desc) final {
+ desc.callback(this, desc.base_object_payload);
+ }
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback callback,
+ const void* object) final {
+ weak_callbacks_.emplace_back(callback, object);
+ }
+
+ private:
+ std::vector<std::pair<WeakCallback, const void*>> weak_callbacks_;
+};
+class PersistentTest : public testing::TestSupportingAllocationOnly {};
+
+} // namespace
+
+template <template <typename> class PersistentType>
+void NullStateCtor(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ {
+ PersistentType<GCed> empty;
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ {
+ PersistentType<GCed> empty = nullptr;
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ {
+ PersistentType<GCed> empty = kSentinelPointer;
+ EXPECT_EQ(kSentinelPointer, empty);
+ EXPECT_EQ(kSentinelPointer, empty.Release());
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ {
+    // A runtime null must not allocate an associated node.
+ PersistentType<GCed> empty = static_cast<GCed*>(0);
+ EXPECT_EQ(nullptr, empty.Get());
+ EXPECT_EQ(nullptr, empty.Release());
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, NullStateCtor) {
+ auto* heap = GetHeap();
+ NullStateCtor<Persistent>(heap);
+ NullStateCtor<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void RawCtor(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ {
+ PersistentType<GCed> p = gced;
+ EXPECT_EQ(gced, p.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p = *gced;
+ EXPECT_EQ(gced, p.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, RawCtor) {
+ auto* heap = GetHeap();
+ RawCtor<Persistent>(heap);
+ RawCtor<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void CopyCtor(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1 = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p2 = p1;
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1;
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p2 = p1;
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(nullptr, p1.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<DerivedGCed> p1 = MakeGarbageCollected<DerivedGCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p2 = p1;
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ static constexpr size_t kSlots = 512u;
+ const PersistentType<GCed> prototype = MakeGarbageCollected<GCed>(heap);
+ std::vector<PersistentType<GCed>> vector;
+ vector.reserve(kSlots);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ for (size_t i = 0; i < kSlots; ++i) {
+ vector.emplace_back(prototype);
+ EXPECT_EQ(i + 2, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ vector.clear();
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, CopyCtor) {
+ auto* heap = GetHeap();
+ CopyCtor<Persistent>(heap);
+ CopyCtor<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void MoveCtor(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p1 = gced;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p2 = std::move(p1);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(gced, p2.Get());
+    // The moved-from object is in the valid, specified (nullptr) state.
+ EXPECT_EQ(nullptr, p1.Get());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<DerivedGCed> p1 = MakeGarbageCollected<DerivedGCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ // Move ctor is not heterogeneous - fall back to copy ctor.
+ PersistentType<GCed> p2 = std::move(p1);
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1;
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p2 = std::move(p1);
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(nullptr, p1.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, MoveCtor) {
+ auto* heap = GetHeap();
+ MoveCtor<Persistent>(heap);
+ MoveCtor<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType,
+ template <typename> class MemberType>
+void MemberCtor(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ MemberType<GCed> m = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p = m;
+ EXPECT_EQ(m.Get(), p.Get());
+ EXPECT_EQ(m, p);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, MemberCtor) {
+ auto* heap = GetHeap();
+ MemberCtor<Persistent, Member>(heap);
+ MemberCtor<Persistent, WeakMember>(heap);
+ MemberCtor<Persistent, UntracedMember>(heap);
+ MemberCtor<WeakPersistent, Member>(heap);
+ MemberCtor<WeakPersistent, WeakMember>(heap);
+ MemberCtor<WeakPersistent, UntracedMember>(heap);
+}
+
+template <template <typename> class PersistentType>
+void NullStateAssignment(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p = nullptr;
+ EXPECT_EQ(nullptr, p.Get());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ {
+ PersistentType<GCed> p = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p = kSentinelPointer;
+ EXPECT_EQ(kSentinelPointer, p);
+ EXPECT_EQ(kSentinelPointer, p.Get());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ {
+ PersistentType<GCed> p = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p = static_cast<GCed*>(0);
+ EXPECT_EQ(nullptr, p.Get());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+}
+
+TEST_F(PersistentTest, NullStateAssignment) {
+  auto* heap = GetHeap();
+  NullStateAssignment<Persistent>(heap);
+  NullStateAssignment<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void RawAssignment(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ {
+ PersistentType<GCed> p;
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ p = gced;
+ EXPECT_EQ(gced, p.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p;
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ p = *gced;
+ EXPECT_EQ(gced, p.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, RawAssignment) {
+ auto* heap = GetHeap();
+ RawAssignment<Persistent>(heap);
+ RawAssignment<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void CopyAssignment(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1 = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p2;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = p1;
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1 = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p2 = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = p1;
+ // The old node from p2 must be dropped.
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<DerivedGCed> p1 = MakeGarbageCollected<DerivedGCed>(heap);
+ PersistentType<GCed> p2;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = p1;
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ static constexpr size_t kSlots = 512u;
+ const PersistentType<GCed> prototype = MakeGarbageCollected<GCed>(heap);
+ std::vector<PersistentType<GCed>> vector(kSlots);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ size_t i = 0;
+ for (auto& p : vector) {
+ p = prototype;
+ EXPECT_EQ(i + 2, GetRegion<PersistentType>(heap).NodesInUse());
+ ++i;
+ }
+ vector.clear();
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, CopyAssignment) {
+ auto* heap = GetHeap();
+ CopyAssignment<Persistent>(heap);
+ CopyAssignment<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void MoveAssignment(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p1 = gced;
+ PersistentType<GCed> p2;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = std::move(p1);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(gced, p2.Get());
+    // The moved-from object is in the valid, specified (nullptr) state.
+ EXPECT_EQ(nullptr, p1.Get());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<GCed> p1;
+ PersistentType<GCed> p2 = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = std::move(p1);
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+    // The moved-from object is in the valid, specified (nullptr) state.
+ EXPECT_EQ(nullptr, p2.Get());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p1 = gced;
+ PersistentType<GCed> p2 = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ p2 = std::move(p1);
+ // The old node from p2 must be dropped.
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(gced, p2.Get());
+    // The moved-from object is in the valid, specified (nullptr) state.
+ EXPECT_EQ(nullptr, p1.Get());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ PersistentType<DerivedGCed> p1 = MakeGarbageCollected<DerivedGCed>(heap);
+ PersistentType<GCed> p2;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ // Move ctor is not heterogeneous - fall back to copy assignment.
+ p2 = std::move(p1);
+ EXPECT_EQ(2u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_EQ(p1.Get(), p2.Get());
+ EXPECT_EQ(p1, p2);
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, MoveAssignment) {
+ auto* heap = GetHeap();
+ MoveAssignment<Persistent>(heap);
+ MoveAssignment<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType,
+ template <typename> class MemberType>
+void MemberAssignment(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ {
+ MemberType<GCed> m = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p;
+ p = m;
+ EXPECT_EQ(m.Get(), p.Get());
+ EXPECT_EQ(m, p);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, MemberAssignment) {
+ auto* heap = GetHeap();
+ MemberAssignment<Persistent, Member>(heap);
+ MemberAssignment<Persistent, WeakMember>(heap);
+ MemberAssignment<Persistent, UntracedMember>(heap);
+ MemberAssignment<WeakPersistent, Member>(heap);
+ MemberAssignment<WeakPersistent, WeakMember>(heap);
+ MemberAssignment<WeakPersistent, UntracedMember>(heap);
+}
+
+template <template <typename> class PersistentType>
+void ClearTest(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ PersistentType<GCed> p = MakeGarbageCollected<GCed>(heap);
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_NE(nullptr, p.Get());
+ p.Clear();
+ EXPECT_EQ(nullptr, p.Get());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, Clear) {
+ auto* heap = GetHeap();
+ ClearTest<Persistent>(heap);
+ ClearTest<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType>
+void ReleaseTest(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+ GCed* gced = MakeGarbageCollected<GCed>(heap);
+ PersistentType<GCed> p = gced;
+ EXPECT_EQ(1u, GetRegion<PersistentType>(heap).NodesInUse());
+ EXPECT_NE(nullptr, p.Get());
+ GCed* raw = p.Release();
+ EXPECT_EQ(gced, raw);
+ EXPECT_EQ(nullptr, p.Get());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, Release) {
+ auto* heap = GetHeap();
+ ReleaseTest<Persistent>(heap);
+ ReleaseTest<WeakPersistent>(heap);
+}
+
+template <template <typename> class PersistentType1,
+ template <typename> class PersistentType2>
+void HeterogeneousConversion(cppgc::Heap* heap) {
+ EXPECT_EQ(0u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType2>(heap).NodesInUse());
+ {
+ PersistentType1<GCed> persistent1 = MakeGarbageCollected<GCed>(heap);
+ PersistentType2<GCed> persistent2 = persistent1;
+ EXPECT_EQ(persistent1.Get(), persistent2.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(1u, GetRegion<PersistentType2>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType2>(heap).NodesInUse());
+ {
+ PersistentType1<DerivedGCed> persistent1 =
+ MakeGarbageCollected<DerivedGCed>(heap);
+ PersistentType2<GCed> persistent2 = persistent1;
+ EXPECT_EQ(persistent1.Get(), persistent2.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(1u, GetRegion<PersistentType2>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType2>(heap).NodesInUse());
+ {
+ PersistentType1<GCed> persistent1 = MakeGarbageCollected<GCed>(heap);
+ PersistentType2<GCed> persistent2;
+ persistent2 = persistent1;
+ EXPECT_EQ(persistent1.Get(), persistent2.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(1u, GetRegion<PersistentType2>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType2>(heap).NodesInUse());
+ {
+ PersistentType1<DerivedGCed> persistent1 =
+ MakeGarbageCollected<DerivedGCed>(heap);
+ PersistentType2<GCed> persistent2;
+ persistent2 = persistent1;
+ EXPECT_EQ(persistent1.Get(), persistent2.Get());
+ EXPECT_EQ(1u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(1u, GetRegion<PersistentType2>(heap).NodesInUse());
+ }
+ EXPECT_EQ(0u, GetRegion<PersistentType1>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType2>(heap).NodesInUse());
+}
+
+TEST_F(PersistentTest, HeterogeneousConversion) {
+ auto* heap = GetHeap();
+ HeterogeneousConversion<Persistent, WeakPersistent>(heap);
+ HeterogeneousConversion<WeakPersistent, Persistent>(heap);
+}
+
+TEST_F(PersistentTest, TraceStrong) {
+ auto* heap = GetHeap();
+ static constexpr size_t kItems = 512;
+ std::vector<Persistent<GCed>> vec(kItems);
+ for (auto& p : vec) {
+ p = MakeGarbageCollected<GCed>(heap);
+ }
+ {
+ GCed::trace_call_count = 0;
+ RootVisitor v;
+ GetRegion<Persistent>(heap).Trace(&v);
+ EXPECT_EQ(kItems, GCed::trace_call_count);
+ EXPECT_EQ(kItems, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ {
+ GCed::trace_call_count = 0;
+ vec[0].Clear();
+ vec[kItems / 2].Clear();
+ vec[kItems / 4].Clear();
+ vec[kItems - 1].Clear();
+ RootVisitor v;
+ GetRegion<Persistent>(heap).Trace(&v);
+ EXPECT_EQ(kItems - 4, GCed::trace_call_count);
+ EXPECT_EQ(kItems - 4, GetRegion<Persistent>(heap).NodesInUse());
+ }
+ {
+ GCed::trace_call_count = 0;
+ vec.clear();
+ RootVisitor v;
+ GetRegion<Persistent>(heap).Trace(&v);
+ EXPECT_EQ(0u, GCed::trace_call_count);
+ EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ }
+}
+
+TEST_F(PersistentTest, TraceWeak) {
+ auto* heap = GetHeap();
+ static constexpr size_t kItems = 512;
+ std::vector<WeakPersistent<GCed>> vec(kItems);
+ for (auto& p : vec) {
+ p = MakeGarbageCollected<GCed>(heap);
+ }
+ GCed::trace_call_count = 0;
+ RootVisitor v;
+ GetRegion<WeakPersistent>(heap).Trace(&v);
+ const auto& callbacks = v.WeakCallbacks();
+ EXPECT_EQ(kItems, callbacks.size());
+ EXPECT_EQ(kItems, GetRegion<WeakPersistent>(heap).NodesInUse());
+
+ v.ProcessWeakCallbacks();
+ for (const auto& p : vec) {
+ EXPECT_EQ(nullptr, p.Get());
+ }
+ EXPECT_EQ(0u, GetRegion<WeakPersistent>(heap).NodesInUse());
+}
+
+#if CPPGC_SUPPORTS_SOURCE_LOCATION
+TEST_F(PersistentTest, LocalizedPersistent) {
+ GCed* gced = MakeGarbageCollected<GCed>(GetHeap());
+ {
+ const auto expected_loc = SourceLocation::Current();
+ LocalizedPersistent<GCed> p = gced;
+ const auto actual_loc = p.Location();
+ EXPECT_STREQ(expected_loc.Function(), actual_loc.Function());
+ EXPECT_STREQ(expected_loc.FileName(), actual_loc.FileName());
+ EXPECT_EQ(expected_loc.Line() + 1, actual_loc.Line());
+ }
+ {
+ // Copy ctor doesn't copy source location.
+ LocalizedPersistent<GCed> p1 = gced;
+ LocalizedPersistent<GCed> p2 = p1;
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
+ // Copy assignment doesn't copy source location.
+ LocalizedPersistent<GCed> p1 = gced;
+ LocalizedPersistent<GCed> p2;
+ p2 = p1;
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
+ // Clearing doesn't clear source location.
+ LocalizedPersistent<GCed> p1 = gced;
+ LocalizedPersistent<GCed> p2 = gced;
+ p2.Clear();
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
+ LocalizedPersistent<GCed> p1 = gced;
+ const auto expected_loc = p1.Location();
+ LocalizedPersistent<GCed> p2 = std::move(p1);
+ EXPECT_STREQ(expected_loc.Function(), p2.Location().Function());
+ EXPECT_STREQ(expected_loc.FileName(), p2.Location().FileName());
+ EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
+ }
+ {
+ LocalizedPersistent<GCed> p1 = gced;
+ const auto expected_loc = p1.Location();
+ LocalizedPersistent<GCed> p2;
+ p2 = std::move(p1);
+ EXPECT_STREQ(expected_loc.Function(), p2.Location().Function());
+ EXPECT_STREQ(expected_loc.FileName(), p2.Location().FileName());
+ EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
+ }
+}
+#endif
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
new file mode 100644
index 0000000000..3cd845ec2f
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
@@ -0,0 +1,199 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/prefinalizer.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class PrefinalizerTest : public testing::TestWithHeap {};
+
+class GCed : public GarbageCollected<GCed> {
+ CPPGC_USING_PRE_FINALIZER(GCed, PreFinalizer);
+
+ public:
+ void Trace(Visitor*) const {}
+ void PreFinalizer() { ++prefinalizer_callcount; }
+
+ static size_t prefinalizer_callcount;
+};
+size_t GCed::prefinalizer_callcount = 0;
+
+} // namespace
+
+TEST_F(PrefinalizerTest, PrefinalizerCalledOnDeadObject) {
+ GCed::prefinalizer_callcount = 0;
+ auto* object = MakeGarbageCollected<GCed>(GetHeap());
+ USE(object);
+ EXPECT_EQ(0u, GCed::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, GCed::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, GCed::prefinalizer_callcount);
+}
+
+TEST_F(PrefinalizerTest, PrefinalizerNotCalledOnLiveObject) {
+ GCed::prefinalizer_callcount = 0;
+ auto* object = MakeGarbageCollected<GCed>(GetHeap());
+ HeapObjectHeader::FromPayload(object).TryMarkAtomic();
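+ // Marking protects the object for a single collection only: the first
+ // PreciseGC() keeps it alive, the second one reclaims it and runs the
+ // prefinalizer.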
+ EXPECT_EQ(0u, GCed::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(0u, GCed::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, GCed::prefinalizer_callcount);
+}
+
+namespace {
+
+class Mixin : public GarbageCollectedMixin {
+ CPPGC_USING_PRE_FINALIZER(Mixin, PreFinalizer);
+
+ public:
+ void PreFinalizer() { ++prefinalizer_callcount; }
+
+ static size_t prefinalizer_callcount;
+};
+size_t Mixin::prefinalizer_callcount = 0;
+
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
+ USING_GARBAGE_COLLECTED_MIXIN();
+};
+
+} // namespace
+
+TEST_F(PrefinalizerTest, PrefinalizerCalledOnDeadMixinObject) {
+ Mixin::prefinalizer_callcount = 0;
+ auto* object = MakeGarbageCollected<GCedWithMixin>(GetHeap());
+ USE(object);
+ EXPECT_EQ(0u, Mixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Mixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Mixin::prefinalizer_callcount);
+}
+
+TEST_F(PrefinalizerTest, PrefinalizerNotCalledOnLiveMixinObject) {
+ Mixin::prefinalizer_callcount = 0;
+ auto* object = MakeGarbageCollected<GCedWithMixin>(GetHeap());
+ HeapObjectHeader::FromPayload(object).TryMarkAtomic();
+ EXPECT_EQ(0u, Mixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(0u, Mixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, Mixin::prefinalizer_callcount);
+}
+
+namespace {
+
+class BaseMixin : public GarbageCollectedMixin {
+ CPPGC_USING_PRE_FINALIZER(BaseMixin, PreFinalizer);
+
+ public:
+ void PreFinalizer();
+
+ static size_t prefinalizer_callcount;
+};
+size_t BaseMixin::prefinalizer_callcount = 0;
+
+class InheritingMixin : public BaseMixin {
+ CPPGC_USING_PRE_FINALIZER(InheritingMixin, PreFinalizer);
+
+ public:
+ void PreFinalizer();
+
+ static size_t prefinalizer_callcount;
+};
+size_t InheritingMixin::prefinalizer_callcount = 0;
+
+class GCedWithMixins : public GarbageCollected<GCedWithMixins>,
+ public InheritingMixin {
+ USING_GARBAGE_COLLECTED_MIXIN();
+ CPPGC_USING_PRE_FINALIZER(GCedWithMixins, PreFinalizer);
+
+ public:
+ void PreFinalizer();
+
+ static size_t prefinalizer_callcount;
+};
+size_t GCedWithMixins::prefinalizer_callcount = 0;
+
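+// The prefinalizers below assert on each other's call counts to verify the
+// invocation order: GCedWithMixins first, then InheritingMixin, then
+// BaseMixin.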
+void BaseMixin::PreFinalizer() {
+ EXPECT_EQ(1u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(1u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(0u, BaseMixin::prefinalizer_callcount);
+ ++BaseMixin::prefinalizer_callcount;
+}
+
+void InheritingMixin::PreFinalizer() {
+ EXPECT_EQ(1u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(0u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(0u, BaseMixin::prefinalizer_callcount);
+ ++InheritingMixin::prefinalizer_callcount;
+}
+
+void GCedWithMixins::PreFinalizer() {
+ EXPECT_EQ(0u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(0u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(0u, BaseMixin::prefinalizer_callcount);
+ ++GCedWithMixins::prefinalizer_callcount;
+}
+} // namespace
+
+TEST_F(PrefinalizerTest, PrefinalizerInvocationPreservesOrder) {
+ BaseMixin::prefinalizer_callcount = 0;
+ InheritingMixin::prefinalizer_callcount = 0;
+ GCedWithMixins::prefinalizer_callcount = 0;
+ auto* object = MakeGarbageCollected<GCedWithMixins>(GetHeap());
+ USE(object);
+ EXPECT_EQ(0u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(0u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(0u, BaseMixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(1u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(1u, BaseMixin::prefinalizer_callcount);
+ PreciseGC();
+ EXPECT_EQ(1u, GCedWithMixins::prefinalizer_callcount);
+ EXPECT_EQ(1u, InheritingMixin::prefinalizer_callcount);
+ EXPECT_EQ(1u, BaseMixin::prefinalizer_callcount);
+}
+
+namespace {
+
+class AllocatingPrefinalizer : public GarbageCollected<AllocatingPrefinalizer> {
+ CPPGC_USING_PRE_FINALIZER(AllocatingPrefinalizer, PreFinalizer);
+
+ public:
+ explicit AllocatingPrefinalizer(cppgc::Heap* heap) : heap_(heap) {}
+ void Trace(Visitor*) const {}
+ void PreFinalizer() { MakeGarbageCollected<GCed>(heap_); }
+
+ private:
+ cppgc::Heap* heap_;
+};
+
+} // namespace
+
+#ifdef DEBUG
+
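+// Allocating from a prefinalizer is not supported; in debug builds this is
+// expected to hit a check and abort, which EXPECT_DEATH_IF_SUPPORTED verifies.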
+TEST_F(PrefinalizerTest, PrefinalizerFailsOnAllocation) {
+ auto* object =
+ MakeGarbageCollected<AllocatingPrefinalizer>(GetHeap(), GetHeap());
+ USE(object);
+ EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
+}
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/source-location-unittest.cc b/deps/v8/test/unittests/heap/cppgc/source-location-unittest.cc
new file mode 100644
index 0000000000..b477dc167d
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/source-location-unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/source-location.h"
+
+#include "src/base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+constexpr char kFileName[] = "source-location-unittest.cc";
+
+bool Contains(const std::string& base_string, const std::string& substring) {
+ return base_string.find(substring) != std::string::npos;
+}
+
+} // namespace
+
+TEST(SourceLocationTest, DefaultCtor) {
+ constexpr SourceLocation loc;
+ EXPECT_EQ(nullptr, loc.Function());
+ EXPECT_EQ(nullptr, loc.FileName());
+ EXPECT_EQ(0u, loc.Line());
+}
+
+void TestSourceLocationCurrent() {
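+ // kNextLine records the line of the SourceLocation::Current() call below so
+ // that Line() can be compared exactly.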
+ static constexpr char kFunctionName[] = "TestSourceLocationCurrent";
+ static constexpr size_t kNextLine = __LINE__ + 1;
+ constexpr auto loc = SourceLocation::Current();
+#if !CPPGC_SUPPORTS_SOURCE_LOCATION
+ EXPECT_EQ(nullptr, loc.Function());
+ EXPECT_EQ(nullptr, loc.FileName());
+ EXPECT_EQ(0u, loc.Line());
+ USE(kNextLine);
+ return;
+#endif
+ EXPECT_EQ(kNextLine, loc.Line());
+ EXPECT_TRUE(Contains(loc.FileName(), kFileName));
+ EXPECT_TRUE(Contains(loc.Function(), kFunctionName));
+}
+
+TEST(SourceLocationTest, Current) { TestSourceLocationCurrent(); }
+
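+// ToString() is expected to render the location as "function@file:line".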
+void TestToString() {
+ static const std::string kDescriptor = std::string(__func__) + "@" +
+ __FILE__ + ":" +
+ std::to_string(__LINE__ + 1);
+ constexpr auto loc = SourceLocation::Current();
+ const auto string = loc.ToString();
+ EXPECT_EQ(kDescriptor, string);
+}
+
+#if CPPGC_SUPPORTS_SOURCE_LOCATION
+TEST(SourceLocationTest, ToString) { TestToString(); }
+#endif
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc
new file mode 100644
index 0000000000..7ff5274a19
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc
@@ -0,0 +1,357 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stack.h"
+
+#include <memory>
+#include <ostream>
+
+#include "include/v8config.h"
+#include "src/base/platform/platform.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include <xmmintrin.h>
+#endif
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class GCStackTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ stack_.reset(new Stack(v8::base::Stack::GetStackStart()));
+ }
+
+ void TearDown() override { stack_.reset(); }
+
+ Stack* GetStack() const { return stack_.get(); }
+
+ private:
+ std::unique_ptr<Stack> stack_;
+};
+
+} // namespace
+
+TEST_F(GCStackTest, IsOnStackForStackValue) {
+ void* dummy;
+ EXPECT_TRUE(GetStack()->IsOnStack(&dummy));
+}
+
+TEST_F(GCStackTest, IsOnStackForHeapValue) {
+ auto dummy = std::make_unique<int>();
+ EXPECT_FALSE(GetStack()->IsOnStack(dummy.get()));
+}
+
+namespace {
+
+class StackScanner final : public StackVisitor {
+ public:
+ struct Container {
+ std::unique_ptr<int> value;
+ };
+
+ StackScanner() : container_(new Container{}) {
+ container_->value = std::make_unique<int>();
+ }
+
+ void VisitPointer(const void* address) final {
+ if (address == container_->value.get()) found_ = true;
+ }
+
+ void Reset() { found_ = false; }
+ bool found() const { return found_; }
+ int* needle() const { return container_->value.get(); }
+
+ private:
+ std::unique_ptr<Container> container_;
+ bool found_ = false;
+};
+
+} // namespace
+
+TEST_F(GCStackTest, IteratePointersFindsOnStackValue) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+ // may be part of temporaries after setting it up through StackScanner.
+ {
+ int* volatile tmp = scanner->needle();
+ USE(tmp);
+ GetStack()->IteratePointers(scanner.get());
+ EXPECT_TRUE(scanner->found());
+ }
+}
+
+TEST_F(GCStackTest, IteratePointersFindsOnStackValuePotentiallyUnaligned) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+ // may be part of temporaries after setting it up through StackScanner.
+ {
+ char a = 'c';
+ USE(a);
+ int* volatile tmp = scanner->needle();
+ USE(tmp);
+ GetStack()->IteratePointers(scanner.get());
+ EXPECT_TRUE(scanner->found());
+ }
+}
+
+namespace {
+
+// Prevent inlining, as inlining would allow the compiler to prove that the
+// parameters need not actually be materialized.
+//
+// Parameter positions are explicit to test various calling conventions.
+V8_NOINLINE void* RecursivelyPassOnParameterImpl(void* p1, void* p2, void* p3,
+ void* p4, void* p5, void* p6,
+ void* p7, void* p8,
+ Stack* stack,
+ StackVisitor* visitor) {
+ if (p1) {
+ return RecursivelyPassOnParameterImpl(nullptr, p1, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr,
+ stack, visitor);
+ } else if (p2) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, p2, nullptr,
+ nullptr, nullptr, nullptr, nullptr,
+ stack, visitor);
+ } else if (p3) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, p3,
+ nullptr, nullptr, nullptr, nullptr,
+ stack, visitor);
+ } else if (p4) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ p4, nullptr, nullptr, nullptr, stack,
+ visitor);
+ } else if (p5) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, p5, nullptr, nullptr, stack,
+ visitor);
+ } else if (p6) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, p6, nullptr, stack,
+ visitor);
+ } else if (p7) {
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, p7, stack,
+ visitor);
+ } else if (p8) {
+ stack->IteratePointers(visitor);
+ return p8;
+ }
+ return nullptr;
+}
+
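+// Dispatches to the implementation above, placing |parameter| into the slot
+// selected by |num| so that different argument registers and stack slots end
+// up holding the pointer when the stack is scanned.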
+V8_NOINLINE void* RecursivelyPassOnParameter(size_t num, void* parameter,
+ Stack* stack,
+ StackVisitor* visitor) {
+ switch (num) {
+ case 0:
+ stack->IteratePointers(visitor);
+ return parameter;
+ case 1:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr,
+ parameter, stack, visitor);
+ case 2:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, parameter,
+ nullptr, stack, visitor);
+ case 3:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ nullptr, parameter, nullptr,
+ nullptr, stack, visitor);
+ case 4:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr, nullptr,
+ parameter, nullptr, nullptr,
+ nullptr, stack, visitor);
+ case 5:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, nullptr,
+ parameter, nullptr, nullptr,
+ nullptr, nullptr, stack, visitor);
+ case 6:
+ return RecursivelyPassOnParameterImpl(nullptr, nullptr, parameter,
+ nullptr, nullptr, nullptr, nullptr,
+ nullptr, stack, visitor);
+ case 7:
+ return RecursivelyPassOnParameterImpl(nullptr, parameter, nullptr,
+ nullptr, nullptr, nullptr, nullptr,
+ nullptr, stack, visitor);
+ case 8:
+ return RecursivelyPassOnParameterImpl(parameter, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr,
+ nullptr, stack, visitor);
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting0) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(0, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting1) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(1, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting2) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(2, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting3) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(3, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting4) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(4, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting5) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(5, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting6) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(6, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting7) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(7, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+TEST_F(GCStackTest, IteratePointersFindsParameterNesting8) {
+ auto scanner = std::make_unique<StackScanner>();
+ void* needle = RecursivelyPassOnParameter(8, scanner->needle(), GetStack(),
+ scanner.get());
+ EXPECT_EQ(scanner->needle(), needle);
+ EXPECT_TRUE(scanner->found());
+}
+
+// The following test uses inline assembly and has only been checked to work
+// with Clang. It verifies that the stack-scanning trampoline pushes
+// callee-saved registers.
+//
+// The test uses a macro loop as asm() can only be passed string literals.
+#ifdef __clang__
+#ifdef V8_TARGET_ARCH_X64
+#ifdef V8_OS_WIN
+
+// Excluded from test: rbp
+#define FOR_ALL_CALLEE_SAVED_REGS(V) \
+ V("rdi") \
+ V("rsi") \
+ V("rbx") \
+ V("r12") \
+ V("r13") \
+ V("r14") \
+ V("r15")
+
+#else // !V8_OS_WIN
+
+// Excluded from test: rbp
+#define FOR_ALL_CALLEE_SAVED_REGS(V) \
+ V("rbx") \
+ V("r12") \
+ V("r13") \
+ V("r14") \
+ V("r15")
+
+#endif // !V8_OS_WIN
+#endif // V8_TARGET_ARCH_X64
+#endif // __clang__
+
+#ifdef FOR_ALL_CALLEE_SAVED_REGS
+
+TEST_F(GCStackTest, IteratePointersFindsCalleeSavedRegisters) {
+ auto scanner = std::make_unique<StackScanner>();
+
+ // No check that the needle is initially not found as on some platforms it
+ // may be part of temporaries after setting it up through StackScanner.
+
+// First, clear all callee-saved registers.
+#define CLEAR_REGISTER(reg) asm("mov $0, %%" reg : : : reg);
+
+ FOR_ALL_CALLEE_SAVED_REGS(CLEAR_REGISTER)
+#undef CLEAR_REGISTER
+
+ // Keep local raw pointers to keep instruction sequences small below.
+ auto* local_stack = GetStack();
+ auto* local_scanner = scanner.get();
+
+// Moves |local_scanner->needle()| into a callee-saved register, leaving the
+// callee-saved register as the only register referencing the needle.
+// (Ignoring implementation-dependent dirty registers/stack.)
+#define KEEP_ALIVE_FROM_CALLEE_SAVED(reg) \
+ local_scanner->Reset(); \
+ /* This moves the temporary into the callee-saved register. */ \
+ asm("mov %0, %%" reg : : "r"(local_scanner->needle()) : reg); \
+ /* Register is unprotected from here till the actual invocation. */ \
+ local_stack->IteratePointers(local_scanner); \
+ EXPECT_TRUE(local_scanner->found()) \
+ << "pointer in callee-saved register not found. register: " << reg \
+ << std::endl; \
+ /* Clear out the register again */ \
+ asm("mov $0, %%" reg : : : reg);
+
+ FOR_ALL_CALLEE_SAVED_REGS(KEEP_ALIVE_FROM_CALLEE_SAVED)
+#undef KEEP_ALIVE_FROM_CALLEE_SAVED
+#undef FOR_ALL_CALLEE_SAVED_REGS
+}
+#endif // FOR_ALL_CALLEE_SAVED_REGS
+
+#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+class CheckStackAlignmentVisitor final : public StackVisitor {
+ public:
+ void VisitPointer(const void*) final {
+ float f[4] = {0.};
+ volatile auto xmm = ::_mm_load_ps(f);
+ USE(xmm);
+ }
+};
+
+TEST_F(GCStackTest, StackAlignment) {
+ auto checker = std::make_unique<CheckStackAlignmentVisitor>();
+ GetStack()->IteratePointers(checker.get());
+}
+#endif // V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc b/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc
deleted file mode 100644
index 435c06f83f..0000000000
--- a/deps/v8/test/unittests/heap/cppgc/stack_unittest.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/cppgc/stack.h"
-
-#include <memory>
-#include <ostream>
-
-#include "include/v8config.h"
-#include "src/base/platform/platform.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include <xmmintrin.h>
-#endif
-
-namespace cppgc {
-namespace internal {
-
-namespace {
-
-class GCStackTest : public ::testing::Test {
- protected:
- void SetUp() override {
- stack_.reset(new Stack(v8::base::Stack::GetStackStart()));
- }
-
- void TearDown() override { stack_.reset(); }
-
- Stack* GetStack() const { return stack_.get(); }
-
- private:
- std::unique_ptr<Stack> stack_;
-};
-
-} // namespace
-
-TEST_F(GCStackTest, IsOnStackForStackValue) {
- void* dummy;
- EXPECT_TRUE(GetStack()->IsOnStack(&dummy));
-}
-
-TEST_F(GCStackTest, IsOnStackForHeapValue) {
- auto dummy = std::make_unique<int>();
- EXPECT_FALSE(GetStack()->IsOnStack(dummy.get()));
-}
-
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
-namespace {
-
-class StackScanner final : public StackVisitor {
- public:
- struct Container {
- std::unique_ptr<int> value;
- };
-
- StackScanner() : container_(new Container{}) {
- container_->value = std::make_unique<int>();
- }
-
- void VisitPointer(const void* address) final {
- if (address == container_->value.get()) found_ = true;
- }
-
- void Reset() { found_ = false; }
- bool found() const { return found_; }
- int* needle() const { return container_->value.get(); }
-
- private:
- std::unique_ptr<Container> container_;
- bool found_ = false;
-};
-
-} // namespace
-
-TEST_F(GCStackTest, IteratePointersFindsOnStackValue) {
- auto scanner = std::make_unique<StackScanner>();
-
- // No check that the needle is initially not found as on some platforms it
- // may be part of the redzone or temporaries after setting it up throuhg
- // StackScanner.
- {
- int* volatile tmp = scanner->needle();
- USE(tmp);
- GetStack()->IteratePointers(scanner.get());
- EXPECT_TRUE(scanner->found());
- }
-}
-
-TEST_F(GCStackTest, IteratePointersFindsOnStackValuePotentiallyUnaligned) {
- auto scanner = std::make_unique<StackScanner>();
-
- // No check that the needle is initially not found as on some platforms it
- // may be part of the redzone or temporaries after setting it up throuhg
- // StackScanner.
- {
- char a = 'c';
- USE(a);
- int* volatile tmp = scanner->needle();
- USE(tmp);
- GetStack()->IteratePointers(scanner.get());
- EXPECT_TRUE(scanner->found());
- }
-}
-
-namespace {
-
-void RecursivelyPassOnParameter(int* volatile p1, int* volatile p2,
- int* volatile p3, int* volatile p4,
- int* volatile p5, int* volatile p6,
- int* volatile p7, int* volatile p8,
- Stack* stack, StackVisitor* visitor) {
- if (p1) {
- RecursivelyPassOnParameter(nullptr, p1, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, stack, visitor);
- } else if (p2) {
- RecursivelyPassOnParameter(nullptr, nullptr, p2, nullptr, nullptr, nullptr,
- nullptr, nullptr, stack, visitor);
- } else if (p3) {
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, p3, nullptr, nullptr,
- nullptr, nullptr, stack, visitor);
- } else if (p4) {
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, p4, nullptr,
- nullptr, nullptr, stack, visitor);
- } else if (p5) {
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr, p5,
- nullptr, nullptr, stack, visitor);
- } else if (p6) {
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr, p6, nullptr, stack, visitor);
- } else if (p7) {
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, p7, stack, visitor);
- } else if (p8) {
- stack->IteratePointers(visitor);
- }
-}
-
-} // namespace
-
-TEST_F(GCStackTest, IteratePointersFindsParameter) {
- auto scanner = std::make_unique<StackScanner>();
- // No check that the needle is initially not found as on some platforms it
- // may be part of the redzone or temporaries after setting it up throuhg
- // StackScanner.
- RecursivelyPassOnParameter(nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, scanner->needle(), GetStack(),
- scanner.get());
- EXPECT_TRUE(scanner->found());
-}
-
-TEST_F(GCStackTest, IteratePointersFindsParameterInNestedFunction) {
- auto scanner = std::make_unique<StackScanner>();
- // No check that the needle is initially not found as on some platforms it
- // may be part of the redzone or temporaries after setting it up throuhg
- // StackScanner.
- RecursivelyPassOnParameter(scanner->needle(), nullptr, nullptr, nullptr,
- nullptr, nullptr, nullptr, nullptr, GetStack(),
- scanner.get());
- EXPECT_TRUE(scanner->found());
-}
-
-// The following test uses inline assembly and has been checked to work on clang
-// to verify that the stack-scanning trampoline pushes callee-saved registers.
-//
-// The test uses a macro loop as asm() can only be passed string literals.
-#ifdef __clang__
-#ifdef V8_TARGET_ARCH_X64
-#ifdef V8_OS_WIN
-
-// Excluded from test: rbp
-#define FOR_ALL_CALLEE_SAVED_REGS(V) \
- V("rdi") \
- V("rsi") \
- V("rbx") \
- V("r12") \
- V("r13") \
- V("r14") \
- V("r15")
-
-#else // !V8_OS_WIN
-
-// Excluded from test: rbp
-#define FOR_ALL_CALLEE_SAVED_REGS(V) \
- V("rbx") \
- V("r12") \
- V("r13") \
- V("r14") \
- V("r15")
-
-#endif // !V8_OS_WIN
-#endif // V8_TARGET_ARCH_X64
-#endif // __clang__
-
-#ifdef FOR_ALL_CALLEE_SAVED_REGS
-
-TEST_F(GCStackTest, IteratePointersFindsCalleeSavedRegisters) {
- auto scanner = std::make_unique<StackScanner>();
-
- // No check that the needle is initially not found as on some platforms it
- // may be part of the redzone or temporaries after setting it up throuhg
- // StackScanner.
-
-// First, clear all callee-saved registers.
-#define CLEAR_REGISTER(reg) asm("mov $0, %%" reg : : : reg);
-
- FOR_ALL_CALLEE_SAVED_REGS(CLEAR_REGISTER)
-#undef CLEAR_REGISTER
-
- // Keep local raw pointers to keep instruction sequences small below.
- auto* local_stack = GetStack();
- auto* local_scanner = scanner.get();
-
-// Moves |local_scanner->needle()| into a callee-saved register, leaving the
-// callee-saved register as the only register referencing the needle.
-// (Ignoring implementation-dependent dirty registers/stack.)
-#define KEEP_ALIVE_FROM_CALLEE_SAVED(reg) \
- local_scanner->Reset(); \
- /* This moves the temporary into the calee-saved register. */ \
- asm("mov %0, %%" reg : : "r"(local_scanner->needle()) : reg); \
- /* Register is unprotected from here till the actual invocation. */ \
- local_stack->IteratePointers(local_scanner); \
- EXPECT_TRUE(local_scanner->found()) \
- << "pointer in callee-saved register not found. register: " << reg \
- << std::endl; \
- /* Clear out the register again */ \
- asm("mov $0, %%" reg : : : reg);
-
- FOR_ALL_CALLEE_SAVED_REGS(KEEP_ALIVE_FROM_CALLEE_SAVED)
-#undef KEEP_ALIVE_FROM_CALLEE_SAVED
-#undef FOR_ALL_CALLEE_SAVED_REGS
-}
-#endif // FOR_ALL_CALLEE_SAVED_REGS
-
-#if V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-class CheckStackAlignmentVisitor final : public StackVisitor {
- public:
- void VisitPointer(const void*) final {
- float f[4] = {0.};
- volatile auto xmm = ::_mm_load_ps(f);
- USE(xmm);
- }
-};
-
-TEST_F(GCStackTest, StackAlignment) {
- auto checker = std::make_unique<CheckStackAlignmentVisitor>();
- GetStack()->IteratePointers(checker.get());
-}
-#endif // V8_OS_LINUX && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
-} // namespace internal
-} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
new file mode 100644
index 0000000000..4a5232b8b4
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -0,0 +1,230 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/sweeper.h"
+
+#include <algorithm>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+size_t g_destructor_callcount;
+
+template <size_t Size>
+class GCed : public GarbageCollected<GCed<Size>> {
+ public:
+ virtual ~GCed() { ++g_destructor_callcount; }
+
+ virtual void Trace(cppgc::Visitor*) const {}
+
+ private:
+ char array[Size];
+};
+
+class SweeperTest : public testing::TestWithHeap {
+ public:
+ SweeperTest() { g_destructor_callcount = 0; }
+
+ void Sweep() {
+ Sweeper& sweeper = Heap::From(GetHeap())->sweeper();
+ sweeper.Start(Sweeper::Config::kAtomic);
+ sweeper.Finish();
+ }
+
+ void MarkObject(void* payload) {
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(payload);
+ header.TryMarkAtomic();
+ }
+
+ PageBackend* GetBackend() { return Heap::From(GetHeap())->page_backend(); }
+};
+
+} // namespace
+
+TEST_F(SweeperTest, SweepUnmarkedNormalObject) {
+ constexpr size_t kObjectSize = 8;
+ using Type = GCed<kObjectSize>;
+
+ MakeGarbageCollected<Type>(GetHeap());
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(1u, g_destructor_callcount);
+}
+
+TEST_F(SweeperTest, DontSweepMarkedNormalObject) {
+ constexpr size_t kObjectSize = 8;
+ using Type = GCed<kObjectSize>;
+
+ auto* object = MakeGarbageCollected<Type>(GetHeap());
+ MarkObject(object);
+ BasePage* page = BasePage::FromPayload(object);
+ BaseSpace* space = page->space();
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+ // Check that the page is returned to the space.
+ EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
+ EXPECT_NE(nullptr, GetBackend()->Lookup(reinterpret_cast<Address>(object)));
+}
+
+TEST_F(SweeperTest, SweepUnmarkedLargeObject) {
+ constexpr size_t kObjectSize = kLargeObjectSizeThreshold * 2;
+ using Type = GCed<kObjectSize>;
+
+ auto* object = MakeGarbageCollected<Type>(GetHeap());
+ BasePage* page = BasePage::FromPayload(object);
+ BaseSpace* space = page->space();
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(1u, g_destructor_callcount);
+ // Check that the page is gone.
+ EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ EXPECT_EQ(nullptr, GetBackend()->Lookup(reinterpret_cast<Address>(object)));
+}
+
+TEST_F(SweeperTest, DontSweepMarkedLargeObject) {
+ constexpr size_t kObjectSize = kLargeObjectSizeThreshold * 2;
+ using Type = GCed<kObjectSize>;
+
+ auto* object = MakeGarbageCollected<Type>(GetHeap());
+ MarkObject(object);
+ BasePage* page = BasePage::FromPayload(object);
+ BaseSpace* space = page->space();
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+ // Check that the page is returned to the space.
+ EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
+ EXPECT_NE(nullptr, GetBackend()->Lookup(reinterpret_cast<Address>(object)));
+}
+
+TEST_F(SweeperTest, SweepMultipleObjectsOnPage) {
+ constexpr size_t kObjectSize = 8;
+ using Type = GCed<kObjectSize>;
+ const size_t kNumberOfObjects =
+ NormalPage::PayloadSize() / (sizeof(Type) + sizeof(HeapObjectHeader));
+
+ for (size_t i = 0; i < kNumberOfObjects; ++i) {
+ MakeGarbageCollected<Type>(GetHeap());
+ }
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(kNumberOfObjects, g_destructor_callcount);
+}
+
+TEST_F(SweeperTest, SweepObjectsOnAllArenas) {
+ MakeGarbageCollected<GCed<1>>(GetHeap());
+ MakeGarbageCollected<GCed<32>>(GetHeap());
+ MakeGarbageCollected<GCed<64>>(GetHeap());
+ MakeGarbageCollected<GCed<128>>(GetHeap());
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(5u, g_destructor_callcount);
+}
+
+TEST_F(SweeperTest, SweepMultiplePagesInSingleSpace) {
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+ MakeGarbageCollected<GCed<2 * kLargeObjectSizeThreshold>>(GetHeap());
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+
+ Sweep();
+
+ EXPECT_EQ(3u, g_destructor_callcount);
+}
+
+TEST_F(SweeperTest, CoalesceFreeListEntries) {
+ constexpr size_t kObjectSize = 32;
+ using Type = GCed<kObjectSize>;
+
+ auto* object1 = MakeGarbageCollected<Type>(GetHeap());
+ auto* object2 = MakeGarbageCollected<Type>(GetHeap());
+ auto* object3 = MakeGarbageCollected<Type>(GetHeap());
+ auto* object4 = MakeGarbageCollected<Type>(GetHeap());
+
+ MarkObject(object1);
+ MarkObject(object4);
+
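+ // object2 and object3 are unmarked and adjacent, so sweeping is expected to
+ // coalesce them into a single free-list block covering both.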
+ Address object2_start =
+ reinterpret_cast<Address>(&HeapObjectHeader::FromPayload(object2));
+ Address object3_end =
+ reinterpret_cast<Address>(&HeapObjectHeader::FromPayload(object3)) +
+ HeapObjectHeader::FromPayload(object3).GetSize();
+
+ const BasePage* page = BasePage::FromPayload(object2);
+ const FreeList& freelist = NormalPageSpace::From(page->space())->free_list();
+
+ const FreeList::Block coalesced_block = {object2_start,
+ object3_end - object2_start};
+
+ EXPECT_EQ(0u, g_destructor_callcount);
+ EXPECT_FALSE(freelist.Contains(coalesced_block));
+
+ Sweep();
+
+ EXPECT_EQ(2u, g_destructor_callcount);
+ EXPECT_TRUE(freelist.Contains(coalesced_block));
+}
+
+namespace {
+
+class GCInDestructor final : public GarbageCollected<GCInDestructor> {
+ public:
+ explicit GCInDestructor(Heap* heap) : heap_(heap) {}
+ ~GCInDestructor() {
+ // Instead of directly calling GC, allocations should be supported here as
+ // well.
+ heap_->CollectGarbage(internal::Heap::GCConfig::Default());
+ }
+
+ private:
+ Heap* heap_;
+};
+
+} // namespace
+
+TEST_F(SweeperTest, SweepDoesNotTriggerRecursiveGC) {
+ auto* internal_heap = internal::Heap::From(GetHeap());
+ size_t saved_epoch = internal_heap->epoch();
+ MakeGarbageCollected<GCInDestructor>(GetHeap(), internal_heap);
+ PreciseGC();
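+ // The GC requested from the destructor during sweeping must not start a
+ // nested collection, so the epoch advances by exactly one.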
+ EXPECT_EQ(saved_epoch + 1, internal_heap->epoch());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.cc b/deps/v8/test/unittests/heap/cppgc/tests.cc
index e67ac730d6..8d94369e40 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.cc
+++ b/deps/v8/test/unittests/heap/cppgc/tests.cc
@@ -4,7 +4,10 @@
#include "test/unittests/heap/cppgc/tests.h"
+#include <memory>
+
namespace cppgc {
+namespace internal {
namespace testing {
// static
@@ -12,7 +15,7 @@ std::unique_ptr<cppgc::PageAllocator> TestWithPlatform::page_allocator_;
// static
void TestWithPlatform::SetUpTestSuite() {
- page_allocator_.reset(new v8::base::PageAllocator());
+ page_allocator_ = std::make_unique<v8::base::PageAllocator>();
cppgc::InitializePlatform(page_allocator_.get());
}
@@ -22,15 +25,11 @@ void TestWithPlatform::TearDownTestSuite() {
page_allocator_.reset();
}
-void TestWithHeap::SetUp() {
- heap_ = Heap::Create();
- TestWithPlatform::SetUp();
-}
+TestWithHeap::TestWithHeap() : heap_(Heap::Create()) {}
-void TestWithHeap::TearDown() {
- heap_.reset();
- TestWithPlatform::TearDown();
-}
+TestSupportingAllocationOnly::TestSupportingAllocationOnly()
+ : no_gc_scope_(internal::Heap::From(GetHeap())) {}
} // namespace testing
+} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index d21f256444..835c24e7be 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -8,9 +8,11 @@
#include "include/cppgc/heap.h"
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
+#include "src/heap/cppgc/heap.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
+namespace internal {
namespace testing {
class TestWithPlatform : public ::testing::Test {
@@ -24,16 +26,33 @@ class TestWithPlatform : public ::testing::Test {
class TestWithHeap : public TestWithPlatform {
protected:
- void SetUp() override;
- void TearDown() override;
+ TestWithHeap();
- Heap* GetHeap() const { return heap_.get(); }
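+ // Triggers a precise garbage collection, i.e. one that does not
+ // conservatively scan the stack (kNoHeapPointers).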
+ void PreciseGC() {
+ heap_->ForceGarbageCollectionSlow(
+ "TestWithHeap", "Testing", Heap::GCConfig::StackState::kNoHeapPointers);
+ }
+
+ cppgc::Heap* GetHeap() const { return heap_.get(); }
private:
std::unique_ptr<cppgc::Heap> heap_;
};
+// Restrictive test fixture that supports allocation but makes sure that no
+// garbage collection is triggered. This is useful for writing idiomatic
+// tests where objects are allocated on the managed heap while still avoiding
+// far-reaching test consequences of full garbage collection calls.
+class TestSupportingAllocationOnly : public TestWithHeap {
+ protected:
+ TestSupportingAllocationOnly();
+
+ private:
+ Heap::NoGCScope no_gc_scope_;
+};
+
} // namespace testing
+} // namespace internal
} // namespace cppgc
#endif // V8_UNITTESTS_HEAP_CPPGC_TESTS_H_
diff --git a/deps/v8/test/unittests/heap/cppgc/visitor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/visitor-unittest.cc
new file mode 100644
index 0000000000..d4eb4b1fd1
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/visitor-unittest.cc
@@ -0,0 +1,232 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/visitor.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/trace-trait.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class TraceTraitTest : public testing::TestSupportingAllocationOnly {};
+class VisitorTest : public testing::TestSupportingAllocationOnly {};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ static size_t trace_callcount;
+
+ GCed() { trace_callcount = 0; }
+
+ virtual void Trace(cppgc::Visitor* visitor) const { trace_callcount++; }
+};
+
+size_t GCed::trace_callcount;
+
+class GCedMixin : public GarbageCollectedMixin {};
+
+class OtherPayload {
+ public:
+ virtual void* GetDummy() const { return nullptr; }
+};
+
+class GCedMixinApplication : public GCed,
+ public OtherPayload,
+ public GCedMixin {
+ USING_GARBAGE_COLLECTED_MIXIN();
+
+ public:
+ void Trace(cppgc::Visitor* visitor) const override {
+ GCed::Trace(visitor);
+ GCedMixin::Trace(visitor);
+ }
+};
+
+} // namespace
+
+TEST_F(TraceTraitTest, GetObjectStartGCed) {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap());
+ EXPECT_EQ(gced,
+ TraceTrait<GCed>::GetTraceDescriptor(gced).base_object_payload);
+}
+
+TEST_F(TraceTraitTest, GetObjectStartGCedMixin) {
+ auto* gced_mixin_app = MakeGarbageCollected<GCedMixinApplication>(GetHeap());
+ auto* gced_mixin = static_cast<GCedMixin*>(gced_mixin_app);
+ EXPECT_EQ(gced_mixin_app,
+ TraceTrait<GCedMixin>::GetTraceDescriptor(gced_mixin)
+ .base_object_payload);
+}
+
+TEST_F(TraceTraitTest, TraceGCed) {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap());
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ TraceTrait<GCed>::Trace(nullptr, gced);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+TEST_F(TraceTraitTest, TraceGCedMixin) {
+ auto* gced_mixin_app = MakeGarbageCollected<GCedMixinApplication>(GetHeap());
+ auto* gced_mixin = static_cast<GCedMixin*>(gced_mixin_app);
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ TraceTrait<GCedMixin>::Trace(nullptr, gced_mixin);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+TEST_F(TraceTraitTest, TraceGCedThroughTraceDescriptor) {
+ auto* gced = MakeGarbageCollected<GCed>(GetHeap());
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ TraceDescriptor desc = TraceTrait<GCed>::GetTraceDescriptor(gced);
+ desc.callback(nullptr, desc.base_object_payload);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+TEST_F(TraceTraitTest, TraceGCedMixinThroughTraceDescriptor) {
+ auto* gced_mixin_app = MakeGarbageCollected<GCedMixinApplication>(GetHeap());
+ auto* gced_mixin = static_cast<GCedMixin*>(gced_mixin_app);
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ TraceDescriptor desc = TraceTrait<GCedMixin>::GetTraceDescriptor(gced_mixin);
+ desc.callback(nullptr, desc.base_object_payload);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+namespace {
+
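+// Visitor that checks that dispatch passes the expected object and base
+// payload, and then invokes the corresponding trace or weak callback directly.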
+class DispatchingVisitor final : public VisitorBase {
+ public:
+ DispatchingVisitor(const void* object, const void* payload)
+ : object_(object), payload_(payload) {}
+
+ protected:
+ void Visit(const void* t, TraceDescriptor desc) final {
+ EXPECT_EQ(object_, t);
+ EXPECT_EQ(payload_, desc.base_object_payload);
+ desc.callback(this, desc.base_object_payload);
+ }
+
+ void VisitWeak(const void* t, TraceDescriptor desc, WeakCallback callback,
+ const void* weak_member) final {
+ EXPECT_EQ(object_, t);
+ EXPECT_EQ(payload_, desc.base_object_payload);
+ LivenessBroker broker = LivenessBrokerFactory::Create();
+ callback(broker, weak_member);
+ }
+
+ private:
+ const void* object_;
+ const void* payload_;
+};
+
+} // namespace
+
+TEST_F(VisitorTest, DispatchTraceGCed) {
+ Member<GCed> ref = MakeGarbageCollected<GCed>(GetHeap());
+ DispatchingVisitor visitor(ref, ref);
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ visitor.Trace(ref);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+TEST_F(VisitorTest, DispatchTraceGCedMixin) {
+ auto* gced_mixin_app = MakeGarbageCollected<GCedMixinApplication>(GetHeap());
+ auto* gced_mixin = static_cast<GCedMixin*>(gced_mixin_app);
+ // Ensure that we indeed test dispatching an inner object.
+ EXPECT_NE(static_cast<void*>(gced_mixin_app), static_cast<void*>(gced_mixin));
+ Member<GCedMixin> ref = gced_mixin;
+ DispatchingVisitor visitor(gced_mixin, gced_mixin_app);
+ EXPECT_EQ(0u, GCed::trace_callcount);
+ visitor.Trace(ref);
+ EXPECT_EQ(1u, GCed::trace_callcount);
+}
+
+TEST_F(VisitorTest, DispatchTraceWeakGCed) {
+ WeakMember<GCed> ref = MakeGarbageCollected<GCed>(GetHeap());
+ DispatchingVisitor visitor(ref, ref);
+ visitor.Trace(ref);
+ // No marking, so reference should be cleared.
+ EXPECT_EQ(nullptr, ref.Get());
+}
+
+TEST_F(VisitorTest, DispatchTraceWeakGCedMixin) {
+ auto* gced_mixin_app = MakeGarbageCollected<GCedMixinApplication>(GetHeap());
+ auto* gced_mixin = static_cast<GCedMixin*>(gced_mixin_app);
+ // Ensure that we indeed test dispatching an inner object.
+ EXPECT_NE(static_cast<void*>(gced_mixin_app), static_cast<void*>(gced_mixin));
+ WeakMember<GCedMixin> ref = gced_mixin;
+ DispatchingVisitor visitor(gced_mixin, gced_mixin_app);
+ visitor.Trace(ref);
+ // No marking, so reference should be cleared.
+ EXPECT_EQ(nullptr, ref.Get());
+}
+
+namespace {
+
+class WeakCallbackVisitor final : public VisitorBase {
+ public:
+ void RegisterWeakCallback(WeakCallback callback, const void* param) final {
+ LivenessBroker broker = LivenessBrokerFactory::Create();
+ callback(broker, param);
+ }
+};
+
+struct WeakCallbackDispatcher {
+ static size_t callback_callcount;
+ static const void* callback_param;
+
+ static void Setup(const void* expected_param) {
+ callback_callcount = 0;
+ callback_param = expected_param;
+ }
+
+ static void Call(const LivenessBroker& broker, const void* param) {
+ EXPECT_EQ(callback_param, param);
+ callback_callcount++;
+ }
+};
+
+size_t WeakCallbackDispatcher::callback_callcount;
+const void* WeakCallbackDispatcher::callback_param;
+
+class GCedWithCustomWeakCallback final
+ : public GarbageCollected<GCedWithCustomWeakCallback> {
+ public:
+ void CustomWeakCallbackMethod(const LivenessBroker& broker) {
+ WeakCallbackDispatcher::Call(broker, this);
+ }
+
+ void Trace(cppgc::Visitor* visitor) {
+ visitor->RegisterWeakCallbackMethod<
+ GCedWithCustomWeakCallback,
+ &GCedWithCustomWeakCallback::CustomWeakCallbackMethod>(this);
+ }
+};
+
+} // namespace
+
+TEST_F(VisitorTest, DispatchRegisterWeakCallback) {
+ WeakCallbackVisitor visitor;
+ WeakCallbackDispatcher::Setup(&visitor);
+ EXPECT_EQ(0u, WeakCallbackDispatcher::callback_callcount);
+ visitor.RegisterWeakCallback(WeakCallbackDispatcher::Call, &visitor);
+ EXPECT_EQ(1u, WeakCallbackDispatcher::callback_callcount);
+}
+
+TEST_F(VisitorTest, DispatchRegisterWeakCallbackMethod) {
+ WeakCallbackVisitor visitor;
+ auto* gced = MakeGarbageCollected<GCedWithCustomWeakCallback>(GetHeap());
+ WeakCallbackDispatcher::Setup(gced);
+ EXPECT_EQ(0u, WeakCallbackDispatcher::callback_callcount);
+ gced->Trace(&visitor);
+ EXPECT_EQ(1u, WeakCallbackDispatcher::callback_callcount);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/worklist-unittest.cc b/deps/v8/test/unittests/heap/cppgc/worklist-unittest.cc
new file mode 100644
index 0000000000..b9e8843666
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/worklist-unittest.cc
@@ -0,0 +1,346 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/worklist.h"
+
+#include "test/unittests/heap/cppgc/tests.h"
+
+namespace cppgc {
+namespace internal {
+
+class SomeObject {};
+
+using TestWorklist = Worklist<SomeObject*, 64>;
+
+TEST(CppgcWorkListTest, SegmentCreate) {
+ TestWorklist::Segment segment;
+ EXPECT_TRUE(segment.IsEmpty());
+ EXPECT_EQ(0u, segment.Size());
+ EXPECT_FALSE(segment.IsFull());
+}
+
+TEST(CppgcWorkListTest, SegmentPush) {
+ TestWorklist::Segment segment;
+ EXPECT_EQ(0u, segment.Size());
+ EXPECT_TRUE(segment.Push(nullptr));
+ EXPECT_EQ(1u, segment.Size());
+}
+
+TEST(CppgcWorkListTest, SegmentPushPop) {
+ TestWorklist::Segment segment;
+ EXPECT_TRUE(segment.Push(nullptr));
+ EXPECT_EQ(1u, segment.Size());
+ SomeObject dummy;
+ SomeObject* object = &dummy;
+ EXPECT_TRUE(segment.Pop(&object));
+ EXPECT_EQ(0u, segment.Size());
+ EXPECT_EQ(nullptr, object);
+}
+
+TEST(CppgcWorkListTest, SegmentIsEmpty) {
+ TestWorklist::Segment segment;
+ EXPECT_TRUE(segment.IsEmpty());
+ EXPECT_TRUE(segment.Push(nullptr));
+ EXPECT_FALSE(segment.IsEmpty());
+}
+
+TEST(CppgcWorkListTest, SegmentIsFull) {
+ TestWorklist::Segment segment;
+ EXPECT_FALSE(segment.IsFull());
+ for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
+ EXPECT_TRUE(segment.Push(nullptr));
+ }
+ EXPECT_TRUE(segment.IsFull());
+}
+
+TEST(CppgcWorkListTest, SegmentClear) {
+ TestWorklist::Segment segment;
+ EXPECT_TRUE(segment.Push(nullptr));
+ EXPECT_FALSE(segment.IsEmpty());
+ segment.Clear();
+ EXPECT_TRUE(segment.IsEmpty());
+ for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
+ EXPECT_TRUE(segment.Push(nullptr));
+ }
+}
+
+TEST(CppgcWorkListTest, SegmentFullPushFails) {
+ TestWorklist::Segment segment;
+ EXPECT_FALSE(segment.IsFull());
+ for (size_t i = 0; i < TestWorklist::Segment::kCapacity; i++) {
+ EXPECT_TRUE(segment.Push(nullptr));
+ }
+ EXPECT_TRUE(segment.IsFull());
+ EXPECT_FALSE(segment.Push(nullptr));
+}
+
+TEST(CppgcWorkListTest, SegmentEmptyPopFails) {
+ TestWorklist::Segment segment;
+ EXPECT_TRUE(segment.IsEmpty());
+ SomeObject* object;
+ EXPECT_FALSE(segment.Pop(&object));
+}
+
+TEST(CppgcWorkListTest, SegmentUpdateFalse) {
+ TestWorklist::Segment segment;
+ SomeObject* object;
+ object = reinterpret_cast<SomeObject*>(&object);
+ EXPECT_TRUE(segment.Push(object));
+ segment.Update([](SomeObject* object, SomeObject** out) { return false; });
+ EXPECT_TRUE(segment.IsEmpty());
+}
+
+TEST(CppgcWorkListTest, SegmentUpdate) {
+ TestWorklist::Segment segment;
+ SomeObject* objectA;
+ objectA = reinterpret_cast<SomeObject*>(&objectA);
+ SomeObject* objectB;
+ objectB = reinterpret_cast<SomeObject*>(&objectB);
+ EXPECT_TRUE(segment.Push(objectA));
+ segment.Update([objectB](SomeObject* object, SomeObject** out) {
+ *out = objectB;
+ return true;
+ });
+ SomeObject* object;
+ EXPECT_TRUE(segment.Pop(&object));
+ EXPECT_EQ(object, objectB);
+}
+
+TEST(CppgcWorkListTest, CreateEmpty) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view(&worklist, 0);
+ EXPECT_TRUE(worklist_view.IsLocalEmpty());
+ EXPECT_TRUE(worklist.IsEmpty());
+}
+
+TEST(CppgcWorkListTest, LocalPushPop) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view(&worklist, 0);
+ SomeObject dummy;
+ SomeObject* retrieved = nullptr;
+ EXPECT_TRUE(worklist_view.Push(&dummy));
+ EXPECT_FALSE(worklist_view.IsLocalEmpty());
+ EXPECT_TRUE(worklist_view.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+}
+
+TEST(CppgcWorkListTest, LocalIsBasedOnId) {
+ TestWorklist worklist;
+ // Use the same id.
+ TestWorklist::View worklist_view1(&worklist, 0);
+ TestWorklist::View worklist_view2(&worklist, 0);
+ SomeObject dummy;
+ SomeObject* retrieved = nullptr;
+ EXPECT_TRUE(worklist_view1.Push(&dummy));
+ EXPECT_FALSE(worklist_view1.IsLocalEmpty());
+ EXPECT_FALSE(worklist_view2.IsLocalEmpty());
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+ EXPECT_TRUE(worklist_view1.IsLocalEmpty());
+ EXPECT_TRUE(worklist_view2.IsLocalEmpty());
+}
+
+TEST(CppgcWorkListTest, LocalPushStaysPrivate) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view1(&worklist, 0);
+ TestWorklist::View worklist_view2(&worklist, 1);
+ SomeObject dummy;
+ SomeObject* retrieved = nullptr;
+ EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+ EXPECT_TRUE(worklist_view1.Push(&dummy));
+ EXPECT_FALSE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+ EXPECT_FALSE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(nullptr, retrieved);
+ EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+ EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+}
+
+TEST(CppgcWorkListTest, GlobalUpdateNull) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view(&worklist, 0);
+ SomeObject* object;
+ object = reinterpret_cast<SomeObject*>(&object);
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view.Push(object));
+ }
+ EXPECT_TRUE(worklist_view.Push(object));
+ worklist.Update([](SomeObject* object, SomeObject** out) { return false; });
+ EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+}
+
+TEST(CppgcWorkListTest, GlobalUpdate) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view(&worklist, 0);
+ SomeObject* objectA = nullptr;
+ objectA = reinterpret_cast<SomeObject*>(&objectA);
+ SomeObject* objectB = nullptr;
+ objectB = reinterpret_cast<SomeObject*>(&objectB);
+ SomeObject* objectC = nullptr;
+ objectC = reinterpret_cast<SomeObject*>(&objectC);
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view.Push(objectA));
+ }
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view.Push(objectB));
+ }
+ EXPECT_TRUE(worklist_view.Push(objectA));
+ worklist.Update([objectA, objectC](SomeObject* object, SomeObject** out) {
+ if (object != objectA) {
+ *out = objectC;
+ return true;
+ }
+ return false;
+ });
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ SomeObject* object;
+ EXPECT_TRUE(worklist_view.Pop(&object));
+ EXPECT_EQ(object, objectC);
+ }
+}
+
+TEST(CppgcWorkListTest, FlushToGlobalPushSegment) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view0(&worklist, 0);
+ TestWorklist::View worklist_view1(&worklist, 1);
+ SomeObject* object = nullptr;
+ SomeObject* objectA = nullptr;
+ objectA = reinterpret_cast<SomeObject*>(&objectA);
+ EXPECT_TRUE(worklist_view0.Push(objectA));
+ worklist.FlushToGlobal(0);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
+ EXPECT_TRUE(worklist_view1.Pop(&object));
+}
+
+TEST(CppgcWorkListTest, FlushToGlobalPopSegment) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view0(&worklist, 0);
+ TestWorklist::View worklist_view1(&worklist, 1);
+ SomeObject* object = nullptr;
+ SomeObject* objectA = nullptr;
+ objectA = reinterpret_cast<SomeObject*>(&objectA);
+ EXPECT_TRUE(worklist_view0.Push(objectA));
+ EXPECT_TRUE(worklist_view0.Push(objectA));
+ EXPECT_TRUE(worklist_view0.Pop(&object));
+ worklist.FlushToGlobal(0);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
+ EXPECT_TRUE(worklist_view1.Pop(&object));
+}
+
+TEST(CppgcWorkListTest, Clear) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view(&worklist, 0);
+ SomeObject* object;
+ object = reinterpret_cast<SomeObject*>(&object);
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view.Push(object));
+ }
+ EXPECT_TRUE(worklist_view.Push(object));
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
+ worklist.Clear();
+ EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+}
+
+TEST(CppgcWorkListTest, SingleSegmentSteal) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view1(&worklist, 0);
+ TestWorklist::View worklist_view2(&worklist, 1);
+ SomeObject dummy;
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view1.Push(&dummy));
+ }
+ SomeObject* retrieved = nullptr;
+ // One more push/pop to publish the full segment.
+ EXPECT_TRUE(worklist_view1.Push(nullptr));
+ EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_EQ(nullptr, retrieved);
+ EXPECT_EQ(1U, worklist.GlobalPoolSize());
+ // Stealing.
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+ EXPECT_FALSE(worklist_view1.Pop(&retrieved));
+ }
+ EXPECT_TRUE(worklist.IsEmpty());
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+}
+
+TEST(CppgcWorkListTest, MultipleSegmentsStolen) {
+ TestWorklist worklist;
+ TestWorklist::View worklist_view1(&worklist, 0);
+ TestWorklist::View worklist_view2(&worklist, 1);
+ TestWorklist::View worklist_view3(&worklist, 2);
+ SomeObject dummy1;
+ SomeObject dummy2;
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view1.Push(&dummy1));
+ }
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view1.Push(&dummy2));
+ }
+ SomeObject* retrieved = nullptr;
+ SomeObject dummy3;
+ // One more push/pop to publish the full segment.
+ EXPECT_TRUE(worklist_view1.Push(&dummy3));
+ EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_EQ(&dummy3, retrieved);
+ EXPECT_EQ(2U, worklist.GlobalPoolSize());
+ // Stealing.
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ SomeObject* const expect_bag2 = retrieved;
+ EXPECT_TRUE(worklist_view3.Pop(&retrieved));
+ SomeObject* const expect_bag3 = retrieved;
+ EXPECT_EQ(0U, worklist.GlobalPoolSize());
+ EXPECT_NE(expect_bag2, expect_bag3);
+ EXPECT_TRUE(expect_bag2 == &dummy1 || expect_bag2 == &dummy2);
+ EXPECT_TRUE(expect_bag3 == &dummy1 || expect_bag3 == &dummy2);
+ for (size_t i = 1; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(expect_bag2, retrieved);
+ EXPECT_FALSE(worklist_view1.Pop(&retrieved));
+ }
+ for (size_t i = 1; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view3.Pop(&retrieved));
+ EXPECT_EQ(expect_bag3, retrieved);
+ EXPECT_FALSE(worklist_view1.Pop(&retrieved));
+ }
+ EXPECT_TRUE(worklist.IsEmpty());
+}
+
+TEST(CppgcWorkListTest, MergeGlobalPool) {
+ TestWorklist worklist1;
+ TestWorklist::View worklist_view1(&worklist1, 0);
+ SomeObject dummy;
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view1.Push(&dummy));
+ }
+ SomeObject* retrieved = nullptr;
+ // One more push/pop to publish the full segment.
+ EXPECT_TRUE(worklist_view1.Push(nullptr));
+ EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_EQ(nullptr, retrieved);
+ EXPECT_EQ(1U, worklist1.GlobalPoolSize());
+ // Merging global pool into a new Worklist.
+ TestWorklist worklist2;
+ TestWorklist::View worklist_view2(&worklist2, 0);
+ EXPECT_EQ(0U, worklist2.GlobalPoolSize());
+ worklist2.MergeGlobalPool(&worklist1);
+ EXPECT_EQ(1U, worklist2.GlobalPoolSize());
+ EXPECT_FALSE(worklist2.IsEmpty());
+ for (size_t i = 0; i < TestWorklist::kSegmentCapacity; i++) {
+ EXPECT_TRUE(worklist_view2.Pop(&retrieved));
+ EXPECT_EQ(&dummy, retrieved);
+ EXPECT_FALSE(worklist_view1.Pop(&retrieved));
+ }
+ EXPECT_TRUE(worklist1.IsEmpty());
+ EXPECT_TRUE(worklist2.IsEmpty());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index 829096ab23..371527e740 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -33,7 +33,7 @@ class MockEmbedderHeapTracer : public EmbedderHeapTracer {
public:
MOCK_METHOD1(TracePrologue, void(EmbedderHeapTracer::TraceFlags));
MOCK_METHOD1(TraceEpilogue, void(EmbedderHeapTracer::TraceSummary*));
- MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
+ MOCK_METHOD1(EnterFinalPause, void(EmbedderStackState));
MOCK_METHOD0(IsTracingDone, bool());
MOCK_METHOD1(RegisterV8References,
void(const std::vector<std::pair<void*, void*> >&));
@@ -105,7 +105,10 @@ TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) {
LocalEmbedderHeapTracer local_tracer(nullptr);
local_tracer.SetRemoteTracer(&remote_tracer);
// The default stack state is expected to be unknown.
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
+ EXPECT_CALL(
+ remote_tracer,
+ EnterFinalPause(
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
local_tracer.EnterFinalPause();
}
@@ -115,8 +118,10 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
LocalEmbedderHeapTracer local_tracer(isolate());
local_tracer.SetRemoteTracer(&remote_tracer);
local_tracer.SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::kEmpty);
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EXPECT_CALL(
+ remote_tracer,
+ EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
local_tracer.EnterFinalPause();
}
@@ -126,8 +131,11 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, TemporaryEmbedderStackState) {
local_tracer.SetRemoteTracer(&remote_tracer);
// Default is unknown, see above.
{
- EmbedderStackStateScope scope(&local_tracer, EmbedderHeapTracer::kEmpty);
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ EmbedderStackStateScope scope(
+ &local_tracer, EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EXPECT_CALL(remote_tracer,
+ EnterFinalPause(
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
local_tracer.EnterFinalPause();
}
}
@@ -139,14 +147,21 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
local_tracer.SetRemoteTracer(&remote_tracer);
// Default is unknown, see above.
{
- EmbedderStackStateScope scope(&local_tracer, EmbedderHeapTracer::kEmpty);
+ EmbedderStackStateScope scope(
+ &local_tracer, EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
{
- EmbedderStackStateScope scope(&local_tracer,
- EmbedderHeapTracer::kUnknown);
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
+ EmbedderStackStateScope scope(
+ &local_tracer,
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ EXPECT_CALL(
+ remote_tracer,
+ EnterFinalPause(
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
local_tracer.EnterFinalPause();
}
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ EXPECT_CALL(remote_tracer,
+ EnterFinalPause(
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
local_tracer.EnterFinalPause();
}
}
@@ -156,10 +171,15 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate, EnterFinalPauseStackStateResets) {
LocalEmbedderHeapTracer local_tracer(isolate());
local_tracer.SetRemoteTracer(&remote_tracer);
local_tracer.SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::kEmpty);
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ EXPECT_CALL(
+ remote_tracer,
+ EnterFinalPause(EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers));
local_tracer.EnterFinalPause();
- EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
+ EXPECT_CALL(
+ remote_tracer,
+ EnterFinalPause(
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers));
local_tracer.EnterFinalPause();
}
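Editor's note: these hunks track the rename of the EmbedderStackState enumerators — the old kUnknown/kEmpty become the scoped kMayContainHeapPointers/kNoHeapPointers. A sketch of the scoped-override pattern the tests exercise with the new spellings (same local_tracer/EmbedderStackStateScope objects as above; the expectation that the default state restores after the scope ends is exactly what the resets tests assert):

// Scoped override of the stack state for the next final pause.
{
  EmbedderStackStateScope scope(
      &local_tracer,
      EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
  local_tracer.EnterFinalPause();  // remote tracer sees kNoHeapPointers
}
// Outside the scope the default applies again: kMayContainHeapPointers.
local_tracer.EnterFinalPause();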
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 12b5d3a7fe..d96f70aec3 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/heap.h"
+
#include <cmath>
#include <iostream>
#include <limits>
#include "src/handles/handles-inl.h"
-#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/base/list-unittest.cc b/deps/v8/test/unittests/heap/list-unittest.cc
index 39394e2743..9150eaaa95 100644
--- a/deps/v8/test/unittests/base/list-unittest.cc
+++ b/deps/v8/test/unittests/heap/list-unittest.cc
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/list.h"
+#include "src/heap/list.h"
#include "testing/gtest-support.h"
namespace v8 {
-namespace base {
+namespace internal {
+namespace heap {
class TestChunk {
public:
- base::ListNode<TestChunk>& list_node() { return list_node_; }
- base::ListNode<TestChunk> list_node_;
+ heap::ListNode<TestChunk>& list_node() { return list_node_; }
+ heap::ListNode<TestChunk> list_node_;
};
TEST(List, InsertAtTailAndRemove) {
@@ -88,5 +89,6 @@ TEST(List, InsertMultipleAtTailAndRemoveFromMiddle) {
EXPECT_TRUE(list.Empty());
}
-} // namespace base
+} // namespace heap
+} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc b/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
index 0592d7b2db..f8be3f6c22 100644
--- a/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
+++ b/deps/v8/test/unittests/heap/off-thread-factory-unittest.cc
@@ -15,6 +15,7 @@
#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/off-thread-factory-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/script.h"
@@ -55,7 +56,13 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
public:
OffThreadFactoryTest()
: TestWithIsolateAndZone(),
- parse_info_(isolate()),
+ state_(isolate()),
+ parse_info_(
+ isolate(),
+ UnoptimizedCompileFlags::ForToplevelCompile(
+ isolate(), true, construct_language_mode(FLAG_use_strict),
+ REPLMode::kNo),
+ &state_),
off_thread_isolate_(isolate(), parse_info_.zone()) {}
FunctionLiteral* ParseProgram(const char* source) {
@@ -68,8 +75,6 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
parse_info_.set_character_stream(
ScannerStream::ForTesting(utf16_source.data(), utf16_source.size()));
- parse_info_.set_toplevel();
- parse_info_.set_allow_lazy_parsing();
{
DisallowHeapAllocation no_allocation;
@@ -78,9 +83,7 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
Parser parser(parse_info());
parser.InitializeEmptyScopeChain(parse_info());
- parser.ParseOnBackground(parse_info());
-
- CHECK(DeclarationScope::Analyze(parse_info()));
+ parser.ParseOnBackground(parse_info(), 0, 0, kFunctionLiteralIdTopLevel);
}
parse_info()->ast_value_factory()->Internalize(off_thread_isolate());
@@ -88,7 +91,7 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
script_ = parse_info_.CreateScript(off_thread_isolate(),
off_thread_factory()->empty_string(),
- ScriptOriginOptions());
+ kNullMaybeHandle, ScriptOriginOptions());
// Create the SFI list on the script so that SFI SetScript works.
Handle<WeakFixedArray> infos = off_thread_factory()->NewWeakFixedArray(
@@ -116,6 +119,7 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
}
private:
+ UnoptimizedCompileState state_;
ParseInfo parse_info_;
OffThreadIsolate off_thread_isolate_;
Handle<String> source_string_;
@@ -123,11 +127,11 @@ class OffThreadFactoryTest : public TestWithIsolateAndZone {
};
TEST_F(OffThreadFactoryTest, OneByteInternalizedString_IsAddedToStringTable) {
- Vector<const uint8_t> string_vector = StaticCharVector("foo");
+ Vector<const uint8_t> string_vector = StaticOneByteVector("foo");
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
string_vector.begin(), string_vector.length(), HashSeed(isolate()));
- FixedArray off_thread_wrapper;
+ OffThreadTransferHandle<FixedArray> wrapper;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
@@ -135,15 +139,15 @@ TEST_F(OffThreadFactoryTest, OneByteInternalizedString_IsAddedToStringTable) {
off_thread_factory()->NewOneByteInternalizedString(string_vector,
hash_field);
- off_thread_wrapper =
- *off_thread_factory()->StringWrapperForTest(off_thread_string);
- off_thread_factory()->FinishOffThread();
+ wrapper =
+ off_thread_isolate()->TransferHandle(WrapString(off_thread_string));
+ off_thread_isolate()->FinishOffThread();
}
- Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
- Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
+ Handle<String> string =
+ handle(String::cast(wrapper.ToHandle()->get(0)), isolate());
EXPECT_TRUE(string->IsOneByteEqualTo(CStrVector("foo")));
EXPECT_TRUE(string->IsInternalizedString());
@@ -162,12 +166,12 @@ TEST_F(OffThreadFactoryTest, OneByteInternalizedString_IsAddedToStringTable) {
TEST_F(OffThreadFactoryTest,
OneByteInternalizedString_DuplicateIsDeduplicated) {
- Vector<const uint8_t> string_vector = StaticCharVector("foo");
+ Vector<const uint8_t> string_vector = StaticOneByteVector("foo");
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
string_vector.begin(), string_vector.length(), HashSeed(isolate()));
- FixedArray off_thread_wrapper_1;
- FixedArray off_thread_wrapper_2;
+ OffThreadTransferHandle<FixedArray> wrapper_1;
+ OffThreadTransferHandle<FixedArray> wrapper_2;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
@@ -178,17 +182,19 @@ TEST_F(OffThreadFactoryTest,
off_thread_factory()->NewOneByteInternalizedString(string_vector,
hash_field);
- off_thread_wrapper_1 = *WrapString(off_thread_string_1);
- off_thread_wrapper_2 = *WrapString(off_thread_string_2);
- off_thread_factory()->FinishOffThread();
+ wrapper_1 =
+ off_thread_isolate()->TransferHandle(WrapString(off_thread_string_1));
+ wrapper_2 =
+ off_thread_isolate()->TransferHandle(WrapString(off_thread_string_2));
+ off_thread_isolate()->FinishOffThread();
}
- Handle<FixedArray> wrapper_1 = handle(off_thread_wrapper_1, isolate());
- Handle<FixedArray> wrapper_2 = handle(off_thread_wrapper_2, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
- Handle<String> string_1 = handle(String::cast(wrapper_1->get(0)), isolate());
- Handle<String> string_2 = handle(String::cast(wrapper_2->get(0)), isolate());
+ Handle<String> string_1 =
+ handle(String::cast(wrapper_1.ToHandle()->get(0)), isolate());
+ Handle<String> string_2 =
+ handle(String::cast(wrapper_2.ToHandle()->get(0)), isolate());
EXPECT_TRUE(string_1->IsOneByteEqualTo(CStrVector("foo")));
EXPECT_TRUE(string_1->IsInternalizedString());
@@ -201,20 +207,21 @@ TEST_F(OffThreadFactoryTest, AstRawString_IsInternalized) {
const AstRawString* raw_string = ast_value_factory.GetOneByteString("foo");
- FixedArray off_thread_wrapper;
+ OffThreadTransferHandle<FixedArray> wrapper;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
ast_value_factory.Internalize(off_thread_isolate());
- off_thread_wrapper = *WrapString(raw_string->string());
- off_thread_factory()->FinishOffThread();
+ wrapper =
+ off_thread_isolate()->TransferHandle(WrapString(raw_string->string()));
+ off_thread_isolate()->FinishOffThread();
}
- Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
- Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
+ Handle<String> string =
+ handle(String::cast(wrapper.ToHandle()->get(0)), isolate());
EXPECT_TRUE(string->IsOneByteEqualTo(CStrVector("foo")));
EXPECT_TRUE(string->IsInternalizedString());
@@ -224,7 +231,7 @@ TEST_F(OffThreadFactoryTest, AstConsString_CreatesConsString) {
AstValueFactory ast_value_factory(zone(), isolate()->ast_string_constants(),
HashSeed(isolate()));
- FixedArray off_thread_wrapper;
+ OffThreadTransferHandle<FixedArray> wrapper;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
@@ -236,15 +243,15 @@ TEST_F(OffThreadFactoryTest, AstConsString_CreatesConsString) {
ast_value_factory.Internalize(off_thread_isolate());
- off_thread_wrapper =
- *WrapString(foobar_string->GetString(off_thread_isolate()));
- off_thread_factory()->FinishOffThread();
+ wrapper = off_thread_isolate()->TransferHandle(
+ WrapString(foobar_string->GetString(off_thread_isolate())));
+ off_thread_isolate()->FinishOffThread();
}
- Handle<FixedArray> wrapper = handle(off_thread_wrapper, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
- Handle<String> string = handle(String::cast(wrapper->get(0)), isolate());
+ Handle<String> string =
+ handle(String::cast(wrapper.ToHandle()->get(0)), isolate());
EXPECT_TRUE(string->IsConsString());
EXPECT_TRUE(string->Equals(*isolate()->factory()->NewStringFromStaticChars(
@@ -254,18 +261,19 @@ TEST_F(OffThreadFactoryTest, AstConsString_CreatesConsString) {
TEST_F(OffThreadFactoryTest, EmptyScript) {
FunctionLiteral* program = ParseProgram("");
- SharedFunctionInfo shared;
+ OffThreadTransferHandle<SharedFunctionInfo> shared;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
- shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
- program, script(), true);
+ shared = off_thread_isolate()->TransferHandle(
+ off_thread_factory()->NewSharedFunctionInfoForLiteral(program, script(),
+ true));
- off_thread_factory()->FinishOffThread();
+ off_thread_isolate()->FinishOffThread();
}
- Handle<SharedFunctionInfo> root_sfi = handle(shared, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
+ Handle<SharedFunctionInfo> root_sfi = shared.ToHandle();
EXPECT_EQ(root_sfi->function_literal_id(), 0);
}
@@ -278,18 +286,19 @@ TEST_F(OffThreadFactoryTest, LazyFunction) {
->AsFunctionDeclaration()
->fun();
- SharedFunctionInfo shared;
+ OffThreadTransferHandle<SharedFunctionInfo> shared;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
- shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
- lazy, script(), true);
+ shared = off_thread_isolate()->TransferHandle(
+ off_thread_factory()->NewSharedFunctionInfoForLiteral(lazy, script(),
+ true));
- off_thread_factory()->FinishOffThread();
+ off_thread_isolate()->FinishOffThread();
}
- Handle<SharedFunctionInfo> lazy_sfi = handle(shared, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
+ Handle<SharedFunctionInfo> lazy_sfi = shared.ToHandle();
EXPECT_EQ(lazy_sfi->function_literal_id(), 1);
EXPECT_TRUE(lazy_sfi->Name().IsOneByteEqualTo(CStrVector("lazy")));
@@ -299,24 +308,28 @@ TEST_F(OffThreadFactoryTest, LazyFunction) {
TEST_F(OffThreadFactoryTest, EagerFunction) {
FunctionLiteral* program = ParseProgram("(function eager() {})");
+ // Rewritten to `.result = (function eager() {}); return .result`
FunctionLiteral* eager = program->body()
->at(0)
->AsExpressionStatement()
->expression()
+ ->AsAssignment()
+ ->value()
->AsFunctionLiteral();
- SharedFunctionInfo shared;
+ OffThreadTransferHandle<SharedFunctionInfo> shared;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
- shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
- eager, script(), true);
+ shared = off_thread_isolate()->TransferHandle(
+ off_thread_factory()->NewSharedFunctionInfoForLiteral(eager, script(),
+ true));
- off_thread_factory()->FinishOffThread();
+ off_thread_isolate()->FinishOffThread();
}
- Handle<SharedFunctionInfo> eager_sfi = handle(shared, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
+ Handle<SharedFunctionInfo> eager_sfi = shared.ToHandle();
EXPECT_EQ(eager_sfi->function_literal_id(), 1);
EXPECT_TRUE(eager_sfi->Name().IsOneByteEqualTo(CStrVector("eager")));
@@ -339,18 +352,19 @@ TEST_F(OffThreadFactoryTest, ImplicitNameFunction) {
->value()
->AsFunctionLiteral();
- SharedFunctionInfo shared;
+ OffThreadTransferHandle<SharedFunctionInfo> shared;
{
OffThreadHandleScope handle_scope(off_thread_isolate());
- shared = *off_thread_factory()->NewSharedFunctionInfoForLiteral(
- implicit_name, script(), true);
+ shared = off_thread_isolate()->TransferHandle(
+ off_thread_factory()->NewSharedFunctionInfoForLiteral(implicit_name,
+ script(), true));
- off_thread_factory()->FinishOffThread();
+ off_thread_isolate()->FinishOffThread();
}
- Handle<SharedFunctionInfo> implicit_name_sfi = handle(shared, isolate());
- off_thread_factory()->Publish(isolate());
+ off_thread_isolate()->Publish(isolate());
+ Handle<SharedFunctionInfo> implicit_name_sfi = shared.ToHandle();
EXPECT_EQ(implicit_name_sfi->function_literal_id(), 1);
EXPECT_TRUE(
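Editor's note: every test in this file now follows the same hand-off protocol — build objects inside an OffThreadHandleScope, convert them to OffThreadTransferHandles via TransferHandle(), call FinishOffThread() on the off-thread isolate, Publish() on the main isolate, and only then materialize main-thread Handles with ToHandle(). A condensed sketch of that sequence, reusing the fixture helpers from the tests above (string_vector and hash_field as set up in the first test; the final variable name is illustrative):

OffThreadTransferHandle<FixedArray> wrapper;
{
  OffThreadHandleScope handle_scope(off_thread_isolate());
  Handle<String> off_thread_string =
      off_thread_factory()->NewOneByteInternalizedString(string_vector,
                                                         hash_field);
  wrapper = off_thread_isolate()->TransferHandle(WrapString(off_thread_string));
  off_thread_isolate()->FinishOffThread();
}
off_thread_isolate()->Publish(isolate());
// Only after Publish() is it safe to touch the transferred objects.
Handle<FixedArray> materialized = wrapper.ToHandle();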
diff --git a/deps/v8/test/unittests/heap/safepoint-unittest.cc b/deps/v8/test/unittests/heap/safepoint-unittest.cc
index 462992f5fd..3caf3410d4 100644
--- a/deps/v8/test/unittests/heap/safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/safepoint-unittest.cc
@@ -17,6 +17,7 @@ using SafepointTest = TestWithIsolate;
TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
Heap* heap = i_isolate()->heap();
+ FLAG_local_heaps = true;
bool run = false;
{
SafepointScope scope(heap);
@@ -47,6 +48,7 @@ class ParkedThread final : public v8::base::Thread {
TEST_F(SafepointTest, StopParkedThreads) {
Heap* heap = i_isolate()->heap();
+ FLAG_local_heaps = true;
int safepoints = 0;
@@ -105,6 +107,7 @@ class RunningThread final : public v8::base::Thread {
TEST_F(SafepointTest, StopRunningThreads) {
Heap* heap = i_isolate()->heap();
+ FLAG_local_heaps = true;
const int kThreads = 10;
const int kRuns = 5;
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index b5e4a01ab4..1c0ef3ae65 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -85,7 +85,7 @@ TEST(SlotSet, Iterate) {
}
set->Iterate(
- kNullAddress, SlotSet::kBucketsRegularPage,
+ kNullAddress, 0, SlotSet::kBucketsRegularPage,
[](MaybeObjectSlot slot) {
if (slot.address() % 3 == 0) {
return KEEP_SLOT;
@@ -106,6 +106,40 @@ TEST(SlotSet, Iterate) {
SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
}
+TEST(SlotSet, IterateFromHalfway) {
+ SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
+
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
+ if (i % 7 == 0) {
+ set->Insert<AccessMode::ATOMIC>(i);
+ }
+ }
+
+ set->Iterate(
+ kNullAddress, SlotSet::kBucketsRegularPage / 2,
+ SlotSet::kBucketsRegularPage,
+ [](MaybeObjectSlot slot) {
+ if (slot.address() % 3 == 0) {
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ for (int i = 0; i < Page::kPageSize; i += kTaggedSize) {
+ if (i < Page::kPageSize / 2 && i % 7 == 0) {
+ EXPECT_TRUE(set->Lookup(i));
+ } else if (i >= Page::kPageSize / 2 && i % 21 == 0) {
+ EXPECT_TRUE(set->Lookup(i));
+ } else {
+ EXPECT_FALSE(set->Lookup(i));
+ }
+ }
+
+ SlotSet::Delete(set, SlotSet::kBucketsRegularPage);
+}
+
TEST(SlotSet, Remove) {
SlotSet* set = SlotSet::Allocate(SlotSet::kBucketsRegularPage);
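Editor's note: the expectations in IterateFromHalfway fall out of combining the two filters — slots were inserted at offsets where i % 7 == 0, the callback keeps only addresses divisible by 3, and iteration starts at the page's halfway bucket, so in the iterated half only offsets divisible by 21 survive while the first half is untouched. A tiny standalone check of that arithmetic (plain C++, no V8 types):

#include <cassert>

int main() {
  for (int i = 0; i < 84; ++i) {
    const bool inserted = (i % 7 == 0);
    const bool kept_by_callback = (i % 3 == 0);
    // In the iterated half, a slot survives only if it was inserted and kept.
    assert((inserted && kept_by_callback) == (i % 21 == 0));
  }
  return 0;
}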
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 296c85d707..3f054ef66b 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -3,12 +3,16 @@
// found in the LICENSE file.
#include "src/heap/spaces.h"
+
#include <memory>
+
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/large-spaces.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "test/unittests/test-utils.h"
@@ -167,10 +171,15 @@ TEST_F(SpacesTest, OffThreadSpaceMergeDuringIncrementalMarking) {
expected_merged_pages += pages_in_off_thread_space;
}
- heap->FinalizeIncrementalMarkingAtomically(GarbageCollectionReason::kTesting);
-
+ // Check the page count before finalizing marking, since the GC will see the
+ // empty pages and will evacuate them.
+ // TODO(leszeks): Maybe allocate real objects, and hold on to them with
+ // Handles, to make sure incremental marking finalization doesn't clear them
+ // away.
EXPECT_EQ(pages_in_old_space + expected_merged_pages,
old_space->CountTotalPages());
+
+ heap->FinalizeIncrementalMarkingAtomically(GarbageCollectionReason::kTesting);
}
class LargeOffThreadAllocationThread final : public base::Thread {
@@ -266,7 +275,10 @@ TEST_F(SpacesTest, OffThreadLargeObjectSpaceMergeDuringIncrementalMarking) {
threads[i]->Join();
}
- int pages_in_old_space = lo_space->PageCount();
+ heap->StartIncrementalMarking(Heap::kNoGCFlags,
+ GarbageCollectionReason::kTesting);
+
+ int pages_in_lo_space = lo_space->PageCount();
int expected_merged_pages = 0;
for (int i = 0; i < kNumThreads; ++i) {
@@ -276,7 +288,14 @@ TEST_F(SpacesTest, OffThreadLargeObjectSpaceMergeDuringIncrementalMarking) {
expected_merged_pages += pages_in_off_thread_space;
}
- EXPECT_EQ(pages_in_old_space + expected_merged_pages, lo_space->PageCount());
+ // Check the page count before finalizing marking, since the GC will see the
+ // empty pages and will evacuate them.
+ // TODO(leszeks): Maybe allocate real objects, and hold on to them with
+ // Handles, to make sure incremental marking finalization doesn't clear them
+ // away.
+ EXPECT_EQ(pages_in_lo_space + expected_merged_pages, lo_space->PageCount());
+
+ heap->FinalizeIncrementalMarkingAtomically(GarbageCollectionReason::kTesting);
}
TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
diff --git a/deps/v8/test/unittests/libplatform/default-job-unittest.cc b/deps/v8/test/unittests/libplatform/default-job-unittest.cc
new file mode 100644
index 0000000000..b54e589fca
--- /dev/null
+++ b/deps/v8/test/unittests/libplatform/default-job-unittest.cc
@@ -0,0 +1,233 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-job.h"
+
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/platform.h"
+#include "src/libplatform/default-platform.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace platform {
+namespace default_job_unittest {
+
+// Verify that Cancel() on a job stops running the worker task and causes
+// current workers to yield.
+TEST(DefaultJobTest, CancelJob) {
+ static constexpr size_t kTooManyTasks = 1000;
+ static constexpr size_t kMaxTask = 4;
+ DefaultPlatform platform(kMaxTask);
+
+ // This Job notifies |threads_running| once started and loops until
+ // ShouldYield() returns true, and then returns.
+ class JobTest : public JobTask {
+ public:
+ ~JobTest() override = default;
+
+ void Run(JobDelegate* delegate) override {
+ {
+ base::MutexGuard guard(&mutex);
+ worker_count++;
+ }
+ threads_running.NotifyOne();
+ while (!delegate->ShouldYield()) {
+ }
+ }
+
+ size_t GetMaxConcurrency() const override {
+ return max_concurrency.load(std::memory_order_relaxed);
+ }
+
+ base::Mutex mutex;
+ base::ConditionVariable threads_running;
+ size_t worker_count = 0;
+ std::atomic_size_t max_concurrency{kTooManyTasks};
+ };
+
+ auto job = std::make_unique<JobTest>();
+ JobTest* job_raw = job.get();
+ auto state = std::make_shared<DefaultJobState>(
+ &platform, std::move(job), TaskPriority::kUserVisible, kMaxTask);
+ state->NotifyConcurrencyIncrease();
+
+ {
+ base::MutexGuard guard(&job_raw->mutex);
+ while (job_raw->worker_count < kMaxTask) {
+ job_raw->threads_running.Wait(&job_raw->mutex);
+ }
+ EXPECT_EQ(kMaxTask, job_raw->worker_count);
+ }
+ state->CancelAndWait();
+ // Workers should return and this test should not hang.
+}
+
+// Verify that Join() on a job contributes to max concurrency and waits for all
+// workers to return.
+TEST(DefaultJobTest, JoinJobContributes) {
+ static constexpr size_t kMaxTask = 4;
+ DefaultPlatform platform(kMaxTask);
+
+ // This Job notifies |threads_running| once started and blocks on a barrier
+ // until kMaxTask + 1 threads reach that point, and then returns.
+ class JobTest : public JobTask {
+ public:
+ ~JobTest() override = default;
+
+ void Run(JobDelegate* delegate) override {
+ base::MutexGuard guard(&mutex);
+ worker_count++;
+ threads_running.NotifyAll();
+ while (worker_count < kMaxTask + 1) threads_running.Wait(&mutex);
+ --max_concurrency;
+ }
+
+ size_t GetMaxConcurrency() const override {
+ return max_concurrency.load(std::memory_order_relaxed);
+ }
+
+ base::Mutex mutex;
+ base::ConditionVariable threads_running;
+ size_t worker_count = 0;
+ std::atomic_size_t max_concurrency{kMaxTask + 1};
+ };
+
+ auto job = std::make_unique<JobTest>();
+ JobTest* job_raw = job.get();
+ auto state = std::make_shared<DefaultJobState>(
+ &platform, std::move(job), TaskPriority::kUserVisible, kMaxTask);
+ state->NotifyConcurrencyIncrease();
+
+ // The main thread contributing is necessary for |worker_count| to reach
+ // kMaxTask + 1; thus, Join() should not hang.
+ state->Join();
+ EXPECT_EQ(0U, job_raw->max_concurrency);
+}
+
+// Verify that calling NotifyConcurrencyIncrease() (re-)schedules tasks with the
+// intended concurrency.
+TEST(DefaultJobTest, JobNotifyConcurrencyIncrease) {
+ static constexpr size_t kMaxTask = 4;
+ DefaultPlatform platform(kMaxTask);
+
+ // This Job notifies |threads_running| once started and blocks on a barrier
+ // until kMaxTask threads reach that point, and then returns.
+ class JobTest : public JobTask {
+ public:
+ ~JobTest() override = default;
+
+ void Run(JobDelegate* delegate) override {
+ base::MutexGuard guard(&mutex);
+ worker_count++;
+ threads_running.NotifyAll();
+ // Wait synchronously until |kMaxTask| workers reach this point.
+ while (worker_count < kMaxTask) threads_running.Wait(&mutex);
+ --max_concurrency;
+ }
+
+ size_t GetMaxConcurrency() const override {
+ return max_concurrency.load(std::memory_order_relaxed);
+ }
+
+ base::Mutex mutex;
+ base::ConditionVariable threads_running;
+ bool continue_flag = false;
+ size_t worker_count = 0;
+ std::atomic_size_t max_concurrency{kMaxTask / 2};
+ };
+
+ auto job = std::make_unique<JobTest>();
+ JobTest* job_raw = job.get();
+ auto state = std::make_shared<DefaultJobState>(
+ &platform, std::move(job), TaskPriority::kUserVisible, kMaxTask);
+ state->NotifyConcurrencyIncrease();
+
+ {
+ base::MutexGuard guard(&job_raw->mutex);
+ while (job_raw->worker_count < kMaxTask / 2)
+ job_raw->threads_running.Wait(&job_raw->mutex);
+ EXPECT_EQ(kMaxTask / 2, job_raw->worker_count);
+
+ job_raw->max_concurrency = kMaxTask;
+ }
+ state->NotifyConcurrencyIncrease();
+ // Workers should reach |continue_flag| and eventually return; thus, Join()
+ // should not hang.
+ state->Join();
+ EXPECT_EQ(0U, job_raw->max_concurrency);
+}
+
+// Verify that Join() doesn't contribute if the Job is already finished.
+TEST(DefaultJobTest, FinishBeforeJoin) {
+ static constexpr size_t kMaxTask = 4;
+ DefaultPlatform platform(kMaxTask);
+
+ // This Job notifies |threads_running| once started and returns.
+ class JobTest : public JobTask {
+ public:
+ ~JobTest() override = default;
+
+ void Run(JobDelegate* delegate) override {
+ base::MutexGuard guard(&mutex);
+ worker_count++;
+ // Assert that the main thread doesn't contribute in this test.
+ EXPECT_NE(main_thread_id, base::OS::GetCurrentThreadId());
+ worker_ran.NotifyAll();
+ --max_concurrency;
+ }
+
+ size_t GetMaxConcurrency() const override {
+ return max_concurrency.load(std::memory_order_relaxed);
+ }
+
+ const int main_thread_id = base::OS::GetCurrentThreadId();
+ base::Mutex mutex;
+ base::ConditionVariable worker_ran;
+ size_t worker_count = 0;
+ std::atomic_size_t max_concurrency{kMaxTask * 5};
+ };
+
+ auto job = std::make_unique<JobTest>();
+ JobTest* job_raw = job.get();
+ auto state = std::make_shared<DefaultJobState>(
+ &platform, std::move(job), TaskPriority::kUserVisible, kMaxTask);
+ state->NotifyConcurrencyIncrease();
+
+ {
+ base::MutexGuard guard(&job_raw->mutex);
+ while (job_raw->worker_count < kMaxTask * 5)
+ job_raw->worker_ran.Wait(&job_raw->mutex);
+ EXPECT_EQ(kMaxTask * 5, job_raw->worker_count);
+ }
+
+ state->Join();
+ EXPECT_EQ(0U, job_raw->max_concurrency);
+}
+
+// Verify that destroying a DefaultJobHandle triggers a DCHECK if neither Join()
+// nor Cancel() was called.
+TEST(DefaultJobTest, LeakHandle) {
+ class JobTest : public JobTask {
+ public:
+ ~JobTest() override = default;
+
+ void Run(JobDelegate* delegate) override {}
+
+ size_t GetMaxConcurrency() const override { return 0; }
+ };
+
+ DefaultPlatform platform(0);
+ auto job = std::make_unique<JobTest>();
+ auto state = std::make_shared<DefaultJobState>(&platform, std::move(job),
+ TaskPriority::kUserVisible, 1);
+ auto handle = std::make_unique<DefaultJobHandle>(std::move(state));
+#ifdef DEBUG
+ EXPECT_DEATH_IF_SUPPORTED({ handle.reset(); }, "");
+#endif // DEBUG
+ handle->Join();
+}
+
+} // namespace default_job_unittest
+} // namespace platform
+} // namespace v8
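Editor's note: all of the tests in this new file share the same scaffolding — a JobTask subclass whose GetMaxConcurrency() shrinks as work completes, wrapped in a DefaultJobState bound to a DefaultPlatform, kicked off with NotifyConcurrencyIncrease() and finished with Join() or CancelAndWait(). A condensed sketch of that skeleton, assuming the headers and namespaces at the top of this file (the CountdownJob class and test name are illustrative, not part of the patch):

// Illustrative JobTask: each Run() invocation consumes one unit of work.
class CountdownJob : public JobTask {
 public:
  void Run(JobDelegate* delegate) override {
    base::MutexGuard guard(&mutex);
    if (max_concurrency > 0) --max_concurrency;
  }
  size_t GetMaxConcurrency() const override {
    return max_concurrency.load(std::memory_order_relaxed);
  }
  base::Mutex mutex;
  std::atomic_size_t max_concurrency{4};
};

TEST(DefaultJobTest, JoinDrainsWorkSketch) {
  static constexpr size_t kMaxTask = 4;
  DefaultPlatform platform(kMaxTask);
  auto job = std::make_unique<CountdownJob>();
  CountdownJob* job_raw = job.get();
  auto state = std::make_shared<DefaultJobState>(
      &platform, std::move(job), TaskPriority::kUserVisible, kMaxTask);
  state->NotifyConcurrencyIncrease();
  // Join() keeps contributing until GetMaxConcurrency() drops to zero.
  state->Join();
  EXPECT_EQ(0U, job_raw->max_concurrency);
}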
diff --git a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
index 2aa43de1ad..49aa0fe1cb 100644
--- a/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-platform-unittest.cc
@@ -32,8 +32,8 @@ struct MockIdleTask : public IdleTask {
class DefaultPlatformWithMockTime : public DefaultPlatform {
public:
- DefaultPlatformWithMockTime()
- : DefaultPlatform(IdleTaskSupport::kEnabled, nullptr) {
+ explicit DefaultPlatformWithMockTime(int thread_pool_size = 0)
+ : DefaultPlatform(thread_pool_size, IdleTaskSupport::kEnabled, nullptr) {
mock_time_ = 0.0;
SetTimeFunctionForTesting([]() { return mock_time_; });
}
@@ -240,8 +240,7 @@ class TestBackgroundTask : public Task {
} // namespace
TEST(CustomDefaultPlatformTest, RunBackgroundTask) {
- DefaultPlatform platform;
- platform.SetThreadPoolSize(1);
+ DefaultPlatform platform(1);
base::Semaphore sem(0);
bool task_executed = false;
@@ -256,12 +255,11 @@ TEST(CustomDefaultPlatformTest, RunBackgroundTask) {
TEST(CustomDefaultPlatformTest, PostForegroundTaskAfterPlatformTermination) {
std::shared_ptr<TaskRunner> foreground_taskrunner;
{
- DefaultPlatformWithMockTime platform;
+ DefaultPlatformWithMockTime platform(1);
int dummy;
Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
- platform.SetThreadPoolSize(1);
foreground_taskrunner = platform.GetForegroundTaskRunner(isolate);
}
// It should still be possible to post foreground tasks, even when the
diff --git a/deps/v8/test/unittests/objects/backing-store-unittest.cc b/deps/v8/test/unittests/objects/backing-store-unittest.cc
index d00f5632fe..b31669a79c 100644
--- a/deps/v8/test/unittests/objects/backing-store-unittest.cc
+++ b/deps/v8/test/unittests/objects/backing-store-unittest.cc
@@ -21,8 +21,10 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace) {
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
- bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 2);
- EXPECT_TRUE(success);
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate(), 1, 2);
+ EXPECT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 1u);
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
}
@@ -34,8 +36,9 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace_neg) {
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
- bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 2, 2);
- EXPECT_FALSE(success);
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate(), 2, 2);
+ EXPECT_FALSE(result.has_value());
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
}
@@ -47,8 +50,10 @@ TEST_F(BackingStoreTest, GrowSharedWasmMemoryInPlace) {
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_capacity());
- bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 3);
- EXPECT_TRUE(success);
+ base::Optional<size_t> result =
+ backing_store->GrowWasmMemoryInPlace(isolate(), 1, 3);
+ EXPECT_TRUE(result.has_value());
+ EXPECT_EQ(result.value(), 2u);
EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_length());
}
@@ -81,10 +86,11 @@ class GrowerThread : public base::Thread {
while (true) {
size_t current_length = backing_store_->byte_length();
if (current_length >= max_length) break;
- bool result =
+ base::Optional<size_t> result =
backing_store_->GrowWasmMemoryInPlace(isolate_, increment_, max_);
size_t new_length = backing_store_->byte_length();
- if (result) {
+ if (result.has_value()) {
+ CHECK_LE(current_length / wasm::kWasmPageSize, result.value());
CHECK_GE(new_length, current_length + increment_);
} else {
CHECK_EQ(max_length, new_length);
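Editor's note: the behavioral change tested here is that GrowWasmMemoryInPlace() now returns base::Optional<size_t> holding the memory's previous size in Wasm pages rather than a bare bool. A sketch of the new call shape, mirroring the assertions above (the variable name and the /*...*/ argument labels are my reading of the (1, 2) arguments used in the tests, not V8's names):

base::Optional<size_t> previous_pages =
    backing_store->GrowWasmMemoryInPlace(isolate(), /*delta_pages=*/1,
                                         /*max_pages=*/2);
if (previous_pages.has_value()) {
  // Success: value() is the page count before the grow took effect.
  EXPECT_EQ(1u, previous_pages.value());
} else {
  // The grow was rejected, e.g. because it would exceed the maximum.
  EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
}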
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index a9a0fac66b..dd7519230a 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -48,8 +48,9 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
BackgroundCompileTask* NewBackgroundCompileTask(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
size_t stack_size = FLAG_stack_size) {
+ UnoptimizedCompileState state(isolate);
std::unique_ptr<ParseInfo> outer_parse_info =
- test::OuterParseInfoForShared(isolate, shared);
+ test::OuterParseInfoForShared(isolate, shared, &state);
AstValueFactory* ast_value_factory =
outer_parse_info->GetOrCreateAstValueFactory();
AstNodeFactory ast_node_factory(ast_value_factory,
@@ -75,7 +76,7 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
shared->function_literal_id(), nullptr);
return new BackgroundCompileTask(
- allocator(), outer_parse_info.get(), function_name, function_literal,
+ outer_parse_info.get(), function_name, function_literal,
isolate->counters()->worker_thread_runtime_call_stats(),
isolate->counters()->compile_function_on_background(), FLAG_stack_size);
}
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 2d9149402f..6691784a71 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -54,13 +54,15 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
}
std::unique_ptr<ParseInfo> OuterParseInfoForShared(
- Isolate* isolate, Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ UnoptimizedCompileState* state) {
Script script = Script::cast(shared->script());
- std::unique_ptr<ParseInfo> result =
- std::make_unique<ParseInfo>(isolate, script);
+ std::unique_ptr<ParseInfo> result = std::make_unique<ParseInfo>(
+ isolate, i::UnoptimizedCompileFlags::ForScriptCompile(isolate, script),
+ state);
// Create a character stream to simulate the parser having done so for the
- // to-level ParseProgram.
+ // top-level ParseProgram.
Handle<String> source(String::cast(script.source()), isolate);
std::unique_ptr<Utf16CharacterStream> stream(
ScannerStream::For(isolate, source));
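Editor's note: this helper and the background-compile/off-thread tests above all move to the same three-part ParseInfo construction — a caller-owned UnoptimizedCompileState, an UnoptimizedCompileFlags value describing the compile, and the isolate. A sketch of that shape for a top-level script compile, using only the calls visible in the hunk above:

UnoptimizedCompileState state(isolate);
Script script = Script::cast(shared->script());
std::unique_ptr<ParseInfo> info = std::make_unique<ParseInfo>(
    isolate, i::UnoptimizedCompileFlags::ForScriptCompile(isolate, script),
    &state);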
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index 8a4d9f02ce..5cd1beb761 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -46,7 +46,8 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Isolate* isolate,
v8::String::ExternalOneByteStringResource* maybe_resource);
std::unique_ptr<ParseInfo> OuterParseInfoForShared(
- Isolate* isolate, Handle<SharedFunctionInfo> shared);
+ Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ UnoptimizedCompileState* state);
} // namespace test
} // namespace internal
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 5ac44b3d57..86e2854596 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -15,50 +15,61 @@
namespace v8 {
-IsolateWrapper::IsolateWrapper(CounterLookupCallback counter_lookup_callback,
- bool enforce_pointer_compression)
+namespace {
+// counter_lookup_callback doesn't pass through any state information about
+// the current Isolate, so we have to store the current counter map somewhere.
+// Fortunately tests run serially, so we can just store it in a static global.
+CounterMap* kCurrentCounterMap = nullptr;
+} // namespace
+
+IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
+ PointerCompressionMode pointer_compression_mode)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
+ CHECK_NULL(kCurrentCounterMap);
+
v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = array_buffer_allocator_;
- create_params.counter_lookup_callback = counter_lookup_callback;
- if (enforce_pointer_compression) {
+ create_params.array_buffer_allocator = array_buffer_allocator_.get();
+
+ if (counters_mode == kEnableCounters) {
+ counter_map_ = std::make_unique<CounterMap>();
+ kCurrentCounterMap = counter_map_.get();
+
+ create_params.counter_lookup_callback = [](const char* name) {
+ CHECK_NOT_NULL(kCurrentCounterMap);
+ // If the name doesn't exist in the counter map, operator[] will default
+ // initialize it to zero.
+ return &(*kCurrentCounterMap)[name];
+ };
+ } else {
+ create_params.counter_lookup_callback = [](const char* name) -> int* {
+ return nullptr;
+ };
+ }
+
+ if (pointer_compression_mode == kEnforcePointerCompression) {
isolate_ = reinterpret_cast<v8::Isolate*>(
i::Isolate::New(i::IsolateAllocationMode::kInV8Heap));
- v8::Isolate::Initialize(isolate_, create_params);
+ v8::Isolate::Initialize(isolate(), create_params);
} else {
isolate_ = v8::Isolate::New(create_params);
}
- CHECK_NOT_NULL(isolate_);
+ CHECK_NOT_NULL(isolate());
}
IsolateWrapper::~IsolateWrapper() {
v8::Platform* platform = internal::V8::GetCurrentPlatform();
CHECK_NOT_NULL(platform);
- while (platform::PumpMessageLoop(platform, isolate_)) continue;
+ while (platform::PumpMessageLoop(platform, isolate())) continue;
isolate_->Dispose();
- delete array_buffer_allocator_;
-}
-
-// static
-v8::IsolateWrapper* SharedIsolateHolder::isolate_wrapper_ = nullptr;
-
-// static
-int* SharedIsolateAndCountersHolder::LookupCounter(const char* name) {
- DCHECK_NOT_NULL(counter_map_);
- auto map_entry = counter_map_->find(name);
- if (map_entry == counter_map_->end()) {
- counter_map_->emplace(name, 0);
+ if (counter_map_) {
+ CHECK_EQ(kCurrentCounterMap, counter_map_.get());
+ kCurrentCounterMap = nullptr;
+ } else {
+ CHECK_NULL(kCurrentCounterMap);
}
- return &counter_map_->at(name);
}
-// static
-v8::IsolateWrapper* SharedIsolateAndCountersHolder::isolate_wrapper_ = nullptr;
-
-// static
-CounterMap* SharedIsolateAndCountersHolder::counter_map_ = nullptr;
-
namespace internal {
SaveFlags::SaveFlags() {
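Editor's note: the comment in IsolateWrapper's constructor spells out the constraint behind this design — counter_lookup_callback is a plain function pointer with no per-isolate user data, so the counter map has to live in a static global that a capture-less lambda can reach. A stripped-down, standalone sketch of that pattern (all names here are illustrative, not V8's):

#include <map>
#include <string>

using CounterMap = std::map<std::string, int>;
using CounterLookupCallback = int* (*)(const char* name);

namespace {
// Set for the duration of one (serially running) test.
CounterMap* g_current_counter_map = nullptr;
}  // namespace

CounterLookupCallback MakeCountingCallback(CounterMap* map) {
  g_current_counter_map = map;
  // A capture-less lambda converts to the required function pointer type.
  return [](const char* name) -> int* {
    // operator[] default-initializes missing entries to zero.
    return &(*g_current_counter_map)[name];
  };
}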
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index c2ffbf1561..30de25fe9a 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -5,6 +5,7 @@
#ifndef V8_UNITTESTS_TEST_UTILS_H_
#define V8_UNITTESTS_TEST_UTILS_H_
+#include <memory>
#include <vector>
#include "include/v8.h"
@@ -24,144 +25,67 @@ class ArrayBufferAllocator;
using CounterMap = std::map<std::string, int>;
+enum CountersMode { kNoCounters, kEnableCounters };
+
+// When PointerCompressionMode is kEnforcePointerCompression, the Isolate is
+// created with pointer compression force enabled. When it's
+// kDefaultPointerCompression then the Isolate is created with the default
+// pointer compression state for the current build.
+enum PointerCompressionMode {
+ kDefaultPointerCompression,
+ kEnforcePointerCompression
+};
+
// RAII-like Isolate instance wrapper.
class IsolateWrapper final {
public:
- // When enforce_pointer_compression is true the Isolate is created with
- // enabled pointer compression. When it's false then the Isolate is created
- // with the default pointer compression state for current build.
- explicit IsolateWrapper(CounterLookupCallback counter_lookup_callback,
- bool enforce_pointer_compression = false);
+ explicit IsolateWrapper(CountersMode counters_mode,
+ PointerCompressionMode pointer_compression_mode);
~IsolateWrapper();
v8::Isolate* isolate() const { return isolate_; }
private:
- v8::ArrayBuffer::Allocator* array_buffer_allocator_;
+ std::unique_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_;
+ std::unique_ptr<CounterMap> counter_map_;
v8::Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(IsolateWrapper);
};
-class SharedIsolateHolder final {
- public:
- static v8::Isolate* isolate() { return isolate_wrapper_->isolate(); }
-
- static void CreateIsolate() {
- CHECK_NULL(isolate_wrapper_);
- isolate_wrapper_ =
- new IsolateWrapper([](const char* name) -> int* { return nullptr; });
- }
-
- static void DeleteIsolate() {
- CHECK_NOT_NULL(isolate_wrapper_);
- delete isolate_wrapper_;
- isolate_wrapper_ = nullptr;
- }
-
- private:
- static v8::IsolateWrapper* isolate_wrapper_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedIsolateHolder);
-};
-
-class SharedIsolateAndCountersHolder final {
- public:
- static v8::Isolate* isolate() { return isolate_wrapper_->isolate(); }
-
- static void CreateIsolate() {
- CHECK_NULL(counter_map_);
- CHECK_NULL(isolate_wrapper_);
- counter_map_ = new CounterMap();
- isolate_wrapper_ = new IsolateWrapper(LookupCounter);
- }
-
- static void DeleteIsolate() {
- CHECK_NOT_NULL(counter_map_);
- CHECK_NOT_NULL(isolate_wrapper_);
- delete isolate_wrapper_;
- isolate_wrapper_ = nullptr;
- delete counter_map_;
- counter_map_ = nullptr;
- }
-
- private:
- static int* LookupCounter(const char* name);
- static CounterMap* counter_map_;
- static v8::IsolateWrapper* isolate_wrapper_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedIsolateAndCountersHolder);
-};
-
//
// A set of mixins from which the test fixtures will be constructed.
//
-template <typename TMixin>
-class WithPrivateIsolateMixin : public TMixin {
+template <typename TMixin, CountersMode kCountersMode = kNoCounters,
+ PointerCompressionMode kPointerCompressionMode =
+ kDefaultPointerCompression>
+class WithIsolateMixin : public TMixin {
public:
- explicit WithPrivateIsolateMixin(bool enforce_pointer_compression = false)
- : isolate_wrapper_([](const char* name) -> int* { return nullptr; },
- enforce_pointer_compression) {}
+ WithIsolateMixin()
+ : isolate_wrapper_(kCountersMode, kPointerCompressionMode) {}
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
- static void SetUpTestCase() { TMixin::SetUpTestCase(); }
- static void TearDownTestCase() { TMixin::TearDownTestCase(); }
-
private:
v8::IsolateWrapper isolate_wrapper_;
-
- DISALLOW_COPY_AND_ASSIGN(WithPrivateIsolateMixin);
};
-template <typename TMixin, typename TSharedIsolateHolder = SharedIsolateHolder>
-class WithSharedIsolateMixin : public TMixin {
- public:
- WithSharedIsolateMixin() = default;
-
- v8::Isolate* v8_isolate() const { return TSharedIsolateHolder::isolate(); }
-
- static void SetUpTestCase() {
- TMixin::SetUpTestCase();
- TSharedIsolateHolder::CreateIsolate();
- }
-
- static void TearDownTestCase() {
- TSharedIsolateHolder::DeleteIsolate();
- TMixin::TearDownTestCase();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WithSharedIsolateMixin);
-};
-
-template <typename TMixin>
-class WithPointerCompressionIsolateMixin
- : public WithPrivateIsolateMixin<TMixin> {
- public:
- WithPointerCompressionIsolateMixin()
- : WithPrivateIsolateMixin<TMixin>(true) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WithPointerCompressionIsolateMixin);
-};
+template <typename TMixin, CountersMode kCountersMode = kNoCounters>
+using WithPointerCompressionIsolateMixin =
+ WithIsolateMixin<TMixin, kCountersMode, kEnforcePointerCompression>;
template <typename TMixin>
class WithIsolateScopeMixin : public TMixin {
public:
WithIsolateScopeMixin()
- : isolate_scope_(v8_isolate()), handle_scope_(v8_isolate()) {}
+ : isolate_scope_(this->v8_isolate()), handle_scope_(this->v8_isolate()) {}
- v8::Isolate* isolate() const { return v8_isolate(); }
- v8::Isolate* v8_isolate() const { return TMixin::v8_isolate(); }
+ v8::Isolate* isolate() const { return this->v8_isolate(); }
v8::internal::Isolate* i_isolate() const {
- return reinterpret_cast<v8::internal::Isolate*>(v8_isolate());
+ return reinterpret_cast<v8::internal::Isolate*>(this->v8_isolate());
}
- static void SetUpTestCase() { TMixin::SetUpTestCase(); }
- static void TearDownTestCase() { TMixin::TearDownTestCase(); }
-
private:
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
@@ -173,25 +97,23 @@ template <typename TMixin>
class WithContextMixin : public TMixin {
public:
WithContextMixin()
- : context_(Context::New(v8_isolate())), context_scope_(context_) {}
-
- v8::Isolate* v8_isolate() const { return TMixin::v8_isolate(); }
+ : context_(Context::New(this->v8_isolate())), context_scope_(context_) {}
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
Local<Value> RunJS(const char* source) {
return RunJS(
- v8::String::NewFromUtf8(v8_isolate(), source).ToLocalChecked());
+ v8::String::NewFromUtf8(this->v8_isolate(), source).ToLocalChecked());
}
Local<Value> RunJS(v8::String::ExternalOneByteStringResource* source) {
- return RunJS(
- v8::String::NewExternalOneByte(v8_isolate(), source).ToLocalChecked());
+ return RunJS(v8::String::NewExternalOneByte(this->v8_isolate(), source)
+ .ToLocalChecked());
}
v8::Local<v8::String> NewString(const char* string) {
- return v8::String::NewFromUtf8(v8_isolate(), string).ToLocalChecked();
+ return v8::String::NewFromUtf8(this->v8_isolate(), string).ToLocalChecked();
}
void SetGlobalProperty(const char* name, v8::Local<v8::Value> value) {
@@ -201,12 +123,9 @@ class WithContextMixin : public TMixin {
.FromJust());
}
- static void SetUpTestCase() { TMixin::SetUpTestCase(); }
- static void TearDownTestCase() { TMixin::TearDownTestCase(); }
-
private:
Local<Value> RunJS(Local<String> source) {
- auto context = v8_isolate()->GetCurrentContext();
+ auto context = this->v8_isolate()->GetCurrentContext();
Local<Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
return script->Run(context).ToLocalChecked();
@@ -220,17 +139,17 @@ class WithContextMixin : public TMixin {
// Use v8::internal::TestWithIsolate if you are testing internals,
// aka. directly work with Handles.
-using TestWithIsolate = //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+using TestWithIsolate = //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
::testing::Test>>;
// Use v8::internal::TestWithNativeContext if you are testing internals,
// aka. directly work with Handles.
-using TestWithContext = //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+using TestWithContext = //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
::testing::Test>>>;
using TestWithIsolateAndPointerCompression = //
@@ -279,9 +198,6 @@ class WithInternalIsolateMixin : public TMixin {
return isolate()->random_number_generator();
}
- static void SetUpTestCase() { TMixin::SetUpTestCase(); }
- static void TearDownTestCase() { TMixin::TearDownTestCase(); }
-
private:
DISALLOW_COPY_AND_ASSIGN(WithInternalIsolateMixin);
};
@@ -293,9 +209,6 @@ class WithZoneMixin : public TMixin {
Zone* zone() { return &zone_; }
- static void SetUpTestCase() { TMixin::SetUpTestCase(); }
- static void TearDownTestCase() { TMixin::TearDownTestCase(); }
-
private:
v8::internal::AccountingAllocator allocator_;
Zone zone_;
@@ -303,42 +216,41 @@ class WithZoneMixin : public TMixin {
DISALLOW_COPY_AND_ASSIGN(WithZoneMixin);
};
-using TestWithIsolate = //
- WithInternalIsolateMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+using TestWithIsolate = //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
::testing::Test>>>;
using TestWithZone = WithZoneMixin<::testing::Test>;
-using TestWithIsolateAndZone = //
- WithInternalIsolateMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
- WithZoneMixin< //
+using TestWithIsolateAndZone = //
+ WithInternalIsolateMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
+ WithZoneMixin< //
::testing::Test>>>>;
-using TestWithNativeContext = //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+using TestWithNativeContext = //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
::testing::Test>>>>;
using TestWithNativeContextAndCounters = //
WithInternalIsolateMixin< //
WithContextMixin< //
WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
- ::testing::Test, //
- SharedIsolateAndCountersHolder>>>>;
-
-using TestWithNativeContextAndZone = //
- WithZoneMixin< //
- WithInternalIsolateMixin< //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithSharedIsolateMixin< //
+ WithIsolateMixin< //
+ ::testing::Test, kEnableCounters>>>>;
+
+using TestWithNativeContextAndZone = //
+ WithZoneMixin< //
+ WithInternalIsolateMixin< //
+ WithContextMixin< //
+ WithIsolateScopeMixin< //
+ WithIsolateMixin< //
::testing::Test>>>>>;
class SaveFlags {
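Editor's note: after this refactor a fixture is assembled purely by stacking mixins and picking the non-default template arguments of WithIsolateMixin, rather than threading a holder class through. As a sketch, a hypothetical counters-enabled, pointer-compression-enforcing fixture (the alias name is made up; the mixins and enumerators are the ones declared above) would read:

using TestWithCountersAndPointerCompression =  //
    WithIsolateScopeMixin<                     //
        WithIsolateMixin<                      //
            ::testing::Test, kEnableCounters, kEnforcePointerCompression>>;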
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 778efc7641..e2d43f7dd4 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -70,6 +70,8 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
+type ExternalPointer
+ generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
type Code extends HeapObject generates 'TNode<Code>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Context extends HeapObject generates 'TNode<Context>';
@@ -334,6 +336,22 @@ TEST(Torque, ConstexprLetBindingDoesNotCrash) {
HasSubstr("Use 'const' instead of 'let' for variable 'foo'"));
}
+TEST(Torque, FailedImplicitCastFromConstexprDoesNotCrash) {
+ ExpectFailingCompilation(
+ R"(
+ extern enum SomeEnum {
+ kValue,
+ ...
+ }
+ macro Foo() {
+ Bar(SomeEnum::kValue);
+ }
+ macro Bar<T: type>(value: T) {}
+ )",
+ HasSubstr(
+ "Cannot find non-constexpr type corresponding to constexpr kValue"));
+}
+
TEST(Torque, DoubleUnderScorePrefixIllegalForIdentifiers) {
ExpectFailingCompilation(R"(
@export macro Foo() {
@@ -625,6 +643,20 @@ TEST(Torque, EnumInTypeswitch) {
}
}
)");
+
+ ExpectSuccessfulCompilation(R"(
+ extern enum MyEnum extends Smi {
+ kA,
+ kB,
+ kC,
+ ...
+ }
+
+ @export
+ macro Test(implicit context: Context)(b: bool): Smi {
+ return b ? MyEnum::kB : MyEnum::kA;
+ }
+)");
}
TEST(Torque, ConstClassFields) {
@@ -755,6 +787,20 @@ TEST(Torque, References) {
HasSubstr("cannot assign to const value"));
}
+TEST(Torque, CatchFirstHandler) {
+ ExpectFailingCompilation(
+ R"(
+ @export
+ macro Test() {
+ try {
+ } label Foo {
+ } catch (e) {}
+ }
+ )",
+ HasSubstr(
+ "catch handler always has to be first, before any label handler"));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 8d5aac70d9..61bf5d1718 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -48,4 +48,9 @@
'DecompressionOptimizerTest.*': [SKIP],
}], # not pointer_compression
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/unittests/utils/vector-unittest.cc b/deps/v8/test/unittests/utils/vector-unittest.cc
index 75af40d6b5..6f7d6ccac3 100644
--- a/deps/v8/test/unittests/utils/vector-unittest.cc
+++ b/deps/v8/test/unittests/utils/vector-unittest.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
#include "src/utils/utils.h"
+#include "testing/gmock-support.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -32,11 +35,11 @@ TEST(VectorTest, Factories) {
TEST(VectorTest, Equals) {
auto foo1 = CStrVector("foo");
auto foo2 = ArrayVector("ffoo") + 1;
- CHECK_EQ(4, foo2.size()); // Includes trailing '\0'.
+ EXPECT_EQ(4u, foo2.size()); // Includes trailing '\0'.
foo2.Truncate(foo2.size() - 1);
// This is a requirement for the test.
- CHECK_NE(foo1.begin(), foo2.begin());
- CHECK_EQ(foo1, foo2);
+ EXPECT_NE(foo1.begin(), foo2.begin());
+ EXPECT_EQ(foo1, foo2);
// Compare Vector<char> against Vector<const char>.
char arr1[] = {'a', 'b', 'c'};
@@ -46,31 +49,48 @@ TEST(VectorTest, Equals) {
Vector<const char> vec1_const_char = vec1_char;
Vector<char> vec2_char = ArrayVector(arr2);
Vector<char> vec3_char = ArrayVector(arr3);
- CHECK_NE(vec1_char.begin(), vec2_char.begin());
- // Note: We directly call operator== and operator!= here (without CHECK_EQ or
- // CHECK_NE) to have full control over the arguments.
- CHECK(vec1_char == vec1_const_char);
- CHECK(vec1_char == vec2_char);
- CHECK(vec1_const_char == vec2_char);
- CHECK(vec1_const_char != vec3_char);
- CHECK(vec3_char != vec2_char);
- CHECK(vec3_char != vec1_const_char);
+ EXPECT_NE(vec1_char.begin(), vec2_char.begin());
+ // Note: We directly call operator== and operator!= here (without EXPECT_EQ or
+ // EXPECT_NE) to have full control over the arguments.
+ EXPECT_TRUE(vec1_char == vec1_const_char);
+ EXPECT_TRUE(vec1_char == vec2_char);
+ EXPECT_TRUE(vec1_const_char == vec2_char);
+ EXPECT_TRUE(vec1_const_char != vec3_char);
+ EXPECT_TRUE(vec3_char != vec2_char);
+ EXPECT_TRUE(vec3_char != vec1_const_char);
}
TEST(OwnedVectorConstruction, Equals) {
auto int_vec = OwnedVector<int>::New(4);
- CHECK_EQ(4, int_vec.size());
+ EXPECT_EQ(4u, int_vec.size());
auto find_non_zero = [](int i) { return i != 0; };
- CHECK_EQ(int_vec.end(),
- std::find_if(int_vec.begin(), int_vec.end(), find_non_zero));
+ EXPECT_EQ(int_vec.end(),
+ std::find_if(int_vec.begin(), int_vec.end(), find_non_zero));
constexpr int kInit[] = {4, 11, 3};
auto init_vec1 = OwnedVector<int>::Of(kInit);
// Note: {const int} should also work: We initialize the owned vector, but
// afterwards it's non-modifyable.
auto init_vec2 = OwnedVector<const int>::Of(ArrayVector(kInit));
- CHECK_EQ(init_vec1.as_vector(), ArrayVector(kInit));
- CHECK_EQ(init_vec1.as_vector(), init_vec2.as_vector());
+ EXPECT_EQ(init_vec1.as_vector(), ArrayVector(kInit));
+ EXPECT_EQ(init_vec1.as_vector(), init_vec2.as_vector());
+}
+
+// Test that the constexpr factory methods work.
+TEST(VectorTest, ConstexprFactories) {
+ static constexpr int kInit1[] = {4, 11, 3};
+ static constexpr auto kVec1 = ArrayVector(kInit1);
+ STATIC_ASSERT(kVec1.size() == 3);
+ EXPECT_THAT(kVec1, testing::ElementsAreArray(kInit1));
+
+ static constexpr auto kVec2 = VectorOf(kInit1, 2);
+ STATIC_ASSERT(kVec2.size() == 2);
+ EXPECT_THAT(kVec2, testing::ElementsAre(4, 11));
+
+ static constexpr const char kInit3[] = "foobar";
+ static constexpr auto kVec3 = StaticCharVector(kInit3);
+ STATIC_ASSERT(kVec3.size() == 6);
+ EXPECT_THAT(kVec3, testing::ElementsAreArray(kInit3, kInit3 + 6));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index e242132a14..1968fe633b 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/wasm/leb-helper.h"
#include "test/unittests/test-utils.h"
#include "src/init/v8.h"
@@ -216,9 +217,9 @@ class TestModuleBuilder {
return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(const FunctionSig* sig) {
- mod.signatures.push_back(sig);
- CHECK_LE(mod.signatures.size(), kMaxByteSizedLeb128);
- return static_cast<byte>(mod.signatures.size() - 1);
+ mod.add_signature(sig);
+ CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
+ return static_cast<byte>(mod.types.size() - 1);
}
byte AddFunction(const FunctionSig* sig, bool declared = true) {
mod.functions.push_back({sig, // sig
@@ -238,7 +239,7 @@ class TestModuleBuilder {
}
byte AddException(WasmExceptionSig* sig) {
mod.exceptions.emplace_back(sig);
- CHECK_LE(mod.signatures.size(), kMaxByteSizedLeb128);
+ CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.exceptions.size() - 1);
}
@@ -2398,6 +2399,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheck) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2412,6 +2414,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll1) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2426,6 +2429,7 @@ TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll2) {
}
TEST_F(FunctionBodyDecoderTest, Break_TypeCheckAll3) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType storage[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2456,6 +2460,7 @@ TEST_F(FunctionBodyDecoderTest, Break_Unify) {
}
TEST_F(FunctionBodyDecoderTest, BreakIf_cond_type) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType types[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j]};
@@ -2469,6 +2474,7 @@ TEST_F(FunctionBodyDecoderTest, BreakIf_cond_type) {
}
TEST_F(FunctionBodyDecoderTest, BreakIf_val_type) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueType types[] = {kValueTypes[i], kValueTypes[i], kValueTypes[j],
@@ -3628,6 +3634,20 @@ class WasmOpcodeLengthTest : public TestWithZone {
EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code)))
<< PrintOpcodes{code, code + sizeof...(bytes)};
}
+
+ // Helper to check for prefixed opcodes, which can have multiple bytes.
+ void ExpectLengthPrefixed(unsigned operands, WasmOpcode opcode) {
+ uint8_t prefix = (opcode >> 8) & 0xff;
+ DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(prefix)));
+ uint8_t index = opcode & 0xff;
+ uint8_t encoded[2] = {0, 0};
+ uint8_t* p = encoded;
+ unsigned len = static_cast<unsigned>(LEBHelper::sizeof_u32v(index));
+ DCHECK_GE(2, len);
+ LEBHelper::write_u32v(&p, index);
+ // length of index + number of operands + prefix byte
+ ExpectLength(len + operands + 1, prefix, encoded[0], encoded[1]);
+ }
};
TEST_F(WasmOpcodeLengthTest, Statements) {
@@ -3754,17 +3774,15 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
}
TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
-#define TEST_SIMD(name, opcode, sig) \
- ExpectLength(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
+#define TEST_SIMD(name, opcode, sig) ExpectLengthPrefixed(0, kExpr##name);
FOREACH_SIMD_0_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
-#define TEST_SIMD(name, opcode, sig) \
- ExpectLength(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xFF));
+#define TEST_SIMD(name, opcode, sig) ExpectLengthPrefixed(1, kExpr##name);
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
- ExpectLength(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xFF));
- // test for bad simd opcode
- ExpectLength(2, kSimdPrefix, 0xFF);
+ ExpectLengthPrefixed(16, kExprS8x16Shuffle);
+ // test for bad simd opcode, 0xFF is encoded in two bytes.
+ ExpectLength(3, kSimdPrefix, 0xFF, 0x1);
}
using TypesOfLocals = ZoneVector<ValueType>;
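
The prefixed-opcode length checks above hinge on how unsigned LEB128 encodes the opcode index: seven payload bits per byte, with the continuation bit set on every byte except the last. An index up to 0x7F fits in one byte, while 0xFF needs two (0xFF, 0x01), which is why the bad-opcode case now expects a total length of 3 in ExpectLength(3, kSimdPrefix, 0xFF, 0x1): one prefix byte plus two index bytes. A minimal sketch of that encoding, using a hypothetical EncodeU32Leb128 helper rather than V8's LEBHelper:

    #include <cstdint>
    #include <vector>

    // Hypothetical helper (not V8's LEBHelper): encode a uint32_t as unsigned
    // LEB128 -- 7 bits per byte, high bit set on every byte except the last.
    std::vector<uint8_t> EncodeU32Leb128(uint32_t value) {
      std::vector<uint8_t> out;
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value != 0) byte |= 0x80;  // continuation bit
        out.push_back(byte);
      } while (value != 0);
      return out;
    }

    // EncodeU32Leb128(0x3f) == {0x3f}        -> one index byte, total length 2
    // EncodeU32Leb128(0xff) == {0xff, 0x01}  -> two index bytes, total length 3
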
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 68a2bb6ff1..fe93210ca7 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -827,15 +827,15 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(3u, result.value()->signatures.size());
- if (result.value()->signatures.size() == 3) {
- EXPECT_EQ(0u, result.value()->signatures[0]->return_count());
- EXPECT_EQ(1u, result.value()->signatures[1]->return_count());
- EXPECT_EQ(1u, result.value()->signatures[2]->return_count());
-
- EXPECT_EQ(0u, result.value()->signatures[0]->parameter_count());
- EXPECT_EQ(1u, result.value()->signatures[1]->parameter_count());
- EXPECT_EQ(2u, result.value()->signatures[2]->parameter_count());
+ EXPECT_EQ(3u, result.value()->types.size());
+ if (result.value()->types.size() == 3) {
+ EXPECT_EQ(0u, result.value()->signature(0)->return_count());
+ EXPECT_EQ(1u, result.value()->signature(1)->return_count());
+ EXPECT_EQ(1u, result.value()->signature(2)->return_count());
+
+ EXPECT_EQ(0u, result.value()->signature(0)->parameter_count());
+ EXPECT_EQ(1u, result.value()->signature(1)->parameter_count());
+ EXPECT_EQ(2u, result.value()->signature(2)->parameter_count());
}
EXPECT_OFF_END_FAILURE(data, 1);
@@ -1037,7 +1037,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
- EXPECT_EQ(1u, result.value()->signatures.size());
+ EXPECT_EQ(1u, result.value()->types.size());
EXPECT_EQ(1u, result.value()->functions.size());
EXPECT_EQ(1u, result.value()->tables.size());
EXPECT_EQ(1u, result.value()->tables[0].initial_size);
@@ -1123,7 +1123,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.value()->signatures.size());
+ EXPECT_EQ(1u, result.value()->types.size());
EXPECT_EQ(1u, result.value()->functions.size());
EXPECT_EQ(1u, result.value()->tables.size());
EXPECT_EQ(1u, result.value()->tables[0].initial_size);
@@ -1151,7 +1151,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2u, result.value()->signatures.size());
+ EXPECT_EQ(2u, result.value()->types.size());
EXPECT_EQ(4u, result.value()->functions.size());
EXPECT_EQ(1u, result.value()->tables.size());
EXPECT_EQ(8u, result.value()->tables[0].initial_size);
@@ -1487,10 +1487,9 @@ TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
static const byte data[] = {
SIGNATURES_SECTION(1, SIG_ENTRY_v_v),
FUNCTION_SIGNATURES_SECTION(3, 0, 0, 0),
- SECTION_COMPILATION_HINTS(
- BASELINE_TIER_INTERPRETER | TOP_TIER_BASELINE,
- BASELINE_TIER_BASELINE | TOP_TIER_OPTIMIZED,
- BASELINE_TIER_INTERPRETER | TOP_TIER_INTERPRETER),
+ SECTION_COMPILATION_HINTS(BASELINE_TIER_BASELINE | TOP_TIER_BASELINE,
+ BASELINE_TIER_BASELINE | TOP_TIER_OPTIMIZED,
+ BASELINE_TIER_OPTIMIZED | TOP_TIER_OPTIMIZED),
SECTION(Code, ENTRY_COUNT(3), NOP_BODY, NOP_BODY, NOP_BODY),
};
@@ -1500,7 +1499,7 @@ TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
EXPECT_EQ(3u, result.value()->compilation_hints.size());
EXPECT_EQ(WasmCompilationHintStrategy::kDefault,
result.value()->compilation_hints[0].strategy);
- EXPECT_EQ(WasmCompilationHintTier::kInterpreter,
+ EXPECT_EQ(WasmCompilationHintTier::kBaseline,
result.value()->compilation_hints[0].baseline_tier);
EXPECT_EQ(WasmCompilationHintTier::kBaseline,
result.value()->compilation_hints[0].top_tier);
@@ -1512,9 +1511,9 @@ TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
result.value()->compilation_hints[1].top_tier);
EXPECT_EQ(WasmCompilationHintStrategy::kDefault,
result.value()->compilation_hints[2].strategy);
- EXPECT_EQ(WasmCompilationHintTier::kInterpreter,
+ EXPECT_EQ(WasmCompilationHintTier::kOptimized,
result.value()->compilation_hints[2].baseline_tier);
- EXPECT_EQ(WasmCompilationHintTier::kInterpreter,
+ EXPECT_EQ(WasmCompilationHintTier::kOptimized,
result.value()->compilation_hints[2].top_tier);
}
@@ -2429,26 +2428,40 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
TEST_F(WasmModuleVerifyTest, SourceMappingURLSection) {
static const byte data[] = {
+ WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 'y', 'z', '.', 'c')};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ("src/xyz.c", result.value()->source_map_url);
+ EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
+ result.value()->debug_symbols.type);
+ ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ WasmName external_url =
+ wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
+ EXPECT_EQ("src/xyz.c", std::string(external_url.data(), external_url.size()));
}
TEST_F(WasmModuleVerifyTest, BadSourceMappingURLSection) {
static const byte data[] = {
+ WASM_MODULE_HEADER,
SECTION_SRC_MAP('s', 'r', 'c', '/', 'x', 0xff, 'z', '.', 'c')};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ(0u, result.value()->source_map_url.size());
+ EXPECT_EQ(WasmDebugSymbols::Type::None, result.value()->debug_symbols.type);
+ EXPECT_EQ(0u, result.value()->debug_symbols.external_url.length());
}
TEST_F(WasmModuleVerifyTest, MultipleSourceMappingURLSections) {
- static const byte data[] = {SECTION_SRC_MAP('a', 'b', 'c'),
+ static const byte data[] = {WASM_MODULE_HEADER,
+ SECTION_SRC_MAP('a', 'b', 'c'),
SECTION_SRC_MAP('p', 'q', 'r')};
- ModuleResult result = DecodeModule(data, data + sizeof(data));
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_TRUE(result.ok());
- EXPECT_EQ("abc", result.value()->source_map_url);
+ EXPECT_EQ(WasmDebugSymbols::Type::SourceMap,
+ result.value()->debug_symbols.type);
+ ModuleWireBytes wire_bytes(data, data + sizeof(data));
+ WasmName external_url =
+ wire_bytes.GetNameOrNull(result.value()->debug_symbols.external_url);
+ EXPECT_EQ("abc", std::string(external_url.data(), external_url.size()));
}
TEST_F(WasmModuleVerifyTest, MultipleNameSections) {
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index e0abf7adb4..7311ad39e8 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -71,8 +71,8 @@ TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
a.Merge(b);
CheckPool(a, {{1, 4}});
CHECK_EQ(a.regions().size(), 1);
- CHECK_EQ(a.regions().front().begin(), 1);
- CHECK_EQ(a.regions().front().end(), 5);
+ CHECK_EQ(a.regions().begin()->begin(), 1);
+ CHECK_EQ(a.regions().begin()->end(), 5);
}
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
@@ -111,6 +111,18 @@ TEST_F(DisjointAllocationPoolTest, Merging) {
CheckPool(a, {{10, 15}});
}
+TEST_F(DisjointAllocationPoolTest, MergingFirst) {
+ DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
+ a.Merge({5, 5});
+ CheckPool(a, {{5, 10}, {20, 5}});
+}
+
+TEST_F(DisjointAllocationPoolTest, MergingAbove) {
+ DisjointAllocationPool a = Make({{10, 5}, {25, 5}});
+ a.Merge({20, 5});
+ CheckPool(a, {{10, 5}, {20, 10}});
+}
+
TEST_F(DisjointAllocationPoolTest, MergingMore) {
DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
a.Merge({15, 5});
diff --git a/deps/v8/test/unittests/wasm/wasm-gdbserver-unittest.cc b/deps/v8/test/unittests/wasm/wasm-gdbserver-unittest.cc
new file mode 100644
index 0000000000..3d011f4b06
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-gdbserver-unittest.cc
@@ -0,0 +1,275 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "src/debug/wasm/gdb-server/packet.h"
+#include "src/debug/wasm/gdb-server/session.h"
+#include "src/debug/wasm/gdb-server/transport.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace gdb_server {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::StrEq;
+
+class WasmGdbRemoteTest : public ::testing::Test {};
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketAddChars) {
+ Packet packet;
+
+ // Read empty packet
+ bool end_of_packet = packet.EndOfPacket();
+ EXPECT_TRUE(end_of_packet);
+
+ // Add raw chars
+ packet.AddRawChar('4');
+ packet.AddRawChar('2');
+
+ std::string str;
+ packet.GetString(&str);
+ EXPECT_EQ("42", str);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketAddBlock) {
+ static const uint8_t block[] = {0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09};
+ static const size_t kLen = sizeof(block) / sizeof(uint8_t);
+ Packet packet;
+ packet.AddBlock(block, kLen);
+
+ uint8_t buffer[kLen];
+ bool ok = packet.GetBlock(buffer, kLen);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(0, memcmp(block, buffer, kLen));
+
+ packet.Rewind();
+ std::string str;
+ ok = packet.GetString(&str);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ("010203040506070809", str);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketAddString) {
+ Packet packet;
+ packet.AddHexString("foobar");
+
+ std::string str;
+ bool ok = packet.GetString(&str);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ("666f6f626172", str);
+
+ packet.Clear();
+ packet.AddHexString("GDB");
+ ok = packet.GetString(&str);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ("474442", str);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketAddNumbers) {
+ Packet packet;
+
+ static const uint64_t u64_val = 0xdeadbeef89abcdef;
+ static const uint8_t u8_val = 0x42;
+ packet.AddNumberSep(u64_val, ';');
+ packet.AddWord8(u8_val);
+
+ std::string str;
+ packet.GetString(&str);
+ EXPECT_EQ("deadbeef89abcdef;42", str);
+
+ packet.Rewind();
+ uint64_t val = 0;
+ char sep = '\0';
+ bool ok = packet.GetNumberSep(&val, &sep);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(u64_val, val);
+ uint8_t b = 0;
+ ok = packet.GetWord8(&b);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(u8_val, b);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketSequenceNumber) {
+ Packet packet_with_sequence_num;
+ packet_with_sequence_num.AddWord8(42);
+ packet_with_sequence_num.AddRawChar(':');
+ packet_with_sequence_num.AddHexString("foobar");
+
+ int32_t sequence_num = 0;
+ packet_with_sequence_num.ParseSequence();
+ bool ok = packet_with_sequence_num.GetSequence(&sequence_num);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(42, sequence_num);
+
+ Packet packet_without_sequence_num;
+ packet_without_sequence_num.AddHexString("foobar");
+
+ packet_without_sequence_num.ParseSequence();
+ ok = packet_without_sequence_num.GetSequence(&sequence_num);
+ EXPECT_FALSE(ok);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemotePacketRunLengthEncoded) {
+ Packet packet1;
+ packet1.AddRawChar('0');
+ packet1.AddRawChar('*');
+ packet1.AddRawChar(' ');
+
+ std::string str1;
+ bool ok = packet1.GetHexString(&str1);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ("0000", std::string(packet1.GetPayload()));
+
+ Packet packet2;
+ packet2.AddRawChar('1');
+ packet2.AddRawChar('2');
+ packet2.AddRawChar('3');
+ packet2.AddRawChar('*');
+ packet2.AddRawChar(' ');
+ packet2.AddRawChar('a');
+ packet2.AddRawChar('b');
+
+ std::string str2;
+ ok = packet2.GetHexString(&str2);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ("123333ab", std::string(packet2.GetPayload()));
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemoteUtilStringSplit) {
+ std::vector<std::string> parts1 = StringSplit({}, ",");
+ EXPECT_EQ(size_t(0), parts1.size());
+
+ auto parts2 = StringSplit("a", nullptr);
+ EXPECT_EQ(size_t(1), parts2.size());
+ EXPECT_EQ("a", parts2[0]);
+
+ auto parts3 = StringSplit(";a;bc;def;", ",");
+ EXPECT_EQ(size_t(1), parts3.size());
+ EXPECT_EQ(";a;bc;def;", parts3[0]);
+
+ auto parts4 = StringSplit(";a;bc;def;", ";");
+ EXPECT_EQ(size_t(3), parts4.size());
+ EXPECT_EQ("a", parts4[0]);
+ EXPECT_EQ("bc", parts4[1]);
+ EXPECT_EQ("def", parts4[2]);
+}
+
+class MockTransport : public TransportBase {
+ public:
+ MOCK_METHOD0(AcceptConnection, bool());
+ MOCK_METHOD2(Read, bool(char*, int32_t));
+ MOCK_METHOD2(Write, bool(const char*, int32_t));
+ MOCK_CONST_METHOD0(IsDataAvailable, bool());
+ MOCK_METHOD0(Disconnect, void());
+ MOCK_METHOD0(Close, void());
+ MOCK_METHOD0(WaitForDebugStubEvent, void());
+ MOCK_METHOD0(SignalThreadEvent, bool());
+};
+
+TEST_F(WasmGdbRemoteTest, GdbRemoteSessionSendPacket) {
+ const char* ack_buffer = "+";
+
+ MockTransport mock_transport;
+ EXPECT_CALL(mock_transport, Write(StrEq("$474442#39"), 10))
+ .WillOnce(Return(true));
+ EXPECT_CALL(mock_transport, Read(_, _))
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArrayArgument<0>(ack_buffer, ack_buffer + 1), Return(true)));
+
+ Session session(&mock_transport);
+
+ Packet packet;
+ packet.AddHexString("GDB");
+ bool ok = session.SendPacket(&packet);
+ EXPECT_TRUE(ok);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemoteSessionSendPacketDisconnectOnNoAck) {
+ MockTransport mock_transport;
+ EXPECT_CALL(mock_transport, Write(StrEq("$474442#39"), 10))
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_CALL(mock_transport, Read(_, _)).Times(1).WillOnce(Return(false));
+ EXPECT_CALL(mock_transport, Disconnect()).Times(1);
+
+ Session session(&mock_transport);
+
+ Packet packet;
+ packet.AddHexString("GDB");
+ bool ok = session.SendPacket(&packet);
+ EXPECT_FALSE(ok);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemoteSessionGetPacketCheckChecksum) {
+ const char* buffer_bad = "$47#00";
+ const char* buffer_ok = "$47#6b";
+
+ MockTransport mock_transport;
+ EXPECT_CALL(mock_transport, Read(_, _))
+ .WillOnce(
+ DoAll(SetArrayArgument<0>(buffer_bad, buffer_bad + 1), Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_bad + 1, buffer_bad + 2),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_bad + 2, buffer_bad + 3),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_bad + 3, buffer_bad + 4),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_bad + 4, buffer_bad + 5),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_bad + 5, buffer_bad + 6),
+ Return(true)))
+ .WillOnce(
+ DoAll(SetArrayArgument<0>(buffer_ok, buffer_ok + 1), Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_ok + 1, buffer_ok + 2),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_ok + 2, buffer_ok + 3),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_ok + 3, buffer_ok + 4),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_ok + 4, buffer_ok + 5),
+ Return(true)))
+ .WillOnce(DoAll(SetArrayArgument<0>(buffer_ok + 5, buffer_ok + 6),
+ Return(true)));
+ EXPECT_CALL(mock_transport, Write(StrEq("-"), 1)) // Signal bad packet
+ .Times(1)
+ .WillOnce(Return(true));
+ EXPECT_CALL(mock_transport, Write(StrEq("+"), 1)) // Signal ack
+ .Times(1)
+ .WillOnce(Return(true));
+
+ Session session(&mock_transport);
+
+ Packet packet;
+ bool ok = session.GetPacket(&packet);
+ EXPECT_TRUE(ok);
+ char ch;
+ ok = packet.GetBlock(&ch, 1);
+ EXPECT_TRUE(ok);
+ EXPECT_EQ('G', ch);
+}
+
+TEST_F(WasmGdbRemoteTest, GdbRemoteSessionGetPacketDisconnectOnReadFailure) {
+ MockTransport mock_transport;
+ EXPECT_CALL(mock_transport, Read(_, _)).Times(1).WillOnce(Return(false));
+ EXPECT_CALL(mock_transport, Disconnect()).Times(1);
+
+ Session session(&mock_transport);
+ Packet packet;
+ bool ok = session.GetPacket(&packet);
+ EXPECT_FALSE(ok);
+}
+
+} // namespace gdb_server
+} // namespace wasm
+} // namespace internal
+} // namespace v8
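
The literal strings in the session tests above ("$474442#39", "$47#6b", and the rejected "$47#00") follow the GDB Remote Serial Protocol framing: a packet is sent as "$<payload>#<checksum>", where the checksum is the sum of the payload bytes modulo 256, written as two hex digits. A minimal sketch of that framing, with FramePacket as a hypothetical helper that is not part of the patch:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Hypothetical helper: frame a GDB remote payload as "$<payload>#<checksum>".
    std::string FramePacket(const std::string& payload) {
      uint8_t checksum = 0;                   // wraps modulo 256
      for (unsigned char c : payload) checksum += c;
      char hex[3];
      std::snprintf(hex, sizeof(hex), "%02x", checksum);
      return "$" + payload + "#" + hex;
    }

    // FramePacket("474442") == "$474442#39"  ("474442" is hex for "GDB")
    // FramePacket("47")     == "$47#6b"
    // A mismatched trailer such as "$47#00" is what GetPacketCheckChecksum
    // rejects above by replying "-" before acking the correct "$47#6b".
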
diff --git a/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc b/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc
deleted file mode 100644
index 12739ff44f..0000000000
--- a/deps/v8/test/unittests/wasm/wasm-opcodes-unittest.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/unittests/test-utils.h"
-
-#include "src/wasm/wasm-opcodes.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class WasmOpcodesTest : public TestWithZone {};
-
-TEST_F(WasmOpcodesTest, EveryOpcodeHasAName) {
- static const struct {
- WasmOpcode opcode;
- const char* debug_name;
- } kValues[] = {
-#define DECLARE_ELEMENT(name, opcode, sig) {kExpr##name, "kExpr" #name},
- FOREACH_OPCODE(DECLARE_ELEMENT)};
-#undef DECLARE_ELEMENT
-
- for (size_t i = 0; i < arraysize(kValues); i++) {
- const char* result = WasmOpcodes::OpcodeName(kValues[i].opcode);
- if (strcmp("unknown", result) == 0) {
- EXPECT_TRUE(false) << "WasmOpcodes::OpcodeName(" << kValues[i].debug_name
- << ") == \"unknown\";"
- " plazz halp in src/wasm/wasm-opcodes.cc";
- }
- }
-}
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 350a425d47..8d53b767bd 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -31,8 +31,8 @@ own<Trap> Stage4_GC(void* env, const Val args[], Val results[]) {
printf("Stage4...\n");
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env);
isolate->heap()->PreciseCollectAllGarbage(
- i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
- v8::kGCCallbackFlagForced);
+ i::Heap::kForcedGC, i::GarbageCollectionReason::kTesting,
+ v8::kNoGCCallbackFlags);
results[0] = Val::i32(args[0].i32() + 1);
return nullptr;
}
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-tests.status b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
index a5d554103f..05488c1711 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-tests.status
+++ b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
@@ -3,9 +3,15 @@
# found in the LICENSE file.
[
+
['lite_mode or variant == jitless', {
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'*': [SKIP],
}], # lite_mode or variant == jitless
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index a4cb262c33..61e55477f9 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -31,10 +31,6 @@ proposal_flags = [{
'name': 'JS-BigInt-integration',
'flags': ['--experimental-wasm-bigint']
},
- {
- 'name': 'multi-value',
- 'flags': ['--experimental-wasm-mv']
- },
]
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index a1ad6de73f..a1fa738d8c 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-6e091ed0e8ef57cdbfa46265b0001f19e3bb0f20 \ No newline at end of file
+39f768b7ebbd39747e7e64415214a0c5f15a2d43 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index b8fe4c068a..2dec35869a 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -3,16 +3,18 @@
# found in the LICENSE file.
[
+
[ALWAYS, {
# These are slow, and not useful to run for the proposals:
'proposals/reference-types/limits': [SKIP],
- 'proposals/multi-value/limits': [SKIP],
'proposals/bulk-memory-operations/limits': [SKIP],
'proposals/JS-BigInt-integration/limits': [SKIP],
'proposals/js-types/limits': [SKIP],
# TODO(v8:9673): Enable these spec tests once they exist, and the out-dated
# tests have been removed.
'proposals/JS-BigInt-integration/module/params-long': [FAIL],
+ # TODO(wasm): Update memory limit.
+ 'limits': [FAIL],
}], # ALWAYS
['arch == s390 or arch == s390x or system == aix', {
@@ -33,4 +35,9 @@
'*': [SKIP],
}], # lite_mode or variant == jitless
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 29c80e8ddc..4ed010abc7 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -24,11 +24,6 @@ proposal_flags = [{
'name': 'JS-BigInt-integration',
'flags': ['--experimental-wasm-bigint']
},
- {
- 'name': 'multi-value',
- 'flags': ['--experimental-wasm-mv',
- '--no-experimental-wasm-bulk-memory']
- },
]
class TestLoader(testsuite.JSTestLoader):
@@ -54,7 +49,8 @@ class TestCase(testcase.D8TestCase):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
- return []
+ # TODO(thibaudm): Remove the flag once multi-value is shipped in V8.
+ return ['--experimental-wasm-mv']
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 9f6ff04f64..2ee77e0123 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-acdfb34955e3b0e5031890aebaf552782f38f0f3 \ No newline at end of file
+5e319c4079c815b7d6fbc0b11bdfd3c0720caa0a \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 84daefc36d..d82c2f218f 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -20,10 +20,9 @@
'proposals/js-types/globals': [FAIL],
'proposals/js-types/linking': [FAIL],
- # TODO(thibaudm): Spec tests do not check multi-return functions correctly.
- 'proposals/multi-value/call': [FAIL],
- 'proposals/multi-value/if': [FAIL],
- 'proposals/multi-value/func': [FAIL],
+ # TODO(wasm): This test declares a table larger than allowed by the spec.
+ 'table': [FAIL],
+ 'proposals/reference-types/table': [FAIL],
}], # ALWAYS
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
@@ -47,13 +46,10 @@
'proposals/JS-BigInt-integration/f64': [SKIP],
'proposals/bulk-memory-operations/f32': [SKIP],
'proposals/bulk-memory-operations/f64': [SKIP],
- 'proposals/multi-value/f32': [SKIP],
- 'proposals/multi-value/f64': [SKIP],
'proposals/reference-types/f32': [SKIP],
'proposals/reference-types/f64': [SKIP],
'proposals/JS-BigInt-integration/float_misc': [SKIP],
'proposals/bulk-memory-operations/float_misc': [SKIP],
- 'proposals/multi-value/float_misc': [SKIP],
'proposals/reference-types/float_misc': [SKIP],
}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
@@ -73,8 +69,6 @@
'proposals/JS-BigInt-integration/f64': [SKIP],
'proposals/bulk-memory-operations/f32': [SKIP],
'proposals/bulk-memory-operations/f64': [SKIP],
- 'proposals/multi-value/f32': [SKIP],
- 'proposals/multi-value/f64': [SKIP],
'proposals/reference-types/f32': [SKIP],
'proposals/reference-types/f64': [SKIP],
# This test fails because ppc float to double doesn't convert sNaN to qNaN.
@@ -82,7 +76,6 @@
'proposals/js-types/conversions': [SKIP],
'proposals/JS-BigInt-integration/conversions': [SKIP],
'proposals/bulk-memory-operations/conversions': [SKIP],
- 'proposals/multi-value/conversions': [SKIP],
'proposals/reference-types/conversions': [SKIP],
}], # 'arch == ppc or arch == ppc64'
@@ -96,8 +89,6 @@
'proposals/JS-BigInt-integration/f64': [SKIP],
'proposals/bulk-memory-operations/f32': [SKIP],
'proposals/bulk-memory-operations/f64': [SKIP],
- 'proposals/multi-value/f32': [SKIP],
- 'proposals/multi-value/f64': [SKIP],
'proposals/reference-types/f32': [SKIP],
'proposals/reference-types/f64': [SKIP],
}], # 'arch == s390 or arch == s390x'
@@ -108,5 +99,9 @@
'*': [SKIP],
}], # lite_mode or variant == jitless
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
]
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index b2938c8921..162d843bda 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -136,5 +136,9 @@
'fast/js/string-capitalization': [FAIL],
}], # variant == no_wasm_traps
-##############################################################################
+################################################################################
+['variant == stress_snapshot', {
+ '*': [SKIP], # only relevant for mjsunit tests.
+}],
+
]
diff --git a/deps/v8/testing/gmock/BUILD.gn b/deps/v8/testing/gmock/BUILD.gn
index 0e203cb6ee..de5ae53909 100644
--- a/deps/v8/testing/gmock/BUILD.gn
+++ b/deps/v8/testing/gmock/BUILD.gn
@@ -12,13 +12,10 @@ source_set("gmock") {
testonly = true
sources = [
"include/gmock/gmock-actions.h",
- "include/gmock/gmock-generated-function-mockers.h",
"include/gmock/gmock-matchers.h",
"include/gmock/gmock.h",
]
- deps = [
- "//third_party/googletest:gmock",
- ]
+ deps = [ "//third_party/googletest:gmock" ]
public_configs = [
"//third_party/googletest:gmock_config",
@@ -31,7 +28,5 @@ source_set("gmock") {
# into //third_party/googletest.
source_set("gmock_main") {
testonly = true
- deps = [
- "//third_party/googletest:gmock_main",
- ]
+ deps = [ "//third_party/googletest:gmock_main" ]
}
diff --git a/deps/v8/testing/gmock/include/gmock/gmock-generated-function-mockers.h b/deps/v8/testing/gmock/include/gmock/gmock-generated-function-mockers.h
deleted file mode 100644
index b9986c7b11..0000000000
--- a/deps/v8/testing/gmock/include/gmock/gmock-generated-function-mockers.h
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The file/directory layout of Google Test is not yet considered stable. Until
-// it stabilizes, Chromium code will use forwarding headers in testing/gtest
-// and testing/gmock, instead of directly including files in
-// third_party/googletest.
-
-#include "third_party/googletest/src/googlemock/include/gmock/gmock-generated-function-mockers.h"
diff --git a/deps/v8/third_party/googletest/BUILD.gn b/deps/v8/third_party/googletest/BUILD.gn
index f105d1a6b4..cfa9205547 100644
--- a/deps/v8/third_party/googletest/BUILD.gn
+++ b/deps/v8/third_party/googletest/BUILD.gn
@@ -96,8 +96,8 @@ source_set("gtest") {
if (is_fuchsia) {
deps += [
- "//third_party/fuchsia-sdk/sdk:fdio",
- "//third_party/fuchsia-sdk/sdk:zx",
+ "//third_party/fuchsia-sdk/sdk/pkg/fdio",
+ "//third_party/fuchsia-sdk/sdk/pkg/zx",
]
}
}
@@ -106,12 +106,8 @@ source_set("gtest") {
# See README.chromium for details.
source_set("gtest_main") {
testonly = true
- sources = [
- "src/googletest/src/gtest_main.cc",
- ]
- deps = [
- ":gtest",
- ]
+ sources = [ "src/googletest/src/gtest_main.cc" ]
+ deps = [ ":gtest" ]
}
# Do NOT depend on this directly. Use //testing/gmock:gmock_main instead.
@@ -123,8 +119,6 @@ source_set("gmock") {
"src/googlemock/include/gmock/gmock-cardinalities.h",
"src/googlemock/include/gmock/gmock-function-mocker.h",
"src/googlemock/include/gmock/gmock-generated-actions.h",
- "src/googlemock/include/gmock/gmock-generated-function-mockers.h",
- "src/googlemock/include/gmock/gmock-generated-matchers.h",
"src/googlemock/include/gmock/gmock-generated-nice-strict.h",
"src/googlemock/include/gmock/gmock-matchers.h",
"src/googlemock/include/gmock/gmock-more-actions.h",
@@ -158,10 +152,6 @@ source_set("gmock") {
# See README.chromium for details.
static_library("gmock_main") {
testonly = true
- sources = [
- "src/googlemock/src/gmock_main.cc",
- ]
- deps = [
- ":gmock",
- ]
+ sources = [ "src/googlemock/src/gmock_main.cc" ]
+ deps = [ ":gmock" ]
}
diff --git a/deps/v8/third_party/jinja2/README.chromium b/deps/v8/third_party/jinja2/README.chromium
index 5246c2f84b..9b8311a675 100644
--- a/deps/v8/third_party/jinja2/README.chromium
+++ b/deps/v8/third_party/jinja2/README.chromium
@@ -2,6 +2,7 @@ Name: Jinja2 Python Template Engine
Short Name: jinja2
URL: http://jinja.pocoo.org/
Version: 2.10
+CPEPrefix: cpe:/a:pocoo:jinja2:2.10
License: BSD 3-Clause
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/jsoncpp/BUILD.gn b/deps/v8/third_party/jsoncpp/BUILD.gn
new file mode 100644
index 0000000000..411d2d62e7
--- /dev/null
+++ b/deps/v8/third_party/jsoncpp/BUILD.gn
@@ -0,0 +1,50 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+config("jsoncpp_config") {
+ include_dirs = [
+ "source/include",
+ "generated",
+ ]
+
+ # TODO(crbug.com/983223): Update JsonCpp BUILD.gn to remove deprecated
+ # declaration flag.
+ # This temporary flag allows clients to update to the new version, and then
+ # update to the new StreamWriter and CharReader classes.
+ if (!is_win || is_clang) {
+ cflags_cc = [ "-Wno-deprecated-declarations" ]
+ }
+}
+
+source_set("jsoncpp") {
+ sources = [
+ "generated/version.h",
+ "source/include/json/assertions.h",
+ "source/include/json/autolink.h",
+ "source/include/json/config.h",
+ "source/include/json/features.h",
+ "source/include/json/forwards.h",
+ "source/include/json/json.h",
+ "source/include/json/reader.h",
+ "source/include/json/value.h",
+ "source/include/json/writer.h",
+ "source/src/lib_json/json_reader.cpp",
+ "source/src/lib_json/json_tool.h",
+ "source/src/lib_json/json_value.cpp",
+ "source/src/lib_json/json_writer.cpp",
+ ]
+
+ public_configs = [ ":jsoncpp_config" ]
+
+ defines = [
+ "JSON_USE_EXCEPTION=0",
+ "JSON_USE_NULLREF=0",
+ ]
+
+ include_dirs = [ "source/src/lib_json" ]
+
+ if (!is_win || is_clang) {
+ cflags_cc = [ "-Wno-implicit-fallthrough" ]
+ }
+}
diff --git a/deps/v8/third_party/jsoncpp/LICENSE b/deps/v8/third_party/jsoncpp/LICENSE
new file mode 100644
index 0000000000..ca2bfe1a03
--- /dev/null
+++ b/deps/v8/third_party/jsoncpp/LICENSE
@@ -0,0 +1,55 @@
+The JsonCpp library's source code, including accompanying documentation,
+tests and demonstration applications, are licensed under the following
+conditions...
+
+The author (Baptiste Lepilleur) explicitly disclaims copyright in all
+jurisdictions which recognize such a disclaimer. In such jurisdictions,
+this software is released into the Public Domain.
+
+In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
+2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is
+released under the terms of the MIT License (see below).
+
+In jurisdictions which recognize Public Domain property, the user of this
+software may choose to accept it either as 1) Public Domain, 2) under the
+conditions of the MIT License (see below), or 3) under the terms of dual
+Public Domain/MIT License conditions described here, as they choose.
+
+The MIT License is about as close to Public Domain as a license can get, and is
+described in clear, concise terms at:
+
+ http://en.wikipedia.org/wiki/MIT_License
+
+The full text of the MIT License follows:
+
+========================================================================
+Copyright (c) 2007-2010 Baptiste Lepilleur
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+========================================================================
+(END LICENSE TEXT)
+
+The MIT license is compatible with both the GPL and commercial
+software, affording one all of the rights of Public Domain with the
+minor nuisance of being required to keep the above copyright notice
+and license text in the source code. Note also that by accepting the
+Public Domain "license" you can re-license your copy using whatever
+license you like.
diff --git a/deps/v8/third_party/jsoncpp/README.chromium b/deps/v8/third_party/jsoncpp/README.chromium
new file mode 100644
index 0000000000..48bc543f41
--- /dev/null
+++ b/deps/v8/third_party/jsoncpp/README.chromium
@@ -0,0 +1,16 @@
+Name: jsoncpp
+URL: https://github.com/open-source-parsers/jsoncpp
+Version: f572e8e42e22cfcf5ab0aea26574f408943edfa4
+License: MIT
+License File: LICENSE
+Security Critical: yes
+
+Description:
+JsonCpp is used by multiple projects for parsing and generating JSON data. This
+project is mirrored here from the public GitHub project, with a custom BUILD.gn
+to allow for building with our Ninja + GN configuration. The main project uses
+Meson or CMake for building.
+
+Note: to update this project to a new version, regenerating the version.h header
+is required. This can be done by installing either CMake or Meson, building the
+project, and copying the generated version.h to the generated/ subfolder.
diff --git a/deps/v8/third_party/jsoncpp/generated/version.h b/deps/v8/third_party/jsoncpp/generated/version.h
new file mode 100644
index 0000000000..9a760821e2
--- /dev/null
+++ b/deps/v8/third_party/jsoncpp/generated/version.h
@@ -0,0 +1,22 @@
+// DO NOT EDIT. This file (and "version") is a template used by the build system
+// (either CMake or Meson) to generate a "version.h" header file.
+#ifndef JSON_VERSION_H_INCLUDED
+#define JSON_VERSION_H_INCLUDED
+
+#define JSONCPP_VERSION_STRING "1.9.0"
+#define JSONCPP_VERSION_MAJOR 1
+#define JSONCPP_VERSION_MINOR 9
+#define JSONCPP_VERSION_PATCH 0
+#define JSONCPP_VERSION_QUALIFIER
+#define JSONCPP_VERSION_HEXA \
+ ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | \
+ (JSONCPP_VERSION_PATCH << 8))
+
+#ifdef JSONCPP_USING_SECURE_MEMORY
+#undef JSONCPP_USING_SECURE_MEMORY
+#endif
+#define JSONCPP_USING_SECURE_MEMORY 0
+// If non-zero, the library zeroes any memory that it has allocated before
+// it frees its memory.
+
+#endif // JSON_VERSION_H_INCLUDED
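
JSONCPP_VERSION_HEXA above packs major, minor, and patch one byte apart, so for version 1.9.0 it evaluates to (1 << 24) | (9 << 16) | (0 << 8) == 0x01090000. A small, hypothetical compile-time check of that arithmetic, not part of the generated header:

    // 0x01000000 | 0x00090000 | 0x00000000 == 0x01090000 for version 1.9.0.
    static_assert(((1 << 24) | (9 << 16) | (0 << 8)) == 0x01090000,
                  "version components packed one byte apart");
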
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 2d9f33a312..03af91af95 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -14,1399 +14,1385 @@
// https://github.com/python/cpython/blob/master/Objects/listsort.txt
namespace array {
- class SortState extends HeapObject {
- macro Compare(implicit context: Context)(x: JSAny, y: JSAny): Number {
- const sortCompare: CompareBuiltinFn = this.sortComparePtr;
- return sortCompare(context, this.userCmpFn, x, y);
- }
+class SortState extends HeapObject {
+ macro Compare(implicit context: Context)(x: JSAny, y: JSAny): Number {
+ const sortCompare: CompareBuiltinFn = this.sortComparePtr;
+ return sortCompare(context, this.userCmpFn, x, y);
+ }
- macro CheckAccessor(implicit context: Context)() labels Bailout {
- const canUseSameAccessorFn: CanUseSameAccessorFn =
- this.canUseSameAccessorFn;
+ macro CheckAccessor(implicit context: Context)() labels Bailout {
+ if (!IsFastJSArray(this.receiver, context)) goto Bailout;
- if (!canUseSameAccessorFn(
- context, this.receiver, this.initialReceiverMap,
- this.initialReceiverLength)) {
- goto Bailout;
- }
- }
+ const canUseSameAccessorFn: CanUseSameAccessorFn =
+ this.canUseSameAccessorFn;
- macro ResetToGenericAccessor() {
- this.loadFn = Load<GenericElementsAccessor>;
- this.storeFn = Store<GenericElementsAccessor>;
- this.deleteFn = Delete<GenericElementsAccessor>;
+ if (!canUseSameAccessorFn(
+ context, this.receiver, this.initialReceiverMap,
+ this.initialReceiverLength)) {
+ goto Bailout;
}
+ }
- // The receiver of the Array.p.sort call.
- receiver: JSReceiver;
-
- // The initial map and length of the receiver. After calling into JS, these
- // are reloaded and checked. If they changed we bail to the baseline
- // GenericElementsAccessor.
- initialReceiverMap: Map;
- initialReceiverLength: Number;
-
- // If the user provided a comparison function, it is stored here.
- userCmpFn: Undefined|Callable;
-
- // Function pointer to the comparison function. This can either be a builtin
- // that calls the user-provided comparison function or "SortDefault", which
- // uses ToString and a lexicographical compare.
- sortComparePtr: CompareBuiltinFn;
-
- // The following four function pointer represent a Accessor/Path.
- // These are used to Load/Store/Delete elements and to check whether
- // to bail to the baseline GenericElementsAccessor.
- loadFn: LoadFn;
- storeFn: StoreFn;
- deleteFn: DeleteFn;
- canUseSameAccessorFn: CanUseSameAccessorFn;
-
- // This controls when we get *into* galloping mode. It's initialized to
- // kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random
- // data, and lower for highly structured data.
- minGallop: Smi;
-
- // A stack of sortState.pendingRunsSize pending runs yet to be merged.
- // Run #i starts at sortState.pendingRuns[2 * i] and extends for
- // sortState.pendingRuns[2 * i + 1] elements:
- //
- // [..., base (i-1), length (i-1), base i, length i]
- //
- // It's always true (so long as the indices are in bounds) that
- //
- // base of run #i + length of run #i == base of run #i + 1
- //
- pendingRunsSize: Smi;
- pendingRuns: FixedArray;
+ macro ResetToGenericAccessor() {
+ this.loadFn = Load<GenericElementsAccessor>;
+ this.storeFn = Store<GenericElementsAccessor>;
+ this.deleteFn = Delete<GenericElementsAccessor>;
+ }
- // This is a copy of the original array/object that needs sorting.
- // workArray is never exposed to user-code, and as such cannot change
- // shape and won't be left-trimmed.
- workArray: FixedArray;
+ // The receiver of the Array.p.sort call.
+ receiver: JSReceiver;
+
+ // The initial map and length of the receiver. After calling into JS, these
+ // are reloaded and checked. If they changed we bail to the baseline
+ // GenericElementsAccessor.
+ initialReceiverMap: Map;
+ initialReceiverLength: Number;
+
+ // If the user provided a comparison function, it is stored here.
+ userCmpFn: Undefined|Callable;
+
+ // Function pointer to the comparison function. This can either be a builtin
+ // that calls the user-provided comparison function or "SortDefault", which
+ // uses ToString and a lexicographical compare.
+ sortComparePtr: CompareBuiltinFn;
+
+ // The following four function pointers represent an Accessor/Path.
+ // These are used to Load/Store/Delete elements and to check whether
+ // to bail to the baseline GenericElementsAccessor.
+ loadFn: LoadFn;
+ storeFn: StoreFn;
+ deleteFn: DeleteFn;
+ canUseSameAccessorFn: CanUseSameAccessorFn;
+
+ // This controls when we get *into* galloping mode. It's initialized to
+ // kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random
+ // data, and lower for highly structured data.
+ minGallop: Smi;
+
+ // A stack of sortState.pendingRunsSize pending runs yet to be merged.
+ // Run #i starts at sortState.pendingRuns[2 * i] and extends for
+ // sortState.pendingRuns[2 * i + 1] elements:
+ //
+ // [..., base (i-1), length (i-1), base i, length i]
+ //
+ // It's always true (so long as the indices are in bounds) that
+ //
+ // base of run #i + length of run #i == base of run #i + 1
+ //
+ pendingRunsSize: Smi;
+ pendingRuns: FixedArray;
- // Pointer to the temporary array.
- tempArray: FixedArray;
+ // This is a copy of the original array/object that needs sorting.
+ // workArray is never exposed to user-code, and as such cannot change
+ // shape and won't be left-trimmed.
+ workArray: FixedArray;
- // The initialReceiverLength converted and clamped to Smi.
- sortLength: Smi;
+ // Pointer to the temporary array.
+ tempArray: FixedArray;
- // The number of undefined that need to be inserted after sorting
- // when the elements are copied back from the workArray to the receiver.
- numberOfUndefined: Smi;
- }
+ // The initialReceiverLength converted and clamped to Smi.
+ sortLength: Smi;
- type FastSmiElements extends ElementsKind;
- type FastObjectElements extends ElementsKind;
-
- // With the pre-processing step in Torque, the exact number of elements
- // to sort is unknown at the time the sort state is created.
- // The 'length' property is an upper bound (as per spec),
- // while the actual size of the backing store is a good guess.
- // After the pre-processing step, the workarray won't change in length.
- macro CalculateWorkArrayLength(
- receiver: JSReceiver, initialReceiverLength: Number): intptr {
- // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
- // https://crbug.com/v8/7970.
- let clampedReceiverLength: uintptr;
- try {
- clampedReceiverLength =
- ChangeSafeIntegerNumberToUintPtr(initialReceiverLength)
- otherwise UIntPtrOverflow;
- if (clampedReceiverLength > kSmiMaxValue) {
- clampedReceiverLength = kSmiMaxValue;
- }
- }
- label UIntPtrOverflow {
+ // The number of undefined that need to be inserted after sorting
+ // when the elements are copied back from the workArray to the receiver.
+ numberOfUndefined: Smi;
+}
+
+type FastSmiElements extends ElementsKind;
+type FastObjectElements extends ElementsKind;
+
+// With the pre-processing step in Torque, the exact number of elements
+// to sort is unknown at the time the sort state is created.
+// The 'length' property is an upper bound (as per spec),
+// while the actual size of the backing store is a good guess.
+// After the pre-processing step, the workarray won't change in length.
+macro CalculateWorkArrayLength(
+ receiver: JSReceiver, initialReceiverLength: Number): intptr {
+ // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
+ // https://crbug.com/v8/7970.
+ let clampedReceiverLength: uintptr;
+ try {
+ clampedReceiverLength =
+ ChangeSafeIntegerNumberToUintPtr(initialReceiverLength)
+ otherwise UIntPtrOverflow;
+ if (clampedReceiverLength > kSmiMaxValue) {
clampedReceiverLength = kSmiMaxValue;
}
+ } label UIntPtrOverflow {
+ clampedReceiverLength = kSmiMaxValue;
+ }
- let workArrayLength: intptr = Convert<intptr>(clampedReceiverLength);
- try {
- const object = Cast<JSObject>(receiver) otherwise NoJsObject;
- const elementsLength = Convert<intptr>(object.elements.length);
+ let workArrayLength: intptr = Convert<intptr>(clampedReceiverLength);
+ try {
+ const object = Cast<JSObject>(receiver) otherwise NoJsObject;
+ const elementsLength = Convert<intptr>(object.elements.length);
- // In some cases, elements are only on prototypes, but not on the receiver
- // itself. Do nothing then, as {workArrayLength} got initialized with the
- // {length} property.
- if (elementsLength != 0) {
- workArrayLength = IntPtrMin(workArrayLength, elementsLength);
- }
+ // In some cases, elements are only on prototypes, but not on the receiver
+ // itself. Do nothing then, as {workArrayLength} got initialized with the
+ // {length} property.
+ if (elementsLength != 0) {
+ workArrayLength = IntPtrMin(workArrayLength, elementsLength);
}
- label NoJsObject {}
+ } label NoJsObject {}
- return workArrayLength;
- }
+ return workArrayLength;
+}
- transitioning macro NewSortState(implicit context: Context)(
- receiver: JSReceiver, comparefn: Undefined|Callable,
- initialReceiverLength: Number): SortState {
- const sortComparePtr =
- comparefn != Undefined ? SortCompareUserFn : SortCompareDefault;
- const map = receiver.map;
- let loadFn: LoadFn;
- let storeFn: StoreFn;
- let deleteFn: DeleteFn;
- let canUseSameAccessorFn: CanUseSameAccessorFn;
-
- try {
- const a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
-
- // Copy copy-on-write (COW) arrays.
- array::EnsureWriteableFastElements(a);
-
- const elementsKind: ElementsKind = map.elements_kind;
- if (IsDoubleElementsKind(elementsKind)) {
- loadFn = Load<FastDoubleElements>;
- storeFn = Store<FastDoubleElements>;
- deleteFn = Delete<FastDoubleElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastDoubleElements>;
- } else if (IsFastSmiElementsKind(elementsKind)) {
- loadFn = Load<FastSmiElements>;
- storeFn = Store<FastSmiElements>;
- deleteFn = Delete<FastSmiElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastSmiElements>;
- } else {
- loadFn = Load<FastObjectElements>;
- storeFn = Store<FastObjectElements>;
- deleteFn = Delete<FastObjectElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastObjectElements>;
- }
- }
- label Slow {
- loadFn = Load<GenericElementsAccessor>;
- storeFn = Store<GenericElementsAccessor>;
- deleteFn = Delete<GenericElementsAccessor>;
- canUseSameAccessorFn = CanUseSameAccessor<GenericElementsAccessor>;
+transitioning macro NewSortState(implicit context: Context)(
+ receiver: JSReceiver, comparefn: Undefined|Callable,
+ initialReceiverLength: Number): SortState {
+ const sortComparePtr =
+ comparefn != Undefined ? SortCompareUserFn : SortCompareDefault;
+ const map = receiver.map;
+ let loadFn: LoadFn;
+ let storeFn: StoreFn;
+ let deleteFn: DeleteFn;
+ let canUseSameAccessorFn: CanUseSameAccessorFn;
+
+ try {
+ const a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+
+ // Copy copy-on-write (COW) arrays.
+ array::EnsureWriteableFastElements(a);
+
+ const elementsKind: ElementsKind = map.elements_kind;
+ if (IsDoubleElementsKind(elementsKind)) {
+ loadFn = Load<FastDoubleElements>;
+ storeFn = Store<FastDoubleElements>;
+ deleteFn = Delete<FastDoubleElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastDoubleElements>;
+ } else if (IsFastSmiElementsKind(elementsKind)) {
+ loadFn = Load<FastSmiElements>;
+ storeFn = Store<FastSmiElements>;
+ deleteFn = Delete<FastSmiElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastSmiElements>;
+ } else {
+ loadFn = Load<FastObjectElements>;
+ storeFn = Store<FastObjectElements>;
+ deleteFn = Delete<FastObjectElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastObjectElements>;
}
-
- const workArrayLength =
- CalculateWorkArrayLength(receiver, initialReceiverLength);
-
- return new SortState{
- receiver,
- initialReceiverMap: map,
- initialReceiverLength,
- userCmpFn: comparefn,
- sortComparePtr,
- loadFn,
- storeFn,
- deleteFn,
- canUseSameAccessorFn,
- minGallop: kMinGallopWins,
- pendingRunsSize: 0,
- pendingRuns: AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending)),
- workArray: AllocateZeroedFixedArray(workArrayLength),
- tempArray: kEmptyFixedArray,
- sortLength: 0,
- numberOfUndefined: 0
- };
- }
-
- const kSuccess: Smi = 0;
-
- // The maximum number of entries in a SortState's pending-runs stack.
- // This is enough to sort arrays of size up to about
- // 32 * phi ** kMaxMergePending
- // where phi ~= 1.618. 85 is ridiculously large enough, good for an array with
- // 2 ** 64 elements.
- const kMaxMergePending: constexpr int31 = 85;
-
- // When we get into galloping mode, we stay there until both runs win less
- // often then kMinGallop consecutive times. See listsort.txt for more info.
- const kMinGallopWins: constexpr int31 = 7;
-
- // Default size of the temporary array. The temporary array is allocated when
- // it is first requested, but it has always at least this size.
- const kSortStateTempSize: Smi = 32;
-
- type LoadFn = builtin(Context, SortState, Smi) => (JSAny|TheHole);
- type StoreFn = builtin(Context, SortState, Smi, JSAny) => Smi;
- type DeleteFn = builtin(Context, SortState, Smi) => Smi;
- type CanUseSameAccessorFn = builtin(Context, JSReceiver, Map, Number) =>
- Boolean;
- type CompareBuiltinFn = builtin(Context, JSAny, JSAny, JSAny) => Number;
-
- // The following builtins implement Load/Store for all the Accessors.
- // The most generic baseline version uses Get-/SetProperty. We do not need
- // to worry about the prototype chain, because the pre-processing step has
- // copied values from the prototype chain to the receiver if they were visible
- // through a hole.
-
- transitioning builtin Load<ElementsAccessor : type extends ElementsKind>(
- context: Context, sortState: SortState, index: Smi): JSAny|TheHole {
- const receiver = sortState.receiver;
- if (!HasProperty_Inline(receiver, index)) return TheHole;
- return GetProperty(receiver, index);
+ } label Slow {
+ loadFn = Load<GenericElementsAccessor>;
+ storeFn = Store<GenericElementsAccessor>;
+ deleteFn = Delete<GenericElementsAccessor>;
+ canUseSameAccessorFn = CanUseSameAccessor<GenericElementsAccessor>;
}
- Load<FastSmiElements>(context: Context, sortState: SortState, index: Smi):
- JSAny|TheHole {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
- }
+ const workArrayLength =
+ CalculateWorkArrayLength(receiver, initialReceiverLength);
+
+ return new SortState{
+ receiver,
+ initialReceiverMap: map,
+ initialReceiverLength,
+ userCmpFn: comparefn,
+ sortComparePtr,
+ loadFn,
+ storeFn,
+ deleteFn,
+ canUseSameAccessorFn,
+ minGallop: kMinGallopWins,
+ pendingRunsSize: 0,
+ pendingRuns: AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending)),
+ workArray: AllocateZeroedFixedArray(workArrayLength),
+ tempArray: kEmptyFixedArray,
+ sortLength: 0,
+ numberOfUndefined: 0
+ };
+}
- Load<FastObjectElements>(context: Context, sortState: SortState, index: Smi):
- JSAny|TheHole {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
- }
+const kSuccess: Smi = 0;
+
+// The maximum number of entries in a SortState's pending-runs stack.
+// This is enough to sort arrays of size up to about
+// 32 * phi ** kMaxMergePending
+// where phi ~= 1.618. 85 is ridiculously large enough, good for an array with
+// 2 ** 64 elements.
+const kMaxMergePending: constexpr int31 = 85;
+
+// When we get into galloping mode, we stay there until both runs win less
+// often than kMinGallop consecutive times. See listsort.txt for more info.
+const kMinGallopWins: constexpr int31 = 7;
+
+// Default size of the temporary array. The temporary array is allocated when
+// it is first requested, but it always has at least this size.
+const kSortStateTempSize: Smi = 32;
+
+type LoadFn = builtin(Context, SortState, Smi) => (JSAny|TheHole);
+type StoreFn = builtin(Context, SortState, Smi, JSAny) => Smi;
+type DeleteFn = builtin(Context, SortState, Smi) => Smi;
+type CanUseSameAccessorFn = builtin(Context, JSReceiver, Map, Number) =>
+ Boolean;
+type CompareBuiltinFn = builtin(Context, JSAny, JSAny, JSAny) => Number;
+
+// The following builtins implement Load/Store for all the Accessors.
+// The most generic baseline version uses Get-/SetProperty. We do not need
+// to worry about the prototype chain, because the pre-processing step has
+// copied values from the prototype chain to the receiver if they were visible
+// through a hole.
+
+transitioning builtin Load<ElementsAccessor : type extends ElementsKind>(
+ context: Context, sortState: SortState, index: Smi): JSAny|TheHole {
+ const receiver = sortState.receiver;
+ if (!HasProperty_Inline(receiver, index)) return TheHole;
+ return GetProperty(receiver, index);
+}
- Load<FastDoubleElements>(context: Context, sortState: SortState, index: Smi):
- JSAny|TheHole {
- try {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedDoubleArray>(object.elements);
- const value = elements.floats[index].Value() otherwise IfHole;
- return AllocateHeapNumberWithValue(value);
- }
- label IfHole {
- return TheHole;
- }
- }
+Load<FastSmiElements>(
+ context: Context, sortState: SortState, index: Smi): JSAny|TheHole {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
+}
- transitioning builtin Store<ElementsAccessor : type extends ElementsKind>(
- context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
- SetProperty(sortState.receiver, index, value);
- return kSuccess;
- }
+Load<FastObjectElements>(
+ context: Context, sortState: SortState, index: Smi): JSAny|TheHole {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ return UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
+}
- Store<FastSmiElements>(
- context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
+Load<FastDoubleElements>(
+ context: Context, sortState: SortState, index: Smi): JSAny|TheHole {
+ try {
const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- const value = UnsafeCast<Smi>(value);
- StoreFixedArrayElement(elements, index, value, SKIP_WRITE_BARRIER);
- return kSuccess;
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ const value = elements.floats[index].Value() otherwise IfHole;
+ return AllocateHeapNumberWithValue(value);
+ } label IfHole {
+ return TheHole;
}
+}
- Store<FastObjectElements>(
- context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- elements.objects[index] = value;
- return kSuccess;
- }
+transitioning builtin Store<ElementsAccessor : type extends ElementsKind>(
+ context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
+ SetProperty(sortState.receiver, index, value);
+ return kSuccess;
+}
- Store<FastDoubleElements>(
- context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedDoubleArray>(object.elements);
- const heapVal = UnsafeCast<HeapNumber>(value);
- const val = Convert<float64>(heapVal);
- StoreFixedDoubleArrayElementSmi(elements, index, val);
- return kSuccess;
- }
+Store<FastSmiElements>(
+ context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ const value = UnsafeCast<Smi>(value);
+ StoreFixedArrayElement(elements, index, value, SKIP_WRITE_BARRIER);
+ return kSuccess;
+}
- transitioning builtin Delete<ElementsAccessor : type extends ElementsKind>(
- context: Context, sortState: SortState, index: Smi): Smi {
- const receiver = sortState.receiver;
- DeleteProperty(receiver, index, LanguageMode::kStrict);
- return kSuccess;
- }
+Store<FastObjectElements>(
+ context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = value;
+ return kSuccess;
+}
- Delete<FastSmiElements>(context: Context, sortState: SortState, index: Smi):
- Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+Store<FastDoubleElements>(
+ context: Context, sortState: SortState, index: Smi, value: JSAny): Smi {
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ const heapVal = UnsafeCast<HeapNumber>(value);
+ const val = Convert<float64>(heapVal);
+ StoreFixedDoubleArrayElementSmi(elements, index, val);
+ return kSuccess;
+}
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- elements.objects[index] = TheHole;
- return kSuccess;
- }
+transitioning builtin Delete<ElementsAccessor : type extends ElementsKind>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ const receiver = sortState.receiver;
+ DeleteProperty(receiver, index, LanguageMode::kStrict);
+ return kSuccess;
+}
- Delete<FastObjectElements>(
- context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+Delete<FastSmiElements>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedArray>(object.elements);
- elements.objects[index] = TheHole;
- return kSuccess;
- }
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = TheHole;
+ return kSuccess;
+}
- Delete<FastDoubleElements>(
- context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+Delete<FastObjectElements>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const elements = UnsafeCast<FixedDoubleArray>(object.elements);
- elements.floats[index] = kDoubleHole;
- return kSuccess;
- }
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = TheHole;
+ return kSuccess;
+}
- transitioning builtin SortCompareDefault(
- context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn == Undefined);
+Delete<FastDoubleElements>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
- if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
- return SmiLexicographicCompare(UnsafeCast<Smi>(x), UnsafeCast<Smi>(y));
- }
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ elements.floats[index] = kDoubleHole;
+ return kSuccess;
+}
- // 5. Let xString be ? ToString(x).
- const xString = ToString_Inline(x);
+transitioning builtin SortCompareDefault(
+ context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
+ assert(comparefn == Undefined);
- // 6. Let yString be ? ToString(y).
- const yString = ToString_Inline(y);
+ if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
+ return SmiLexicographicCompare(UnsafeCast<Smi>(x), UnsafeCast<Smi>(y));
+ }
- // 7. Let xSmaller be the result of performing
- // Abstract Relational Comparison xString < yString.
- // 8. If xSmaller is true, return -1.
- if (StringLessThan(context, xString, yString) == True) return -1;
+ // 5. Let xString be ? ToString(x).
+ const xString = ToString_Inline(x);
- // 9. Let ySmaller be the result of performing
- // Abstract Relational Comparison yString < xString.
- // 10. If ySmaller is true, return 1.
- if (StringLessThan(context, yString, xString) == True) return 1;
+ // 6. Let yString be ? ToString(y).
+ const yString = ToString_Inline(y);
- // 11. Return +0.
- return 0;
- }
+ // 7. Let xSmaller be the result of performing
+ // Abstract Relational Comparison xString < yString.
+ // 8. If xSmaller is true, return -1.
+ if (StringLessThan(context, xString, yString) == True) return -1;
- transitioning builtin SortCompareUserFn(
- context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn != Undefined);
- const cmpfn = UnsafeCast<Callable>(comparefn);
+ // 9. Let ySmaller be the result of performing
+ // Abstract Relational Comparison yString < xString.
+ // 10. If ySmaller is true, return 1.
+ if (StringLessThan(context, yString, xString) == True) return 1;
- // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
- const v = ToNumber_Inline(Call(context, cmpfn, Undefined, x, y));
+ // 11. Return +0.
+ return 0;
+}
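
Ignoring the Smi fast path, the default comparison amounts to the following sketch at the JavaScript level (sortCompareDefault is an illustrative name, not the builtin itself):

    // Both values are converted to strings and ordered lexicographically,
    // mirroring spec steps 5-11 quoted in the comments above.
    function sortCompareDefault(x: unknown, y: unknown): number {
      const xString = String(x);
      const yString = String(y);
      if (xString < yString) return -1;
      if (yString < xString) return 1;
      return 0;
    }

    // Example: [10, 9, 1].sort() orders as [1, 10, 9], because "10" < "9".
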
- // b. If v is NaN, return +0.
- if (NumberIsNaN(v)) return 0;
+transitioning builtin SortCompareUserFn(
+ context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
+ assert(comparefn != Undefined);
+ const cmpfn = UnsafeCast<Callable>(comparefn);
- // c. return v.
- return v;
- }
+ // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
+ const v = ToNumber_Inline(Call(context, cmpfn, Undefined, x, y));
- builtin CanUseSameAccessor<ElementsAccessor : type extends ElementsKind>(
- context: Context, receiver: JSReceiver, initialReceiverMap: Map,
- initialReceiverLength: Number): Boolean {
- if (receiver.map != initialReceiverMap) return False;
+ // b. If v is NaN, return +0.
+ if (NumberIsNaN(v)) return 0;
- assert(TaggedIsSmi(initialReceiverLength));
- const array = UnsafeCast<JSArray>(receiver);
- const originalLength = UnsafeCast<Smi>(initialReceiverLength);
+ // c. return v.
+ return v;
+}
- return SelectBooleanConstant(
- UnsafeCast<Smi>(array.length) == originalLength);
- }
+builtin CanUseSameAccessor<ElementsAccessor : type extends ElementsKind>(
+ context: Context, receiver: JSReceiver, initialReceiverMap: Map,
+ initialReceiverLength: Number): Boolean {
+ if (receiver.map != initialReceiverMap) return False;
- CanUseSameAccessor<GenericElementsAccessor>(
- _context: Context, _receiver: JSReceiver, _initialReceiverMap: Map,
- _initialReceiverLength: Number): Boolean {
- // Do nothing. We are already on the slow path.
- return True;
- }
+ assert(TaggedIsSmi(initialReceiverLength));
+ const array = UnsafeCast<JSArray>(receiver);
+ const originalLength = UnsafeCast<Smi>(initialReceiverLength);
- // Re-loading the stack-size is done in a few places. The small macro allows
- // for easier invariant checks at all use sites.
- macro GetPendingRunsSize(implicit context: Context)(sortState: SortState):
- Smi {
- const stackSize: Smi = sortState.pendingRunsSize;
- assert(stackSize >= 0);
- return stackSize;
- }
+ return SelectBooleanConstant(UnsafeCast<Smi>(array.length) == originalLength);
+}
- macro GetPendingRunBase(implicit context:
- Context)(pendingRuns: FixedArray, run: Smi): Smi {
- return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
- }
+CanUseSameAccessor<GenericElementsAccessor>(
+ _context: Context, _receiver: JSReceiver, _initialReceiverMap: Map,
+ _initialReceiverLength: Number): Boolean {
+ // Do nothing. We are already on the slow path.
+ return True;
+}
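
Conceptually the fast-path check boils down to the sketch below, assuming a snapshot of the receiver's hidden class and length taken before sorting. The map field here only stands in for the internal map pointer, which is not observable from JavaScript.

    interface ReceiverSnapshot { map: object; length: number; }

    // After every call into user code (the comparison function), the sort
    // re-checks that neither the hidden class nor the length has changed;
    // otherwise it falls back to the generic accessors.
    function canUseSameAccessor(receiver: ReceiverSnapshot,
                                initial: ReceiverSnapshot): boolean {
      return receiver.map === initial.map && receiver.length === initial.length;
    }
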
- macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
- pendingRuns.objects[run << 1] = value;
- }
+// Re-loading the stack-size is done in a few places. The small macro allows
+// for easier invariant checks at all use sites.
+macro GetPendingRunsSize(implicit context: Context)(sortState: SortState): Smi {
+ const stackSize: Smi = sortState.pendingRunsSize;
+ assert(stackSize >= 0);
+ return stackSize;
+}
- macro GetPendingRunLength(implicit context: Context)(
- pendingRuns: FixedArray, run: Smi): Smi {
- return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
- }
+macro GetPendingRunBase(implicit context: Context)(
+ pendingRuns: FixedArray, run: Smi): Smi {
+ return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
+}
- macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
- pendingRuns.objects[(run << 1) + 1] = value;
- }
+macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
+ pendingRuns.objects[run << 1] = value;
+}
- macro PushRun(implicit context:
- Context)(sortState: SortState, base: Smi, length: Smi) {
- assert(GetPendingRunsSize(sortState) < kMaxMergePending);
+macro GetPendingRunLength(implicit context: Context)(
+ pendingRuns: FixedArray, run: Smi): Smi {
+ return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
+}
- const stackSize: Smi = GetPendingRunsSize(sortState);
- const pendingRuns: FixedArray = sortState.pendingRuns;
+macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
+ pendingRuns.objects[(run << 1) + 1] = value;
+}
- SetPendingRunBase(pendingRuns, stackSize, base);
- SetPendingRunLength(pendingRuns, stackSize, length);
+macro PushRun(implicit context: Context)(
+ sortState: SortState, base: Smi, length: Smi) {
+ assert(GetPendingRunsSize(sortState) < kMaxMergePending);
- sortState.pendingRunsSize = stackSize + 1;
- }
+ const stackSize: Smi = GetPendingRunsSize(sortState);
+ const pendingRuns: FixedArray = sortState.pendingRuns;
- // Returns the temporary array and makes sure that it is big enough.
- // TODO(szuend): Implement a better re-size strategy.
- macro GetTempArray(implicit context: Context)(
- sortState: SortState, requestedSize: Smi): FixedArray {
- const minSize: Smi = SmiMax(kSortStateTempSize, requestedSize);
+ SetPendingRunBase(pendingRuns, stackSize, base);
+ SetPendingRunLength(pendingRuns, stackSize, length);
- const currentSize: Smi = sortState.tempArray.length;
- if (currentSize >= minSize) {
- return sortState.tempArray;
- }
+ sortState.pendingRunsSize = stackSize + 1;
+}
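
The pending-runs stack is a flat FixedArray in which run i occupies two consecutive slots: its base offset at index 2 * i and its length at index 2 * i + 1. A rough TypeScript sketch of the layout and of PushRun (names are illustrative):

    type PendingRuns = number[];  // [base0, length0, base1, length1, ...]

    function pushRun(runs: PendingRuns, stackSize: number,
                     base: number, length: number): number {
      runs[stackSize << 1] = base;
      runs[(stackSize << 1) + 1] = length;
      return stackSize + 1;  // the new pendingRunsSize
    }
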
- const tempArray: FixedArray =
- AllocateZeroedFixedArray(Convert<intptr>(minSize));
+// Returns the temporary array and makes sure that it is big enough.
+// TODO(szuend): Implement a better re-size strategy.
+macro GetTempArray(implicit context: Context)(
+ sortState: SortState, requestedSize: Smi): FixedArray {
+ const minSize: Smi = SmiMax(kSortStateTempSize, requestedSize);
- sortState.tempArray = tempArray;
- return tempArray;
+ const currentSize: Smi = sortState.tempArray.length;
+ if (currentSize >= minSize) {
+ return sortState.tempArray;
}
- transitioning builtin
- Copy(implicit context: Context)(
- source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
- length: Smi): JSAny {
- assert(srcPos >= 0);
- assert(dstPos >= 0);
- assert(srcPos <= source.length - length);
- assert(dstPos <= target.length - length);
-
- // TODO(szuend): Investigate whether this builtin should be replaced
- // by CopyElements/MoveElements for perfomance.
-
- // source and target might be the same array. To avoid overwriting
- // values in the case of overlaping ranges, elements are copied from
- // the back when srcPos < dstPos.
- if (srcPos < dstPos) {
- let srcIdx: Smi = srcPos + length - 1;
- let dstIdx: Smi = dstPos + length - 1;
- while (srcIdx >= srcPos) {
- target.objects[dstIdx--] = source.objects[srcIdx--];
- }
- } else {
- let srcIdx: Smi = srcPos;
- let dstIdx: Smi = dstPos;
- const to: Smi = srcPos + length;
+ const tempArray: FixedArray =
+ AllocateZeroedFixedArray(Convert<intptr>(minSize));
- while (srcIdx < to) {
- target.objects[dstIdx++] = source.objects[srcIdx++];
- }
+ sortState.tempArray = tempArray;
+ return tempArray;
+}
+
+transitioning builtin
+Copy(implicit context: Context)(
+ source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
+ length: Smi): JSAny {
+ assert(srcPos >= 0);
+ assert(dstPos >= 0);
+ assert(srcPos <= source.length - length);
+ assert(dstPos <= target.length - length);
+
+ // TODO(szuend): Investigate whether this builtin should be replaced
+  // by CopyElements/MoveElements for performance.
+
+ // source and target might be the same array. To avoid overwriting
+  // values in the case of overlapping ranges, elements are copied from
+ // the back when srcPos < dstPos.
+ if (srcPos < dstPos) {
+ let srcIdx: Smi = srcPos + length - 1;
+ let dstIdx: Smi = dstPos + length - 1;
+ while (srcIdx >= srcPos) {
+ target.objects[dstIdx--] = source.objects[srcIdx--];
+ }
+ } else {
+ let srcIdx: Smi = srcPos;
+ let dstIdx: Smi = dstPos;
+ const to: Smi = srcPos + length;
+
+ while (srcIdx < to) {
+ target.objects[dstIdx++] = source.objects[srcIdx++];
}
- return kSuccess;
}
+ return kSuccess;
+}
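
The direction handling is the same reasoning as memmove: when source and target alias and the destination lies to the right of the source, copying backwards keeps the not-yet-copied elements intact. A compact TypeScript sketch:

    function copyWithin(source: unknown[], srcPos: number,
                        target: unknown[], dstPos: number, length: number): void {
      if (srcPos < dstPos) {
        // Overlapping ranges with the destination further right: copy backwards.
        for (let i = length - 1; i >= 0; --i) target[dstPos + i] = source[srcPos + i];
      } else {
        for (let i = 0; i < length; ++i) target[dstPos + i] = source[srcPos + i];
      }
    }
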
- // BinaryInsertionSort is the best method for sorting small arrays: it
- // does few compares, but can do data movement quadratic in the number of
- // elements. This is an advantage since comparisons are more expensive due
- // to calling into JS.
- //
- // [low, high) is a contiguous range of a array, and is sorted via
- // binary insertion. This sort is stable.
- //
- // On entry, must have low <= start <= high, and that [low, start) is
- // already sorted. Pass start == low if you do not know!.
- macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
- low: Smi, startArg: Smi, high: Smi) {
- assert(low <= startArg && startArg <= high);
+// BinaryInsertionSort is the best method for sorting small arrays: it
+// does few compares, but can do data movement quadratic in the number of
+// elements. This is an advantage since comparisons are more expensive due
+// to calling into JS.
+//
+// [low, high) is a contiguous range of an array, and is sorted via
+// binary insertion. This sort is stable.
+//
+// On entry, must have low <= start <= high, and that [low, start) is
+// already sorted. Pass start == low if you do not know!
+macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
+ low: Smi, startArg: Smi, high: Smi) {
+ assert(low <= startArg && startArg <= high);
- const workArray = sortState.workArray;
+ const workArray = sortState.workArray;
- let start: Smi = low == startArg ? (startArg + 1) : startArg;
+ let start: Smi = low == startArg ? (startArg + 1) : startArg;
- for (; start < high; ++start) {
- // Set left to where a[start] belongs.
- let left: Smi = low;
- let right: Smi = start;
+ for (; start < high; ++start) {
+ // Set left to where a[start] belongs.
+ let left: Smi = low;
+ let right: Smi = start;
- const pivot = UnsafeCast<JSAny>(workArray.objects[right]);
+ const pivot = UnsafeCast<JSAny>(workArray.objects[right]);
- // Invariants:
- // pivot >= all in [low, left).
- // pivot < all in [right, start).
- assert(left < right);
+ // Invariants:
+ // pivot >= all in [low, left).
+ // pivot < all in [right, start).
+ assert(left < right);
- // Find pivot insertion point.
- while (left < right) {
- const mid: Smi = left + ((right - left) >> 1);
- const order =
- sortState.Compare(pivot, UnsafeCast<JSAny>(workArray.objects[mid]));
+ // Find pivot insertion point.
+ while (left < right) {
+ const mid: Smi = left + ((right - left) >> 1);
+ const order =
+ sortState.Compare(pivot, UnsafeCast<JSAny>(workArray.objects[mid]));
- if (order < 0) {
- right = mid;
- } else {
- left = mid + 1;
- }
- }
- assert(left == right);
-
- // The invariants still hold, so:
- // pivot >= all in [low, left) and
- // pivot < all in [left, start),
- //
- // so pivot belongs at left. Note that if there are elements equal
- // to pivot, left points to the first slot after them -- that's why
- // this sort is stable. Slide over to make room.
- for (let p: Smi = start; p > left; --p) {
- workArray.objects[p] = workArray.objects[p - 1];
+ if (order < 0) {
+ right = mid;
+ } else {
+ left = mid + 1;
}
- workArray.objects[left] = pivot;
}
- }
+ assert(left == right);
- // Return the length of the run beginning at low, in the range [low,
- // high), low < high is required on entry. "A run" is the longest
- // ascending sequence, with
- //
- // a[low] <= a[low + 1] <= a[low + 2] <= ...
- //
- // or the longest descending sequence, with
- //
- // a[low] > a[low + 1] > a[low + 2] > ...
- //
- // For its intended use in stable mergesort, the strictness of the
- // definition of "descending" is needed so that the range can safely be
- // reversed without violating stability (strict ">" ensures there are no
- // equal elements to get out of order).
- //
- // In addition, if the run is "descending", it is reversed, so the
- // returned length is always an ascending sequence.
- macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
- lowArg: Smi, high: Smi): Smi {
- assert(lowArg < high);
+ // The invariants still hold, so:
+ // pivot >= all in [low, left) and
+ // pivot < all in [left, start),
+ //
+ // so pivot belongs at left. Note that if there are elements equal
+ // to pivot, left points to the first slot after them -- that's why
+ // this sort is stable. Slide over to make room.
+ for (let p: Smi = start; p > left; --p) {
+ workArray.objects[p] = workArray.objects[p - 1];
+ }
+ workArray.objects[left] = pivot;
+ }
+}
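
The macro follows the classic binary insertion sort; a self-contained sketch with an explicit comparator, where cmp(x, y) < 0 means x sorts before y:

    function binaryInsertionSort<T>(a: T[], low: number, start: number, high: number,
                                    cmp: (x: T, y: T) => number): void {
      if (start === low) start++;
      for (; start < high; ++start) {
        const pivot = a[start];
        // Binary search for the first slot after any elements equal to pivot;
        // inserting there is what makes the sort stable.
        let left = low, right = start;
        while (left < right) {
          const mid = left + ((right - left) >> 1);
          if (cmp(pivot, a[mid]) < 0) right = mid;
          else left = mid + 1;
        }
        // Slide the larger elements over and drop the pivot into place.
        for (let p = start; p > left; --p) a[p] = a[p - 1];
        a[left] = pivot;
      }
    }
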
- const workArray = sortState.workArray;
+// Return the length of the run beginning at low, in the range [low,
+// high), low < high is required on entry. "A run" is the longest
+// ascending sequence, with
+//
+// a[low] <= a[low + 1] <= a[low + 2] <= ...
+//
+// or the longest descending sequence, with
+//
+// a[low] > a[low + 1] > a[low + 2] > ...
+//
+// For its intended use in stable mergesort, the strictness of the
+// definition of "descending" is needed so that the range can safely be
+// reversed without violating stability (strict ">" ensures there are no
+// equal elements to get out of order).
+//
+// In addition, if the run is "descending", it is reversed, so the
+// returned length is always an ascending sequence.
+macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
+ lowArg: Smi, high: Smi): Smi {
+ assert(lowArg < high);
- const low: Smi = lowArg + 1;
- if (low == high) return 1;
+ const workArray = sortState.workArray;
- let runLength: Smi = 2;
+ const low: Smi = lowArg + 1;
+ if (low == high) return 1;
- const elementLow = UnsafeCast<JSAny>(workArray.objects[low]);
- const elementLowPred = UnsafeCast<JSAny>(workArray.objects[low - 1]);
- let order = sortState.Compare(elementLow, elementLowPred);
+ let runLength: Smi = 2;
- // TODO(szuend): Replace with "order < 0" once Torque supports it.
- // Currently the operator<(Number, Number) has return type
- // 'never' and uses two labels to branch.
- const isDescending: bool = order < 0 ? true : false;
+ const elementLow = UnsafeCast<JSAny>(workArray.objects[low]);
+ const elementLowPred = UnsafeCast<JSAny>(workArray.objects[low - 1]);
+ let order = sortState.Compare(elementLow, elementLowPred);
- let previousElement: JSAny = elementLow;
- for (let idx: Smi = low + 1; idx < high; ++idx) {
- const currentElement = UnsafeCast<JSAny>(workArray.objects[idx]);
- order = sortState.Compare(currentElement, previousElement);
+ // TODO(szuend): Replace with "order < 0" once Torque supports it.
+ // Currently the operator<(Number, Number) has return type
+ // 'never' and uses two labels to branch.
+ const isDescending: bool = order < 0 ? true : false;
- if (isDescending) {
- if (order >= 0) break;
- } else {
- if (order < 0) break;
- }
-
- previousElement = currentElement;
- ++runLength;
- }
+ let previousElement: JSAny = elementLow;
+ for (let idx: Smi = low + 1; idx < high; ++idx) {
+ const currentElement = UnsafeCast<JSAny>(workArray.objects[idx]);
+ order = sortState.Compare(currentElement, previousElement);
if (isDescending) {
- ReverseRange(workArray, lowArg, lowArg + runLength);
+ if (order >= 0) break;
+ } else {
+ if (order < 0) break;
}
- return runLength;
+ previousElement = currentElement;
+ ++runLength;
}
- macro ReverseRange(array: FixedArray, from: Smi, to: Smi) {
- let low: Smi = from;
- let high: Smi = to - 1;
-
- while (low < high) {
- const elementLow = array.objects[low];
- const elementHigh = array.objects[high];
- array.objects[low++] = elementHigh;
- array.objects[high--] = elementLow;
- }
+ if (isDescending) {
+ ReverseRange(workArray, lowArg, lowArg + runLength);
}
- // Merges the two runs at stack indices i and i + 1.
- // Returns kFailure if we need to bailout, kSuccess otherwise.
- transitioning builtin
- MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
- const stackSize: Smi = GetPendingRunsSize(sortState);
-
- // We are only allowed to either merge the two top-most runs, or leave
- // the top most run alone and merge the two next runs.
- assert(stackSize >= 2);
- assert(i >= 0);
- assert(i == stackSize - 2 || i == stackSize - 3);
-
- const workArray = sortState.workArray;
-
- const pendingRuns: FixedArray = sortState.pendingRuns;
- let baseA: Smi = GetPendingRunBase(pendingRuns, i);
- let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
- const baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
- let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
- assert(lengthA > 0 && lengthB > 0);
- assert(baseA + lengthA == baseB);
-
- // Record the length of the combined runs; if i is the 3rd-last run now,
- // also slide over the last run (which isn't involved in this merge).
- // The current run i + 1 goes away in any case.
- SetPendingRunLength(pendingRuns, i, lengthA + lengthB);
- if (i == stackSize - 3) {
- const base: Smi = GetPendingRunBase(pendingRuns, i + 2);
- const length: Smi = GetPendingRunLength(pendingRuns, i + 2);
- SetPendingRunBase(pendingRuns, i + 1, base);
- SetPendingRunLength(pendingRuns, i + 1, length);
- }
- sortState.pendingRunsSize = stackSize - 1;
-
- // Where does b start in a? Elements in a before that can be ignored,
- // because they are already in place.
- const keyRight = UnsafeCast<JSAny>(workArray.objects[baseB]);
- const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
- assert(k >= 0);
-
- baseA = baseA + k;
- lengthA = lengthA - k;
- if (lengthA == 0) return kSuccess;
- assert(lengthA > 0);
-
- // Where does a end in b? Elements in b after that can be ignored,
- // because they are already in place.
- const keyLeft = UnsafeCast<JSAny>(workArray.objects[baseA + lengthA - 1]);
- lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
- assert(lengthB >= 0);
- if (lengthB == 0) return kSuccess;
-
- // Merge what remains of the runs, using a temp array with
- // min(lengthA, lengthB) elements.
- if (lengthA <= lengthB) {
- MergeLow(baseA, lengthA, baseB, lengthB);
- } else {
- MergeHigh(baseA, lengthA, baseB, lengthB);
- }
- return kSuccess;
- }
-
- // Locates the proper position of key in a sorted array; if the array
- // contains an element equal to key, return the position immediately to
- // the left of the leftmost equal element. (GallopRight does the same
- // except returns the position to the right of the rightmost equal element
- // (if any)).
- //
- // The array is sorted with "length" elements, starting at "base".
- // "length" must be > 0.
- //
- // "hint" is an index at which to begin the search, 0 <= hint < n. The
- // closer hint is to the final result, the faster this runs.
- //
- // The return value is the int offset in 0..length such that
- //
- // array[base + offset] < key <= array[base + offset + 1]
- //
- // pretending that array[base - 1] is minus infinity and array[base + len]
- // is plus infinity. In other words, key belongs at index base + k.
- builtin GallopLeft(implicit context: Context, sortState: SortState)(
- array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
-
- let lastOfs: Smi = 0;
- let offset: Smi = 1;
-
- const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
- let order = sortState.Compare(baseHintElement, key);
-
- if (order < 0) {
- // a[base + hint] < key: gallop right, until
- // a[base + hint + lastOfs] < key <= a[base + hint + offset].
+ return runLength;
+}
- // a[base + length - 1] is highest.
- const maxOfs: Smi = length - hint;
- while (offset < maxOfs) {
- const offsetElement =
- UnsafeCast<JSAny>(array.objects[base + hint + offset]);
- order = sortState.Compare(offsetElement, key);
+macro ReverseRange(array: FixedArray, from: Smi, to: Smi) {
+ let low: Smi = from;
+ let high: Smi = to - 1;
- // a[base + hint + offset] >= key? Break.
- if (order >= 0) break;
+ while (low < high) {
+ const elementLow = array.objects[low];
+ const elementHigh = array.objects[high];
+ array.objects[low++] = elementHigh;
+ array.objects[high--] = elementLow;
+ }
+}
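
Taken together, CountAndMakeRun and ReverseRange amount to the sketch below. Descending runs must be strictly descending so that reversing them cannot swap equal elements and break stability.

    function countAndMakeRun<T>(a: T[], low: number, high: number,
                                cmp: (x: T, y: T) => number): number {
      if (low + 1 === high) return 1;

      let runLength = 2;
      const descending = cmp(a[low + 1], a[low]) < 0;
      for (let i = low + 2; i < high; ++i) {
        const order = cmp(a[i], a[i - 1]);
        if (descending ? order >= 0 : order < 0) break;
        ++runLength;
      }
      if (descending) {
        // Reverse a[low, low + runLength) in place so the caller always
        // receives an ascending run.
        for (let l = low, r = low + runLength - 1; l < r; ++l, --r) {
          [a[l], a[r]] = [a[r], a[l]];
        }
      }
      return runLength;
    }
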
- lastOfs = offset;
- offset = (offset << 1) + 1;
+// Merges the two runs at stack indices i and i + 1.
+// Returns kFailure if we need to bailout, kSuccess otherwise.
+transitioning builtin
+MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
+ const stackSize: Smi = GetPendingRunsSize(sortState);
+
+ // We are only allowed to either merge the two top-most runs, or leave
+ // the top most run alone and merge the two next runs.
+ assert(stackSize >= 2);
+ assert(i >= 0);
+ assert(i == stackSize - 2 || i == stackSize - 3);
+
+ const workArray = sortState.workArray;
+
+ const pendingRuns: FixedArray = sortState.pendingRuns;
+ let baseA: Smi = GetPendingRunBase(pendingRuns, i);
+ let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
+ const baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
+ let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
+ assert(lengthA > 0 && lengthB > 0);
+ assert(baseA + lengthA == baseB);
+
+ // Record the length of the combined runs; if i is the 3rd-last run now,
+ // also slide over the last run (which isn't involved in this merge).
+ // The current run i + 1 goes away in any case.
+ SetPendingRunLength(pendingRuns, i, lengthA + lengthB);
+ if (i == stackSize - 3) {
+ const base: Smi = GetPendingRunBase(pendingRuns, i + 2);
+ const length: Smi = GetPendingRunLength(pendingRuns, i + 2);
+ SetPendingRunBase(pendingRuns, i + 1, base);
+ SetPendingRunLength(pendingRuns, i + 1, length);
+ }
+ sortState.pendingRunsSize = stackSize - 1;
+
+ // Where does b start in a? Elements in a before that can be ignored,
+ // because they are already in place.
+ const keyRight = UnsafeCast<JSAny>(workArray.objects[baseB]);
+ const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
+ assert(k >= 0);
+
+ baseA = baseA + k;
+ lengthA = lengthA - k;
+ if (lengthA == 0) return kSuccess;
+ assert(lengthA > 0);
+
+ // Where does a end in b? Elements in b after that can be ignored,
+ // because they are already in place.
+ const keyLeft = UnsafeCast<JSAny>(workArray.objects[baseA + lengthA - 1]);
+ lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
+ assert(lengthB >= 0);
+ if (lengthB == 0) return kSuccess;
+
+ // Merge what remains of the runs, using a temp array with
+ // min(lengthA, lengthB) elements.
+ if (lengthA <= lengthB) {
+ MergeLow(baseA, lengthA, baseB, lengthB);
+ } else {
+ MergeHigh(baseA, lengthA, baseB, lengthB);
+ }
+ return kSuccess;
+}
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+// Locates the proper position of key in a sorted array; if the array
+// contains an element equal to key, return the position immediately to
+// the left of the leftmost equal element. (GallopRight does the same
+// except returns the position to the right of the rightmost equal element
+// (if any)).
+//
+// The array is sorted with "length" elements, starting at "base".
+// "length" must be > 0.
+//
+// "hint" is an index at which to begin the search, 0 <= hint < n. The
+// closer hint is to the final result, the faster this runs.
+//
+// The return value is the int offset in 0..length such that
+//
+// array[base + offset] < key <= array[base + offset + 1]
+//
+// pretending that array[base - 1] is minus infinity and array[base + len]
+// is plus infinity. In other words, key belongs at index base + k.
+builtin GallopLeft(implicit context: Context, sortState: SortState)(
+ array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
+ assert(length > 0 && base >= 0);
+ assert(0 <= hint && hint < length);
+
+ let lastOfs: Smi = 0;
+ let offset: Smi = 1;
+
+ const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
+ let order = sortState.Compare(baseHintElement, key);
+
+ if (order < 0) {
+ // a[base + hint] < key: gallop right, until
+ // a[base + hint + lastOfs] < key <= a[base + hint + offset].
+
+ // a[base + length - 1] is highest.
+ const maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement =
+ UnsafeCast<JSAny>(array.objects[base + hint + offset]);
+ order = sortState.Compare(offsetElement, key);
+
+ // a[base + hint + offset] >= key? Break.
+ if (order >= 0) break;
+
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
+ }
- if (offset > maxOfs) offset = maxOfs;
+ if (offset > maxOfs) offset = maxOfs;
- // Translate back to positive offsets relative to base.
- lastOfs = lastOfs + hint;
- offset = offset + hint;
- } else {
- // key <= a[base + hint]: gallop left, until
- // a[base + hint - offset] < key <= a[base + hint - lastOfs].
- assert(order >= 0);
+ // Translate back to positive offsets relative to base.
+ lastOfs = lastOfs + hint;
+ offset = offset + hint;
+ } else {
+ // key <= a[base + hint]: gallop left, until
+ // a[base + hint - offset] < key <= a[base + hint - lastOfs].
+ assert(order >= 0);
- // a[base + hint] is lowest.
- const maxOfs: Smi = hint + 1;
- while (offset < maxOfs) {
- const offsetElement =
- UnsafeCast<JSAny>(array.objects[base + hint - offset]);
- order = sortState.Compare(offsetElement, key);
+ // a[base + hint] is lowest.
+ const maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement =
+ UnsafeCast<JSAny>(array.objects[base + hint - offset]);
+ order = sortState.Compare(offsetElement, key);
- if (order < 0) break;
+ if (order < 0) break;
- lastOfs = offset;
- offset = (offset << 1) + 1;
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
+ }
- if (offset > maxOfs) offset = maxOfs;
+ if (offset > maxOfs) offset = maxOfs;
- // Translate back to positive offsets relative to base.
- const tmp: Smi = lastOfs;
- lastOfs = hint - offset;
- offset = hint - tmp;
- }
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
+ offset = hint - tmp;
+ }
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
- // Now a[base+lastOfs] < key <= a[base+offset], so key belongs
- // somewhere to the right of lastOfs but no farther right than offset.
- // Do a binary search, with invariant:
- // a[base + lastOfs - 1] < key <= a[base + offset].
- lastOfs++;
- while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
+ // Now a[base+lastOfs] < key <= a[base+offset], so key belongs
+ // somewhere to the right of lastOfs but no farther right than offset.
+ // Do a binary search, with invariant:
+ // a[base + lastOfs - 1] < key <= a[base + offset].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
- order =
- sortState.Compare(UnsafeCast<JSAny>(array.objects[base + m]), key);
+ order = sortState.Compare(UnsafeCast<JSAny>(array.objects[base + m]), key);
- if (order < 0) {
- lastOfs = m + 1; // a[base + m] < key.
- } else {
- offset = m; // key <= a[base + m].
- }
+ if (order < 0) {
+ lastOfs = m + 1; // a[base + m] < key.
+ } else {
+ offset = m; // key <= a[base + m].
}
- // so a[base + offset - 1] < key <= a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
- return offset;
}
+ // so a[base + offset - 1] < key <= a[base + offset].
+ assert(lastOfs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
+}
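
The contract of GallopLeft can be illustrated with a plain binary search; the exponential probing from hint, which is what makes the real builtin adaptive, is deliberately left out of this sketch.

    // Returns offset in [0, length] such that
    //   a[base + offset - 1] < key <= a[base + offset],
    // i.e. the leftmost position at which key could be inserted into the
    // sorted run a[base, base + length).
    function gallopLeft<T>(a: T[], key: T, base: number, length: number,
                           cmp: (x: T, y: T) => number): number {
      let lo = 0, hi = length;
      while (lo < hi) {
        const mid = lo + ((hi - lo) >> 1);
        if (cmp(a[base + mid], key) < 0) lo = mid + 1;  // a[base + mid] < key
        else hi = mid;                                  // key <= a[base + mid]
      }
      return lo;
    }
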
- // Exactly like GallopLeft, except that if key already exists in
- // [base, base + length), finds the position immediately to the right of
- // the rightmost equal value.
- //
- // The return value is the int offset in 0..length such that
- //
- // array[base + offset - 1] <= key < array[base + offset]
- //
- // or kFailure on error.
- builtin GallopRight(implicit context: Context, sortState: SortState)(
- array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
+// Exactly like GallopLeft, except that if key already exists in
+// [base, base + length), finds the position immediately to the right of
+// the rightmost equal value.
+//
+// The return value is the int offset in 0..length such that
+//
+// array[base + offset - 1] <= key < array[base + offset]
+//
+// or kFailure on error.
+builtin GallopRight(implicit context: Context, sortState: SortState)(
+ array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
+ assert(length > 0 && base >= 0);
+ assert(0 <= hint && hint < length);
- let lastOfs: Smi = 0;
- let offset: Smi = 1;
+ let lastOfs: Smi = 0;
+ let offset: Smi = 1;
- const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
- let order = sortState.Compare(key, baseHintElement);
+ const baseHintElement = UnsafeCast<JSAny>(array.objects[base + hint]);
+ let order = sortState.Compare(key, baseHintElement);
- if (order < 0) {
- // key < a[base + hint]: gallop left, until
- // a[base + hint - offset] <= key < a[base + hint - lastOfs].
+ if (order < 0) {
+ // key < a[base + hint]: gallop left, until
+ // a[base + hint - offset] <= key < a[base + hint - lastOfs].
- // a[base + hint] is lowest.
- const maxOfs: Smi = hint + 1;
- while (offset < maxOfs) {
- const offsetElement =
- UnsafeCast<JSAny>(array.objects[base + hint - offset]);
- order = sortState.Compare(key, offsetElement);
+ // a[base + hint] is lowest.
+ const maxOfs: Smi = hint + 1;
+ while (offset < maxOfs) {
+ const offsetElement =
+ UnsafeCast<JSAny>(array.objects[base + hint - offset]);
+ order = sortState.Compare(key, offsetElement);
- if (order >= 0) break;
+ if (order >= 0) break;
- lastOfs = offset;
- offset = (offset << 1) + 1;
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
+ }
- if (offset > maxOfs) offset = maxOfs;
+ if (offset > maxOfs) offset = maxOfs;
- // Translate back to positive offsets relative to base.
- const tmp: Smi = lastOfs;
- lastOfs = hint - offset;
- offset = hint - tmp;
- } else {
- // a[base + hint] <= key: gallop right, until
- // a[base + hint + lastOfs] <= key < a[base + hint + offset].
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = lastOfs;
+ lastOfs = hint - offset;
+ offset = hint - tmp;
+ } else {
+ // a[base + hint] <= key: gallop right, until
+ // a[base + hint + lastOfs] <= key < a[base + hint + offset].
- // a[base + length - 1] is highest.
- const maxOfs: Smi = length - hint;
- while (offset < maxOfs) {
- const offsetElement =
- UnsafeCast<JSAny>(array.objects[base + hint + offset]);
- order = sortState.Compare(key, offsetElement);
+ // a[base + length - 1] is highest.
+ const maxOfs: Smi = length - hint;
+ while (offset < maxOfs) {
+ const offsetElement =
+ UnsafeCast<JSAny>(array.objects[base + hint + offset]);
+ order = sortState.Compare(key, offsetElement);
- // a[base + hint + ofs] <= key.
- if (order < 0) break;
+ // a[base + hint + ofs] <= key.
+ if (order < 0) break;
- lastOfs = offset;
- offset = (offset << 1) + 1;
+ lastOfs = offset;
+ offset = (offset << 1) + 1;
- // Integer overflow.
- if (offset <= 0) offset = maxOfs;
- }
+ // Integer overflow.
+ if (offset <= 0) offset = maxOfs;
+ }
- if (offset > maxOfs) offset = maxOfs;
+ if (offset > maxOfs) offset = maxOfs;
- // Translate back to positive offests relative to base.
- lastOfs = lastOfs + hint;
- offset = offset + hint;
- }
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+    // Translate back to positive offsets relative to base.
+ lastOfs = lastOfs + hint;
+ offset = offset + hint;
+ }
+ assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
- // Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
- // somewhere to the right of lastOfs but no farther right than ofs.
- // Do a binary search, with invariant
- // a[base + lastOfs - 1] < key <= a[base + ofs].
- lastOfs++;
- while (lastOfs < offset) {
- const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
+ // Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
+ // somewhere to the right of lastOfs but no farther right than ofs.
+ // Do a binary search, with invariant
+  // a[base + lastOfs - 1] <= key < a[base + ofs].
+ lastOfs++;
+ while (lastOfs < offset) {
+ const m: Smi = lastOfs + ((offset - lastOfs) >> 1);
- order =
- sortState.Compare(key, UnsafeCast<JSAny>(array.objects[base + m]));
+ order = sortState.Compare(key, UnsafeCast<JSAny>(array.objects[base + m]));
- if (order < 0) {
- offset = m; // key < a[base + m].
- } else {
- lastOfs = m + 1; // a[base + m] <= key.
- }
+ if (order < 0) {
+ offset = m; // key < a[base + m].
+ } else {
+ lastOfs = m + 1; // a[base + m] <= key.
}
- // so a[base + offset - 1] <= key < a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
- return offset;
}
+ // so a[base + offset - 1] <= key < a[base + offset].
+ assert(lastOfs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
+}
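
GallopRight differs only in how ties are resolved; the same simplified sketch, again without the galloping probe:

    // Returns offset in [0, length] such that
    //   a[base + offset - 1] <= key < a[base + offset],
    // i.e. the position just past the rightmost element equal to key.
    function gallopRight<T>(a: T[], key: T, base: number, length: number,
                            cmp: (x: T, y: T) => number): number {
      let lo = 0, hi = length;
      while (lo < hi) {
        const mid = lo + ((hi - lo) >> 1);
        if (cmp(key, a[base + mid]) < 0) hi = mid;  // key < a[base + mid]
        else lo = mid + 1;                          // a[base + mid] <= key
      }
      return lo;
    }

    // On a run with duplicates the two only differ on ties: for [1, 2, 2, 2, 3]
    // and key 2, a GallopLeft-style search yields 1 while gallopRight yields 4.
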
- // Merge the lengthA elements starting at baseA with the lengthB elements
- // starting at baseB in a stable way, in-place. lengthA and lengthB must
- // be > 0, and baseA + lengthA == baseB. Must also have that
- // array[baseB] < array[baseA],
- // that array[baseA + lengthA - 1] belongs at the end of the merge,
- // and should have lengthA <= lengthB.
- transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
- baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
+// Merge the lengthA elements starting at baseA with the lengthB elements
+// starting at baseB in a stable way, in-place. lengthA and lengthB must
+// be > 0, and baseA + lengthA == baseB. Must also have that
+// array[baseB] < array[baseA],
+// that array[baseA + lengthA - 1] belongs at the end of the merge,
+// and should have lengthA <= lengthB.
+transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
+ assert(0 < lengthAArg && 0 < lengthBArg);
+ assert(0 <= baseA && 0 < baseB);
+ assert(baseA + lengthAArg == baseB);
+
+ let lengthA: Smi = lengthAArg;
+ let lengthB: Smi = lengthBArg;
+
+ const workArray = sortState.workArray;
+ const tempArray: FixedArray = GetTempArray(sortState, lengthA);
+ Copy(workArray, baseA, tempArray, 0, lengthA);
+
+ let dest: Smi = baseA;
+ let cursorTemp: Smi = 0;
+ let cursorB: Smi = baseB;
+
+ workArray.objects[dest++] = workArray.objects[cursorB++];
+
+ try {
+ if (--lengthB == 0) goto Succeed;
+ if (lengthA == 1) goto CopyB;
+
+ let minGallop: Smi = sortState.minGallop;
+ // TODO(szuend): Replace with something that does not have a runtime
+    // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ let nofWinsA: Smi = 0; // # of times A won in a row.
+ let nofWinsB: Smi = 0; // # of times B won in a row.
+
+ // Do the straightforward thing until (if ever) one run appears to
+ // win consistently.
+ // TODO(szuend): Replace with something that does not have a runtime
+      // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ assert(lengthA > 1 && lengthB > 0);
- let lengthA: Smi = lengthAArg;
- let lengthB: Smi = lengthBArg;
+ const order = sortState.Compare(
+ UnsafeCast<JSAny>(workArray.objects[cursorB]),
+ UnsafeCast<JSAny>(tempArray.objects[cursorTemp]));
- const workArray = sortState.workArray;
- const tempArray: FixedArray = GetTempArray(sortState, lengthA);
- Copy(workArray, baseA, tempArray, 0, lengthA);
+ if (order < 0) {
+ workArray.objects[dest++] = workArray.objects[cursorB++];
- let dest: Smi = baseA;
- let cursorTemp: Smi = 0;
- let cursorB: Smi = baseB;
+ ++nofWinsB;
+ --lengthB;
+ nofWinsA = 0;
- workArray.objects[dest++] = workArray.objects[cursorB++];
+ if (lengthB == 0) goto Succeed;
+ if (nofWinsB >= minGallop) break;
+ } else {
+ workArray.objects[dest++] = tempArray.objects[cursorTemp++];
- try {
- if (--lengthB == 0) goto Succeed;
- if (lengthA == 1) goto CopyB;
+ ++nofWinsA;
+ --lengthA;
+ nofWinsB = 0;
- let minGallop: Smi = sortState.minGallop;
- // TODO(szuend): Replace with something that does not have a runtime
- // overhead as soon as its available in Torque.
- while (Int32TrueConstant()) {
- let nofWinsA: Smi = 0; // # of times A won in a row.
- let nofWinsB: Smi = 0; // # of times B won in a row.
-
- // Do the straightforward thing until (if ever) one run appears to
- // win consistently.
- // TODO(szuend): Replace with something that does not have a runtime
- // overhead as soon as its available in Torque.
- while (Int32TrueConstant()) {
- assert(lengthA > 1 && lengthB > 0);
-
- const order = sortState.Compare(
- UnsafeCast<JSAny>(workArray.objects[cursorB]),
- UnsafeCast<JSAny>(tempArray.objects[cursorTemp]));
-
- if (order < 0) {
- workArray.objects[dest++] = workArray.objects[cursorB++];
-
- ++nofWinsB;
- --lengthB;
- nofWinsA = 0;
-
- if (lengthB == 0) goto Succeed;
- if (nofWinsB >= minGallop) break;
- } else {
- workArray.objects[dest++] = tempArray.objects[cursorTemp++];
-
- ++nofWinsA;
- --lengthA;
- nofWinsB = 0;
-
- if (lengthA == 1) goto CopyB;
- if (nofWinsA >= minGallop) break;
- }
+ if (lengthA == 1) goto CopyB;
+ if (nofWinsA >= minGallop) break;
}
+ }
- // One run is winning so consistently that galloping may be a huge
- // win. So try that, and continue galloping until (if ever) neither
- // run appears to be winning consistently anymore.
- ++minGallop;
- let firstIteration: bool = true;
- while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
- firstIteration) {
- firstIteration = false;
- assert(lengthA > 1 && lengthB > 0);
-
- minGallop = SmiMax(1, minGallop - 1);
- sortState.minGallop = minGallop;
-
- nofWinsA = GallopRight(
- tempArray, UnsafeCast<JSAny>(workArray.objects[cursorB]),
- cursorTemp, lengthA, 0);
- assert(nofWinsA >= 0);
-
- if (nofWinsA > 0) {
- Copy(tempArray, cursorTemp, workArray, dest, nofWinsA);
- dest = dest + nofWinsA;
- cursorTemp = cursorTemp + nofWinsA;
- lengthA = lengthA - nofWinsA;
-
- if (lengthA == 1) goto CopyB;
-
- // lengthA == 0 is impossible now if the comparison function is
- // consistent, but we can't assume that it is.
- if (lengthA == 0) goto Succeed;
- }
- workArray.objects[dest++] = workArray.objects[cursorB++];
- if (--lengthB == 0) goto Succeed;
+ // One run is winning so consistently that galloping may be a huge
+ // win. So try that, and continue galloping until (if ever) neither
+ // run appears to be winning consistently anymore.
+ ++minGallop;
+ let firstIteration: bool = true;
+ while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
+ firstIteration) {
+ firstIteration = false;
+ assert(lengthA > 1 && lengthB > 0);
+
+ minGallop = SmiMax(1, minGallop - 1);
+ sortState.minGallop = minGallop;
- nofWinsB = GallopLeft(
- workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
- cursorB, lengthB, 0);
- assert(nofWinsB >= 0);
- if (nofWinsB > 0) {
- Copy(workArray, cursorB, workArray, dest, nofWinsB);
+ nofWinsA = GallopRight(
+ tempArray, UnsafeCast<JSAny>(workArray.objects[cursorB]),
+ cursorTemp, lengthA, 0);
+ assert(nofWinsA >= 0);
- dest = dest + nofWinsB;
- cursorB = cursorB + nofWinsB;
- lengthB = lengthB - nofWinsB;
+ if (nofWinsA > 0) {
+ Copy(tempArray, cursorTemp, workArray, dest, nofWinsA);
+ dest = dest + nofWinsA;
+ cursorTemp = cursorTemp + nofWinsA;
+ lengthA = lengthA - nofWinsA;
- if (lengthB == 0) goto Succeed;
- }
- workArray.objects[dest++] = tempArray.objects[cursorTemp++];
- if (--lengthA == 1) goto CopyB;
+ if (lengthA == 1) goto CopyB;
+
+ // lengthA == 0 is impossible now if the comparison function is
+ // consistent, but we can't assume that it is.
+ if (lengthA == 0) goto Succeed;
}
- ++minGallop; // Penalize it for leaving galloping mode
- sortState.minGallop = minGallop;
- }
- }
- label Succeed {
- if (lengthA > 0) {
- Copy(tempArray, cursorTemp, workArray, dest, lengthA);
+ workArray.objects[dest++] = workArray.objects[cursorB++];
+ if (--lengthB == 0) goto Succeed;
+
+ nofWinsB = GallopLeft(
+ workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
+ cursorB, lengthB, 0);
+ assert(nofWinsB >= 0);
+ if (nofWinsB > 0) {
+ Copy(workArray, cursorB, workArray, dest, nofWinsB);
+
+ dest = dest + nofWinsB;
+ cursorB = cursorB + nofWinsB;
+ lengthB = lengthB - nofWinsB;
+
+ if (lengthB == 0) goto Succeed;
+ }
+ workArray.objects[dest++] = tempArray.objects[cursorTemp++];
+ if (--lengthA == 1) goto CopyB;
}
+ ++minGallop; // Penalize it for leaving galloping mode
+ sortState.minGallop = minGallop;
}
- label CopyB {
- assert(lengthA == 1 && lengthB > 0);
- // The last element of run A belongs at the end of the merge.
- Copy(workArray, cursorB, workArray, dest, lengthB);
- workArray.objects[dest + lengthB] = tempArray.objects[cursorTemp];
+ } label Succeed {
+ if (lengthA > 0) {
+ Copy(tempArray, cursorTemp, workArray, dest, lengthA);
}
+ } label CopyB {
+ assert(lengthA == 1 && lengthB > 0);
+ // The last element of run A belongs at the end of the merge.
+ Copy(workArray, cursorB, workArray, dest, lengthB);
+ workArray.objects[dest + lengthB] = tempArray.objects[cursorTemp];
}
+}
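
Stripped of galloping and of the minGallop adaptation, MergeLow reduces to a conventional stable merge that buffers only the shorter run; a much-simplified sketch:

    function mergeLow<T>(a: T[], baseA: number, lengthA: number,
                         baseB: number, lengthB: number,
                         cmp: (x: T, y: T) => number): void {
      const temp = a.slice(baseA, baseA + lengthA);  // copy of run A
      let dest = baseA, cursorTemp = 0, cursorB = baseB;
      const endB = baseB + lengthB;

      while (cursorTemp < lengthA && cursorB < endB) {
        // `<= 0` keeps run A's element first on ties, preserving stability.
        if (cmp(temp[cursorTemp], a[cursorB]) <= 0) a[dest++] = temp[cursorTemp++];
        else a[dest++] = a[cursorB++];
      }
      while (cursorTemp < lengthA) a[dest++] = temp[cursorTemp++];
      // Any remaining elements of run B are already in their final place.
    }
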
- // Merge the lengthA elements starting at baseA with the lengthB elements
- // starting at baseB in a stable way, in-place. lengthA and lengthB must
- // be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
- // end of the merge and should have lengthA >= lengthB.
- transitioning macro MergeHigh(
- implicit context: Context,
- sortState:
- SortState)(baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
-
- let lengthA: Smi = lengthAArg;
- let lengthB: Smi = lengthBArg;
-
- const workArray = sortState.workArray;
- const tempArray: FixedArray = GetTempArray(sortState, lengthB);
- Copy(workArray, baseB, tempArray, 0, lengthB);
-
- // MergeHigh merges the two runs backwards.
- let dest: Smi = baseB + lengthB - 1;
- let cursorTemp: Smi = lengthB - 1;
- let cursorA: Smi = baseA + lengthA - 1;
-
- workArray.objects[dest--] = workArray.objects[cursorA--];
-
- try {
- if (--lengthA == 0) goto Succeed;
- if (lengthB == 1) goto CopyA;
-
- let minGallop: Smi = sortState.minGallop;
+// Merge the lengthA elements starting at baseA with the lengthB elements
+// starting at baseB in a stable way, in-place. lengthA and lengthB must
+// be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
+// end of the merge and should have lengthA >= lengthB.
+transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
+ assert(0 < lengthAArg && 0 < lengthBArg);
+ assert(0 <= baseA && 0 < baseB);
+ assert(baseA + lengthAArg == baseB);
+
+ let lengthA: Smi = lengthAArg;
+ let lengthB: Smi = lengthBArg;
+
+ const workArray = sortState.workArray;
+ const tempArray: FixedArray = GetTempArray(sortState, lengthB);
+ Copy(workArray, baseB, tempArray, 0, lengthB);
+
+ // MergeHigh merges the two runs backwards.
+ let dest: Smi = baseB + lengthB - 1;
+ let cursorTemp: Smi = lengthB - 1;
+ let cursorA: Smi = baseA + lengthA - 1;
+
+ workArray.objects[dest--] = workArray.objects[cursorA--];
+
+ try {
+ if (--lengthA == 0) goto Succeed;
+ if (lengthB == 1) goto CopyA;
+
+ let minGallop: Smi = sortState.minGallop;
+ // TODO(szuend): Replace with something that does not have a runtime
+    // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ let nofWinsA: Smi = 0; // # of times A won in a row.
+ let nofWinsB: Smi = 0; // # of times B won in a row.
+
+ // Do the straightforward thing until (if ever) one run appears to
+ // win consistently.
// TODO(szuend): Replace with something that does not have a runtime
      // overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- let nofWinsA: Smi = 0; // # of times A won in a row.
- let nofWinsB: Smi = 0; // # of times B won in a row.
-
- // Do the straightforward thing until (if ever) one run appears to
- // win consistently.
- // TODO(szuend): Replace with something that does not have a runtime
- // overhead as soon as its available in Torque.
- while (Int32TrueConstant()) {
- assert(lengthA > 0 && lengthB > 1);
-
- const order = sortState.Compare(
- UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
- UnsafeCast<JSAny>(workArray.objects[cursorA]));
-
- if (order < 0) {
- workArray.objects[dest--] = workArray.objects[cursorA--];
-
- ++nofWinsA;
- --lengthA;
- nofWinsB = 0;
-
- if (lengthA == 0) goto Succeed;
- if (nofWinsA >= minGallop) break;
- } else {
- workArray.objects[dest--] = tempArray.objects[cursorTemp--];
-
- ++nofWinsB;
- --lengthB;
- nofWinsA = 0;
-
- if (lengthB == 1) goto CopyA;
- if (nofWinsB >= minGallop) break;
- }
- }
+ assert(lengthA > 0 && lengthB > 1);
- // One run is winning so consistently that galloping may be a huge
- // win. So try that, and continue galloping until (if ever) neither
- // run appears to be winning consistently anymore.
- ++minGallop;
- let firstIteration: bool = true;
- while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
- firstIteration) {
- firstIteration = false;
-
- assert(lengthA > 0 && lengthB > 1);
-
- minGallop = SmiMax(1, minGallop - 1);
- sortState.minGallop = minGallop;
-
- let k: Smi = GallopRight(
- workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
- baseA, lengthA, lengthA - 1);
- assert(k >= 0);
- nofWinsA = lengthA - k;
-
- if (nofWinsA > 0) {
- dest = dest - nofWinsA;
- cursorA = cursorA - nofWinsA;
- Copy(workArray, cursorA + 1, workArray, dest + 1, nofWinsA);
-
- lengthA = lengthA - nofWinsA;
- if (lengthA == 0) goto Succeed;
- }
- workArray.objects[dest--] = tempArray.objects[cursorTemp--];
- if (--lengthB == 1) goto CopyA;
-
- k = GallopLeft(
- tempArray, UnsafeCast<JSAny>(workArray.objects[cursorA]), 0,
- lengthB, lengthB - 1);
- assert(k >= 0);
- nofWinsB = lengthB - k;
-
- if (nofWinsB > 0) {
- dest = dest - nofWinsB;
- cursorTemp = cursorTemp - nofWinsB;
- Copy(tempArray, cursorTemp + 1, workArray, dest + 1, nofWinsB);
-
- lengthB = lengthB - nofWinsB;
- if (lengthB == 1) goto CopyA;
-
- // lengthB == 0 is impossible now if the comparison function is
- // consistent, but we can't assume that it is.
- if (lengthB == 0) goto Succeed;
- }
+ const order = sortState.Compare(
+ UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
+ UnsafeCast<JSAny>(workArray.objects[cursorA]));
+
+ if (order < 0) {
workArray.objects[dest--] = workArray.objects[cursorA--];
- if (--lengthA == 0) goto Succeed;
+
+ ++nofWinsA;
+ --lengthA;
+ nofWinsB = 0;
+
+ if (lengthA == 0) goto Succeed;
+ if (nofWinsA >= minGallop) break;
+ } else {
+ workArray.objects[dest--] = tempArray.objects[cursorTemp--];
+
+ ++nofWinsB;
+ --lengthB;
+ nofWinsA = 0;
+
+ if (lengthB == 1) goto CopyA;
+ if (nofWinsB >= minGallop) break;
}
- ++minGallop;
- sortState.minGallop = minGallop;
- }
- }
- label Succeed {
- if (lengthB > 0) {
- assert(lengthA == 0);
- Copy(tempArray, 0, workArray, dest - (lengthB - 1), lengthB);
}
- }
- label CopyA {
- assert(lengthB == 1 && lengthA > 0);
-
- // The first element of run B belongs at the front of the merge.
- dest = dest - lengthA;
- cursorA = cursorA - lengthA;
- Copy(workArray, cursorA + 1, workArray, dest + 1, lengthA);
- workArray.objects[dest] = tempArray.objects[cursorTemp];
- }
- }
- // Compute a good value for the minimum run length; natural runs shorter
- // than this are boosted artificially via binary insertion sort.
- //
- // If n < 64, return n (it's too small to bother with fancy stuff).
- // Else if n is an exact power of 2, return 32.
- // Else return an int k, 32 <= k <= 64, such that n/k is close to, but
- // strictly less than, an exact power of 2.
- //
- // See listsort.txt for more info.
- macro ComputeMinRunLength(nArg: Smi): Smi {
- let n: Smi = nArg;
- let r: Smi = 0; // Becomes 1 if any 1 bits are shifted off.
-
- assert(n >= 0);
- while (n >= 64) {
- r = r | (n & 1);
- n = n >> 1;
- }
+ // One run is winning so consistently that galloping may be a huge
+ // win. So try that, and continue galloping until (if ever) neither
+ // run appears to be winning consistently anymore.
+ ++minGallop;
+ let firstIteration: bool = true;
+ while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
+ firstIteration) {
+ firstIteration = false;
- const minRunLength: Smi = n + r;
- assert(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
- return minRunLength;
- }
+ assert(lengthA > 0 && lengthB > 1);
- // Returns true iff run_length(n - 2) > run_length(n - 1) + run_length(n).
- macro RunInvariantEstablished(implicit context: Context)(
- pendingRuns: FixedArray, n: Smi): bool {
- if (n < 2) return true;
+ minGallop = SmiMax(1, minGallop - 1);
+ sortState.minGallop = minGallop;
- const runLengthN: Smi = GetPendingRunLength(pendingRuns, n);
- const runLengthNM: Smi = GetPendingRunLength(pendingRuns, n - 1);
- const runLengthNMM: Smi = GetPendingRunLength(pendingRuns, n - 2);
+ let k: Smi = GallopRight(
+ workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]), baseA,
+ lengthA, lengthA - 1);
+ assert(k >= 0);
+ nofWinsA = lengthA - k;
- return runLengthNMM > runLengthNM + runLengthN;
- }
+ if (nofWinsA > 0) {
+ dest = dest - nofWinsA;
+ cursorA = cursorA - nofWinsA;
+ Copy(workArray, cursorA + 1, workArray, dest + 1, nofWinsA);
- // Examines the stack of runs waiting to be merged, merging adjacent runs
- // until the stack invariants are re-established:
- //
- // 1. run_length(i - 3) > run_length(i - 2) + run_length(i - 1)
- // 2. run_length(i - 2) > run_length(i - 1)
- //
- // TODO(szuend): Remove unnecessary loads. This macro was refactored to
- // improve readability, introducing unnecessary loads in the
- // process. Determine if all these extra loads are ok.
- transitioning macro MergeCollapse(context: Context, sortState: SortState) {
- const pendingRuns: FixedArray = sortState.pendingRuns;
-
- // Reload the stack size because MergeAt might change it.
- while (GetPendingRunsSize(sortState) > 1) {
- let n: Smi = GetPendingRunsSize(sortState) - 2;
-
- if (!RunInvariantEstablished(pendingRuns, n + 1) ||
- !RunInvariantEstablished(pendingRuns, n)) {
- if (GetPendingRunLength(pendingRuns, n - 1) <
- GetPendingRunLength(pendingRuns, n + 1)) {
- --n;
+ lengthA = lengthA - nofWinsA;
+ if (lengthA == 0) goto Succeed;
}
-
- MergeAt(n);
- } else if (
- GetPendingRunLength(pendingRuns, n) <=
- GetPendingRunLength(pendingRuns, n + 1)) {
- MergeAt(n);
- } else {
- break;
+ workArray.objects[dest--] = tempArray.objects[cursorTemp--];
+ if (--lengthB == 1) goto CopyA;
+
+ k = GallopLeft(
+ tempArray, UnsafeCast<JSAny>(workArray.objects[cursorA]), 0,
+ lengthB, lengthB - 1);
+ assert(k >= 0);
+ nofWinsB = lengthB - k;
+
+ if (nofWinsB > 0) {
+ dest = dest - nofWinsB;
+ cursorTemp = cursorTemp - nofWinsB;
+ Copy(tempArray, cursorTemp + 1, workArray, dest + 1, nofWinsB);
+
+ lengthB = lengthB - nofWinsB;
+ if (lengthB == 1) goto CopyA;
+
+ // lengthB == 0 is impossible now if the comparison function is
+ // consistent, but we can't assume that it is.
+ if (lengthB == 0) goto Succeed;
+ }
+ workArray.objects[dest--] = workArray.objects[cursorA--];
+ if (--lengthA == 0) goto Succeed;
}
+ ++minGallop;
+ sortState.minGallop = minGallop;
+ }
+ } label Succeed {
+ if (lengthB > 0) {
+ assert(lengthA == 0);
+ Copy(tempArray, 0, workArray, dest - (lengthB - 1), lengthB);
}
+ } label CopyA {
+ assert(lengthB == 1 && lengthA > 0);
+
+ // The first element of run B belongs at the front of the merge.
+ dest = dest - lengthA;
+ cursorA = cursorA - lengthA;
+ Copy(workArray, cursorA + 1, workArray, dest + 1, lengthA);
+ workArray.objects[dest] = tempArray.objects[cursorTemp];
+ }
+}
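The galloping branch above relies on GallopRight/GallopLeft, which probe exponentially away from a hint before finishing with a binary search. A minimal Python sketch of the right-biased variant (illustrative only; the hint handling and the workArray/tempArray asymmetry of the Torque code are omitted):

    def gallop_right(key, run):
        # Probe offsets 1, 3, 7, 15, ... until run[ofs] > key, then
        # binary-search the final gap. Returns how many leading
        # elements of `run` compare <= key.
        n = len(run)
        if n == 0 or run[0] > key:
            return 0
        last, ofs = 0, 1
        while ofs < n and run[ofs] <= key:
            last, ofs = ofs, (ofs << 1) + 1
        lo, hi = last + 1, min(ofs, n)
        while lo < hi:
            mid = (lo + hi) // 2
            if run[mid] <= key:
                lo = mid + 1
            else:
                hi = mid
        return lo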
+
+// Compute a good value for the minimum run length; natural runs shorter
+// than this are boosted artificially via binary insertion sort.
+//
+// If n < 64, return n (it's too small to bother with fancy stuff).
+// Else if n is an exact power of 2, return 32.
+// Else return an int k, 32 <= k <= 64, such that n/k is close to, but
+// strictly less than, an exact power of 2.
+//
+// See listsort.txt for more info.
+macro ComputeMinRunLength(nArg: Smi): Smi {
+ let n: Smi = nArg;
+ let r: Smi = 0; // Becomes 1 if any 1 bits are shifted off.
+
+ assert(n >= 0);
+ while (n >= 64) {
+ r = r | (n & 1);
+ n = n >> 1;
}
- // Regardless of invariants, merge all runs on the stack until only one
- // remains. This is used at the end of the mergesort.
- transitioning macro
- MergeForceCollapse(context: Context, sortState: SortState) {
- const pendingRuns: FixedArray = sortState.pendingRuns;
+ const minRunLength: Smi = n + r;
+ assert(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
+ return minRunLength;
+}
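For comparison with CPython's listsort.txt, the same computation as a short Python sketch (a direct transliteration of the macro above):

    def compute_min_run_length(n):
        # Shift n right until it drops below 64; r records whether any
        # 1 bits were shifted off, so the original n divided by the
        # result stays strictly below a power of two.
        assert n >= 0
        r = 0
        while n >= 64:
            r |= n & 1
            n >>= 1
        return n + r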
+
+// Returns true iff run_length(n - 2) > run_length(n - 1) + run_length(n).
+macro RunInvariantEstablished(implicit context: Context)(
+ pendingRuns: FixedArray, n: Smi): bool {
+ if (n < 2) return true;
+
+ const runLengthN: Smi = GetPendingRunLength(pendingRuns, n);
+ const runLengthNM: Smi = GetPendingRunLength(pendingRuns, n - 1);
+ const runLengthNMM: Smi = GetPendingRunLength(pendingRuns, n - 2);
- // Reload the stack size becuase MergeAt might change it.
- while (GetPendingRunsSize(sortState) > 1) {
- let n: Smi = GetPendingRunsSize(sortState) - 2;
+ return runLengthNMM > runLengthNM + runLengthN;
+}
- if (n > 0 &&
- GetPendingRunLength(pendingRuns, n - 1) <
- GetPendingRunLength(pendingRuns, n + 1)) {
+// Examines the stack of runs waiting to be merged, merging adjacent runs
+// until the stack invariants are re-established:
+//
+// 1. run_length(i - 3) > run_length(i - 2) + run_length(i - 1)
+// 2. run_length(i - 2) > run_length(i - 1)
+//
+// TODO(szuend): Remove unnecessary loads. This macro was refactored to
+// improve readability, introducing unnecessary loads in the
+// process. Determine if all these extra loads are ok.
+transitioning macro MergeCollapse(context: Context, sortState: SortState) {
+ const pendingRuns: FixedArray = sortState.pendingRuns;
+
+ // Reload the stack size because MergeAt might change it.
+ while (GetPendingRunsSize(sortState) > 1) {
+ let n: Smi = GetPendingRunsSize(sortState) - 2;
+
+ if (!RunInvariantEstablished(pendingRuns, n + 1) ||
+ !RunInvariantEstablished(pendingRuns, n)) {
+ if (GetPendingRunLength(pendingRuns, n - 1) <
+ GetPendingRunLength(pendingRuns, n + 1)) {
--n;
}
+
MergeAt(n);
+ } else if (
+ GetPendingRunLength(pendingRuns, n) <=
+ GetPendingRunLength(pendingRuns, n + 1)) {
+ MergeAt(n);
+ } else {
+ break;
}
}
+}
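The same invariant maintenance, sketched in Python with merge_at as a hypothetical callback that replaces runs n and n+1 on the stack with their merged length (the Torque macro reloads the stack size on every pass for the same reason):

    def merge_collapse(runs, merge_at):
        # `runs` is the stack of pending run lengths, oldest first.
        while len(runs) > 1:
            n = len(runs) - 2
            if ((n > 0 and runs[n - 1] <= runs[n] + runs[n + 1]) or
                    (n > 1 and runs[n - 2] <= runs[n - 1] + runs[n])):
                if runs[n - 1] < runs[n + 1]:
                    n -= 1
                merge_at(runs, n)
            elif runs[n] <= runs[n + 1]:
                merge_at(runs, n)
            else:
                break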
- transitioning macro
- ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
- if (length < 2) return;
- let remaining: Smi = length;
-
- // March over the array once, left to right, finding natural runs,
- // and extending short natural runs to minrun elements.
- let low: Smi = 0;
- const minRunLength: Smi = ComputeMinRunLength(remaining);
- while (remaining != 0) {
- let currentRunLength: Smi = CountAndMakeRun(low, low + remaining);
-
- // If the run is short, extend it to min(minRunLength, remaining).
- if (currentRunLength < minRunLength) {
- const forcedRunLength: Smi = SmiMin(minRunLength, remaining);
- BinaryInsertionSort(low, low + currentRunLength, low + forcedRunLength);
- currentRunLength = forcedRunLength;
- }
-
- // Push run onto pending-runs stack, and maybe merge.
- PushRun(sortState, low, currentRunLength);
+// Regardless of invariants, merge all runs on the stack until only one
+// remains. This is used at the end of the mergesort.
+transitioning macro
+MergeForceCollapse(context: Context, sortState: SortState) {
+ const pendingRuns: FixedArray = sortState.pendingRuns;
- MergeCollapse(context, sortState);
+  // Reload the stack size because MergeAt might change it.
+ while (GetPendingRunsSize(sortState) > 1) {
+ let n: Smi = GetPendingRunsSize(sortState) - 2;
- // Advance to find next run.
- low = low + currentRunLength;
- remaining = remaining - currentRunLength;
+ if (n > 0 &&
+ GetPendingRunLength(pendingRuns, n - 1) <
+ GetPendingRunLength(pendingRuns, n + 1)) {
+ --n;
}
-
- MergeForceCollapse(context, sortState);
- assert(GetPendingRunsSize(sortState) == 1);
- assert(GetPendingRunLength(sortState.pendingRuns, 0) == length);
+ MergeAt(n);
}
+}
- transitioning macro
- CompactReceiverElementsIntoWorkArray(
- implicit context: Context, sortState: SortState)(): Smi {
- let growableWorkArray = growable_fixed_array::GrowableFixedArray{
- array: sortState.workArray,
- capacity: Convert<intptr>(sortState.workArray.length),
- length: 0
- };
-
- const loadFn = sortState.loadFn;
-
- // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
- // https://crbug.com/v8/7970.
- const receiverLength: Number = sortState.initialReceiverLength;
- assert(IsNumberNormalized(receiverLength));
-
- const sortLength: Smi = TaggedIsSmi(receiverLength) ?
- UnsafeCast<Smi>(receiverLength) :
- Convert<PositiveSmi>(kSmiMax) otherwise unreachable;
-
- // Move all non-undefined elements into {sortState.workArray}, holes
- // are ignored.
- let numberOfUndefined: Smi = 0;
- for (let i: Smi = 0; i < receiverLength; ++i) {
- const element: JSAny|TheHole = loadFn(context, sortState, i);
-
- if (element == TheHole) {
- // Do nothing for holes. The result is that elements are
- // compacted at the front of the work array.
- } else if (element == Undefined) {
- numberOfUndefined++;
- } else {
- growableWorkArray.Push(element);
- }
+transitioning macro
+ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
+ if (length < 2) return;
+ let remaining: Smi = length;
+
+ // March over the array once, left to right, finding natural runs,
+ // and extending short natural runs to minrun elements.
+ let low: Smi = 0;
+ const minRunLength: Smi = ComputeMinRunLength(remaining);
+ while (remaining != 0) {
+ let currentRunLength: Smi = CountAndMakeRun(low, low + remaining);
+
+ // If the run is short, extend it to min(minRunLength, remaining).
+ if (currentRunLength < minRunLength) {
+ const forcedRunLength: Smi = SmiMin(minRunLength, remaining);
+ BinaryInsertionSort(low, low + currentRunLength, low + forcedRunLength);
+ currentRunLength = forcedRunLength;
}
- // Reset the workArray on the frameState, as it may have grown.
- sortState.workArray = growableWorkArray.array;
- sortState.sortLength = sortLength;
- sortState.numberOfUndefined = numberOfUndefined;
+ // Push run onto pending-runs stack, and maybe merge.
+ PushRun(sortState, low, currentRunLength);
+
+ MergeCollapse(context, sortState);
- return Convert<Smi>(growableWorkArray.length);
+ // Advance to find next run.
+ low = low + currentRunLength;
+ remaining = remaining - currentRunLength;
}
- transitioning macro
- CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
- numberOfNonUndefined: Smi) {
- const storeFn = sortState.storeFn;
- const workArray = sortState.workArray;
-
- assert(numberOfNonUndefined <= workArray.length);
- assert(
- numberOfNonUndefined + sortState.numberOfUndefined <=
- sortState.sortLength);
-
- // Writing the elements back is a 3 step process:
- // 1. Copy the sorted elements from the workarray to the receiver.
- // 2. Add {nOfUndefined} undefineds to the receiver.
- // 3. Depending on the backing store either delete properties or
- // set them to the TheHole up to {sortState.sortLength}.
- let index: Smi = 0;
- for (; index < numberOfNonUndefined; ++index) {
- storeFn(
- context, sortState, index,
- UnsafeCast<JSAny>(workArray.objects[index]));
- }
+ MergeForceCollapse(context, sortState);
+ assert(GetPendingRunsSize(sortState) == 1);
+ assert(GetPendingRunLength(sortState.pendingRuns, 0) == length);
+}
- const numberOfUndefinedEnd: Smi =
- sortState.numberOfUndefined + numberOfNonUndefined;
- for (; index < numberOfUndefinedEnd; ++index) {
- storeFn(context, sortState, index, Undefined);
+transitioning macro
+CompactReceiverElementsIntoWorkArray(
+ implicit context: Context, sortState: SortState)(): Smi {
+ let growableWorkArray = growable_fixed_array::GrowableFixedArray{
+ array: sortState.workArray,
+ capacity: Convert<intptr>(sortState.workArray.length),
+ length: 0
+ };
+
+ const loadFn = sortState.loadFn;
+
+ // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
+ // https://crbug.com/v8/7970.
+ const receiverLength: Number = sortState.initialReceiverLength;
+ assert(IsNumberNormalized(receiverLength));
+
+ const sortLength: Smi = TaggedIsSmi(receiverLength) ?
+ UnsafeCast<Smi>(receiverLength) :
+ Convert<PositiveSmi>(kSmiMax) otherwise unreachable;
+
+ // Move all non-undefined elements into {sortState.workArray}, holes
+ // are ignored.
+ let numberOfUndefined: Smi = 0;
+ for (let i: Smi = 0; i < receiverLength; ++i) {
+ const element: JSAny|TheHole = loadFn(context, sortState, i);
+
+ if (element == TheHole) {
+ // Do nothing for holes. The result is that elements are
+ // compacted at the front of the work array.
+ } else if (element == Undefined) {
+ numberOfUndefined++;
+ } else {
+ growableWorkArray.Push(element);
}
+ }
- const end: Smi = sortState.sortLength;
- const deleteFn = sortState.deleteFn;
- for (; index < end; ++index) {
- deleteFn(context, sortState, index);
- }
+  // Reset the workArray on the sortState, as it may have grown.
+ sortState.workArray = growableWorkArray.array;
+ sortState.sortLength = sortLength;
+ sortState.numberOfUndefined = numberOfUndefined;
+
+ return Convert<Smi>(growableWorkArray.length);
+}
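A rough Python rendering of the compaction pass, where load and HOLE stand in for loadFn and TheHole (None models undefined):

    HOLE = object()

    def compact_into_work_array(load, receiver_length):
        # Holes are skipped, undefineds are only counted, everything
        # else is packed to the front of the work array.
        work, number_of_undefined = [], 0
        for i in range(receiver_length):
            element = load(i)
            if element is HOLE:
                continue
            if element is None:
                number_of_undefined += 1
            else:
                work.append(element)
        return work, number_of_undefined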
+
+transitioning macro
+CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
+ numberOfNonUndefined: Smi) {
+ const storeFn = sortState.storeFn;
+ const workArray = sortState.workArray;
+
+ assert(numberOfNonUndefined <= workArray.length);
+ assert(
+ numberOfNonUndefined + sortState.numberOfUndefined <=
+ sortState.sortLength);
+
+ // Writing the elements back is a 3 step process:
+ // 1. Copy the sorted elements from the workarray to the receiver.
+ // 2. Add {nOfUndefined} undefineds to the receiver.
+ // 3. Depending on the backing store either delete properties or
+ // set them to the TheHole up to {sortState.sortLength}.
+ let index: Smi = 0;
+ for (; index < numberOfNonUndefined; ++index) {
+ storeFn(
+ context, sortState, index, UnsafeCast<JSAny>(workArray.objects[index]));
}
- transitioning builtin
- ArrayTimSort(context: Context, sortState: SortState): JSAny {
- const numberOfNonUndefined: Smi = CompactReceiverElementsIntoWorkArray();
- ArrayTimSortImpl(context, sortState, numberOfNonUndefined);
+ const numberOfUndefinedEnd: Smi =
+ sortState.numberOfUndefined + numberOfNonUndefined;
+ for (; index < numberOfUndefinedEnd; ++index) {
+ storeFn(context, sortState, index, Undefined);
+ }
- try {
- // The comparison function or toString might have changed the
- // receiver, if that is the case, we switch to the slow path.
- sortState.CheckAccessor() otherwise Slow;
- }
- label Slow deferred {
- sortState.ResetToGenericAccessor();
- }
+ const end: Smi = sortState.sortLength;
+ const deleteFn = sortState.deleteFn;
+ for (; index < end; ++index) {
+ deleteFn(context, sortState, index);
+ }
+}
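The three write-back phases, sketched in Python; store and delete_element stand in for the element-kind specific storeFn/deleteFn:

    def copy_work_array_to_receiver(store, delete_element, work,
                                    number_of_non_undefined,
                                    number_of_undefined, sort_length):
        for i in range(number_of_non_undefined):
            store(i, work[i])                      # 1. sorted elements
        undef_end = number_of_non_undefined + number_of_undefined
        for i in range(number_of_non_undefined, undef_end):
            store(i, None)                         # 2. trailing undefineds
        for i in range(undef_end, sort_length):
            delete_element(i)                      # 3. holes / deleted props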
- CopyWorkArrayToReceiver(numberOfNonUndefined);
- return kSuccess;
+transitioning builtin
+ArrayTimSort(context: Context, sortState: SortState): JSAny {
+ const numberOfNonUndefined: Smi = CompactReceiverElementsIntoWorkArray();
+ ArrayTimSortImpl(context, sortState, numberOfNonUndefined);
+
+ try {
+ // The comparison function or toString might have changed the
+ // receiver, if that is the case, we switch to the slow path.
+ sortState.CheckAccessor() otherwise Slow;
+ } label Slow deferred {
+ sortState.ResetToGenericAccessor();
}
- // https://tc39.github.io/ecma262/#sec-array.prototype.sort
- transitioning javascript builtin
- ArrayPrototypeSort(js-implicit context: NativeContext, receiver: JSAny)(
- ...arguments): JSAny {
- // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
- // throw a TypeError exception.
- const comparefnObj: JSAny = arguments[0];
- const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise
- ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj);
+ CopyWorkArrayToReceiver(numberOfNonUndefined);
+ return kSuccess;
+}
- // 2. Let obj be ? ToObject(this value).
- const obj: JSReceiver = ToObject(context, receiver);
+// https://tc39.github.io/ecma262/#sec-array.prototype.sort
+transitioning javascript builtin
+ArrayPrototypeSort(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
+ // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
+ // throw a TypeError exception.
+ const comparefnObj: JSAny = arguments[0];
+ const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise
+ ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj);
- // 3. Let len be ? ToLength(? Get(obj, "length")).
- const len: Number = GetLengthProperty(obj);
+ // 2. Let obj be ? ToObject(this value).
+ const obj: JSReceiver = ToObject(context, receiver);
- if (len < 2) return receiver;
+ // 3. Let len be ? ToLength(? Get(obj, "length")).
+ const len: Number = GetLengthProperty(obj);
- const sortState: SortState = NewSortState(obj, comparefn, len);
- ArrayTimSort(context, sortState);
+ if (len < 2) return receiver;
- return receiver;
- }
+ const sortState: SortState = NewSortState(obj, comparefn, len);
+ ArrayTimSort(context, sortState);
+
+ return receiver;
+}
}
diff --git a/deps/v8/third_party/zlib/BUILD.gn b/deps/v8/third_party/zlib/BUILD.gn
index 1f572378e0..f8e6b4daf2 100644
--- a/deps/v8/third_party/zlib/BUILD.gn
+++ b/deps/v8/third_party/zlib/BUILD.gn
@@ -4,6 +4,10 @@
import("//build/config/compiler/compiler.gni")
+if (build_with_chromium) {
+ import("//testing/test.gni")
+}
+
if (current_cpu == "arm" || current_cpu == "arm64") {
import("//build/config/arm.gni")
}
@@ -37,7 +41,9 @@ config("zlib_adler32_simd_config") {
} else {
defines += [ "X86_NOT_WINDOWS" ]
}
- } else if (use_arm_neon_optimizations) {
+ }
+
+ if (use_arm_neon_optimizations) {
defines = [ "ADLER32_SIMD_NEON" ]
}
}
@@ -61,6 +67,7 @@ source_set("zlib_adler32_simd") {
"adler32_simd.c",
"adler32_simd.h",
]
+
if (!is_debug) {
# Use optimize_speed (-O3) to output the _smallest_ code.
configs -= [ "//build/config/compiler:default_optimization" ]
@@ -134,6 +141,7 @@ config("zlib_inflate_chunk_simd_config") {
if (use_arm_neon_optimizations) {
defines = [ "INFLATE_CHUNK_SIMD_NEON" ]
+
if (current_cpu == "arm64") {
defines += [ "INFLATE_CHUNK_READ_64LE" ]
}
@@ -161,11 +169,12 @@ source_set("zlib_inflate_chunk_simd") {
}
}
+ configs += [ ":zlib_internal_config" ]
+
+ # Needed for MSVC, which is still supported by V8 and PDFium. zlib uses K&R C
+ # style function declarations, which triggers warning C4131.
configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [
- ":zlib_internal_config",
- "//build/config/compiler:no_chromium_code",
- ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
public_configs = [ ":zlib_inflate_chunk_simd_config" ]
}
@@ -198,6 +207,15 @@ source_set("zlib_crc32_simd") {
public_configs = [ ":zlib_crc32_simd_config" ]
}
+config("zlib_x86_simd_config") {
+ if (use_x86_x64_optimizations) {
+ defines = [
+ "CRC32_SIMD_SSE42_PCLMUL",
+ "DEFLATE_FILL_WINDOW_SSE2",
+ ]
+ }
+}
+
source_set("zlib_x86_simd") {
visibility = [ ":*" ]
@@ -215,11 +233,9 @@ source_set("zlib_x86_simd") {
}
}
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [
- ":zlib_internal_config",
- "//build/config/compiler:no_chromium_code",
- ]
+ configs += [ ":zlib_internal_config" ]
+
+ public_configs = [ ":zlib_x86_simd_config" ]
}
config("zlib_warnings") {
@@ -268,6 +284,7 @@ component("zlib") {
defines = []
deps = []
+
if (!use_x86_x64_optimizations && !use_arm_neon_optimizations) {
# Apparently android_cronet bot builds with NEON disabled and
# we also should disable optimizations for iOS@x86 (a.k.a. simulator).
@@ -277,8 +294,8 @@ component("zlib") {
if (is_ios) {
# iOS@ARM is a special case where we always have NEON but don't check
# for crypto extensions.
- # TODO(cavalcantii): verify what is the current state of CPU features shipped
- # on latest iOS devices.
+ # TODO(cavalcantii): verify what is the current state of CPU features
+ # shipped on latest iOS devices.
defines += [ "ARM_OS_IOS" ]
}
@@ -298,6 +315,8 @@ component("zlib") {
sources += [ "inflate.c" ]
}
+ deps += [ ":zlib_x86_simd" ]
+
if (is_android) {
import("//build/config/android/config.gni")
if (defined(android_ndk_root) && android_ndk_root != "") {
@@ -308,17 +327,17 @@ component("zlib") {
}
configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+
+ public_configs = [ ":zlib_config" ]
+
configs += [
":zlib_internal_config",
- "//build/config/compiler:no_chromium_code",
# Must be after no_chromium_code for warning flags to be ordered correctly.
":zlib_warnings",
]
- public_configs = [ ":zlib_config" ]
-
- deps += [ ":zlib_x86_simd" ]
allow_circular_includes_from = deps
}
@@ -359,14 +378,14 @@ static_library("minizip") {
deps = [ ":zlib" ]
configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [
- "//build/config/compiler:no_chromium_code",
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ public_configs = [ ":zlib_config" ]
+
+ configs += [
# Must be after no_chromium_code for warning flags to be ordered correctly.
":minizip_warnings",
]
-
- public_configs = [ ":zlib_config" ]
}
executable("zlib_bench") {
@@ -379,8 +398,28 @@ executable("zlib_bench") {
configs += [ "//build/config/compiler:optimize_speed" ]
}
+ deps = [ ":zlib" ]
+
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
+}
- deps = [ ":zlib" ]
+if (build_with_chromium) {
+ test("zlib_unittests") {
+ testonly = true
+
+ sources = [
+ "contrib/tests/infcover.cc",
+ "contrib/tests/infcover.h",
+ "contrib/tests/utils_unittest.cc",
+ "google/compression_utils_portable.cc",
+ "google/compression_utils_portable.h",
+ ]
+
+ deps = [
+ ":zlib",
+ "//testing/gtest",
+ "//testing/gtest:gtest_main",
+ ]
+ }
}
diff --git a/deps/v8/third_party/zlib/DEPS b/deps/v8/third_party/zlib/DEPS
new file mode 100644
index 0000000000..b6dcfc6bc1
--- /dev/null
+++ b/deps/v8/third_party/zlib/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+testing/gtest",
+] \ No newline at end of file
diff --git a/deps/v8/third_party/zlib/README.chromium b/deps/v8/third_party/zlib/README.chromium
index 3d90f79be2..c3c1ef69ad 100644
--- a/deps/v8/third_party/zlib/README.chromium
+++ b/deps/v8/third_party/zlib/README.chromium
@@ -2,6 +2,7 @@ Name: zlib
Short Name: zlib
URL: http://zlib.net/
Version: 1.2.11
+CPEPrefix: cpe:/a:zlib:zlib:1.2.11
Security Critical: yes
License: Custom license
License File: LICENSE
diff --git a/deps/v8/third_party/zlib/chromeconf.h b/deps/v8/third_party/zlib/chromeconf.h
index 666093d696..5ecf29edbf 100644
--- a/deps/v8/third_party/zlib/chromeconf.h
+++ b/deps/v8/third_party/zlib/chromeconf.h
@@ -192,4 +192,8 @@
#define arm_check_features Cr_z_arm_check_features
#define armv8_crc32_little Cr_z_armv8_crc32_little
+/* Symbols added by cpu_features.c */
+#define cpu_check_features Cr_z_cpu_check_features
+#define x86_cpu_enable_sse2 Cr_z_x86_cpu_enable_sse2
+
#endif /* THIRD_PARTY_ZLIB_CHROMECONF_H_ */
diff --git a/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h b/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
index 1826601b7f..d3bc33c5ab 100644
--- a/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
+++ b/deps/v8/third_party/zlib/contrib/optimizations/insert_string.h
@@ -4,45 +4,47 @@
* Use of this source code is governed by a BSD-style license that can be
* found in the Chromium source repository LICENSE file.
*/
-#ifdef _MSC_VER
+
+#if defined(_MSC_VER)
#define INLINE __inline
#else
#define INLINE inline
#endif
#include "cpu_features.h"
-/* Optimized insert_string block */
-#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32)
-#define TARGET_CPU_WITH_CRC
+
// clang-format off
#if defined(CRC32_SIMD_SSE42_PCLMUL)
- /* Required to make MSVC bot build pass. */
- #include <smmintrin.h>
- #if defined(__GNUC__) || defined(__clang__)
- #undef TARGET_CPU_WITH_CRC
+ #include <smmintrin.h> /* Required to make MSVC bot build pass. */
+
+ #if defined(__clang__) || defined(__GNUC__)
#define TARGET_CPU_WITH_CRC __attribute__((target("sse4.2")))
+ #else
+ #define TARGET_CPU_WITH_CRC
#endif
#define _cpu_crc32_u32 _mm_crc32_u32
#elif defined(CRC32_ARMV8_CRC32)
#if defined(__clang__)
- #undef TARGET_CPU_WITH_CRC
#define __crc32cw __builtin_arm_crc32cw
#endif
- #define _cpu_crc32_u32 __crc32cw
-
#if defined(__aarch64__)
#define TARGET_CPU_WITH_CRC __attribute__((target("crc")))
#else // !defined(__aarch64__)
#define TARGET_CPU_WITH_CRC __attribute__((target("armv8-a,crc")))
#endif // defined(__aarch64__)
+
+ #define _cpu_crc32_u32 __crc32cw
+
#endif
// clang-format on
+
+#if defined(TARGET_CPU_WITH_CRC)
+
TARGET_CPU_WITH_CRC
-local INLINE Pos insert_string_optimized(deflate_state* const s,
- const Pos str) {
+local INLINE Pos insert_string_simd(deflate_state* const s, const Pos str) {
Pos ret;
unsigned *ip, val, h = 0;
@@ -64,7 +66,8 @@ local INLINE Pos insert_string_optimized(deflate_state* const s,
s->prev[str & s->w_mask] = ret;
return ret;
}
-#endif /* Optimized insert_string block */
+
+#endif // TARGET_CPU_WITH_CRC
/* ===========================================================================
* Update a hash value with the given input byte
@@ -99,24 +102,22 @@ local INLINE Pos insert_string_c(deflate_state* const s, const Pos str) {
}
local INLINE Pos insert_string(deflate_state* const s, const Pos str) {
-/* String dictionary insertion: faster symbol hashing has a positive impact
- * on data compression speeds (around 20% on Intel and 36% on Arm Cortex big
- * cores).
- * A misfeature is that the generated compressed output will differ from
- * vanilla zlib (even though it is still valid 'DEFLATE-d' content).
+/* insert_string_simd string dictionary insertion: this SIMD symbol hashing
+ * significantly improves data compression speed.
*
- * We offer here a way to disable the optimization if there is the expectation
- * that compressed content should match when compared to vanilla zlib.
+ * Note: the generated compressed output is a valid DEFLATE stream but will
+ * differ from vanilla zlib output ...
*/
-#if !defined(CHROMIUM_ZLIB_NO_CASTAGNOLI)
- /* TODO(cavalcantii): unify CPU features code. */
-#if defined(CRC32_ARMV8_CRC32)
- if (arm_cpu_enable_crc32)
- return insert_string_optimized(s, str);
-#elif defined(CRC32_SIMD_SSE42_PCLMUL)
+#if defined(CHROMIUM_ZLIB_NO_CASTAGNOLI)
+/* ... so this build-time option can be used to disable the SIMD symbol hasher
+ * if matching vanilla zlib DEFLATE output is required.
+ */ (;) /* FALLTHROUGH */
+#elif defined(TARGET_CPU_WITH_CRC) && defined(CRC32_SIMD_SSE42_PCLMUL)
if (x86_cpu_enable_simd)
- return insert_string_optimized(s, str);
-#endif
+ return insert_string_simd(s, str);
+#elif defined(TARGET_CPU_WITH_CRC) && defined(CRC32_ARMV8_CRC32)
+ if (arm_cpu_enable_crc32)
+ return insert_string_simd(s, str);
#endif
return insert_string_c(s, str);
}
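The hash-chain update itself is tiny; a Python sketch of its shape, with zlib.crc32 standing in for the CRC32C instruction the SIMD path actually uses (names and parameters here are illustrative, not zlib's API):

    import zlib

    def insert_string_sketch(head, prev, window, pos, w_mask, hash_mask):
        # Hash 4 bytes at `pos`, remember the previous head of that
        # hash chain in prev[], and make `pos` the new chain head.
        h = zlib.crc32(bytes(window[pos:pos + 4])) & hash_mask
        ret = head[h]
        head[h] = pos
        prev[pos & w_mask] = ret
        return ret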
diff --git a/deps/v8/third_party/zlib/cpu_features.c b/deps/v8/third_party/zlib/cpu_features.c
index ceed98822a..0c10a0024f 100644
--- a/deps/v8/third_party/zlib/cpu_features.c
+++ b/deps/v8/third_party/zlib/cpu_features.c
@@ -19,6 +19,7 @@
*/
int ZLIB_INTERNAL arm_cpu_enable_crc32 = 0;
int ZLIB_INTERNAL arm_cpu_enable_pmull = 0;
+int ZLIB_INTERNAL x86_cpu_enable_sse2 = 0;
int ZLIB_INTERNAL x86_cpu_enable_ssse3 = 0;
int ZLIB_INTERNAL x86_cpu_enable_simd = 0;
@@ -127,16 +128,20 @@ static void _cpu_check_features(void)
int x86_cpu_has_sse42;
int x86_cpu_has_pclmulqdq;
int abcd[4];
+
#ifdef _MSC_VER
__cpuid(abcd, 1);
#else
__cpuid(1, abcd[0], abcd[1], abcd[2], abcd[3]);
#endif
+
x86_cpu_has_sse2 = abcd[3] & 0x4000000;
x86_cpu_has_ssse3 = abcd[2] & 0x000200;
x86_cpu_has_sse42 = abcd[2] & 0x100000;
x86_cpu_has_pclmulqdq = abcd[2] & 0x2;
+ x86_cpu_enable_sse2 = x86_cpu_has_sse2;
+
x86_cpu_enable_ssse3 = x86_cpu_has_ssse3;
x86_cpu_enable_simd = x86_cpu_has_sse2 &&
@@ -145,4 +150,4 @@ static void _cpu_check_features(void)
}
#endif
#endif
-#endif \ No newline at end of file
+#endif
diff --git a/deps/v8/third_party/zlib/cpu_features.h b/deps/v8/third_party/zlib/cpu_features.h
index 2a4a797342..c7b15c5597 100644
--- a/deps/v8/third_party/zlib/cpu_features.h
+++ b/deps/v8/third_party/zlib/cpu_features.h
@@ -11,6 +11,7 @@
*/
extern int arm_cpu_enable_crc32;
extern int arm_cpu_enable_pmull;
+extern int x86_cpu_enable_sse2;
extern int x86_cpu_enable_ssse3;
extern int x86_cpu_enable_simd;
diff --git a/deps/v8/third_party/zlib/crc32.c b/deps/v8/third_party/zlib/crc32.c
index bd6964701b..d4c3248d98 100644
--- a/deps/v8/third_party/zlib/crc32.c
+++ b/deps/v8/third_party/zlib/crc32.c
@@ -497,7 +497,7 @@ uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
ZLIB_INTERNAL void crc_reset(deflate_state *const s)
{
-#ifdef ADLER32_SIMD_SSSE3
+#ifdef CRC32_SIMD_SSE42_PCLMUL
if (x86_cpu_enable_simd) {
crc_fold_init(s);
return;
@@ -508,7 +508,7 @@ ZLIB_INTERNAL void crc_reset(deflate_state *const s)
ZLIB_INTERNAL void crc_finalize(deflate_state *const s)
{
-#ifdef ADLER32_SIMD_SSSE3
+#ifdef CRC32_SIMD_SSE42_PCLMUL
if (x86_cpu_enable_simd)
s->strm->adler = crc_fold_512to32(s);
#endif
@@ -516,7 +516,7 @@ ZLIB_INTERNAL void crc_finalize(deflate_state *const s)
ZLIB_INTERNAL void copy_with_crc(z_streamp strm, Bytef *dst, long size)
{
-#ifdef ADLER32_SIMD_SSSE3
+#ifdef CRC32_SIMD_SSE42_PCLMUL
if (x86_cpu_enable_simd) {
crc_fold_copy(strm->state, dst, strm->next_in, size);
return;
diff --git a/deps/v8/third_party/zlib/crc_folding.c b/deps/v8/third_party/zlib/crc_folding.c
index 48d77744aa..ee31d4918d 100644
--- a/deps/v8/third_party/zlib/crc_folding.c
+++ b/deps/v8/third_party/zlib/crc_folding.c
@@ -18,6 +18,8 @@
#include "deflate.h"
+#ifdef CRC32_SIMD_SSE42_PCLMUL
+
#include <inttypes.h>
#include <emmintrin.h>
#include <immintrin.h>
@@ -283,7 +285,7 @@ ZLIB_INTERNAL void crc_fold_copy(deflate_state *const s,
goto partial;
}
- algn_diff = 0 - (uintptr_t)src & 0xF;
+ algn_diff = (0 - (uintptr_t)src) & 0xF;
if (algn_diff) {
xmm_crc_part = _mm_loadu_si128((__m128i *)src);
_mm_storeu_si128((__m128i *)dst, xmm_crc_part);
@@ -491,3 +493,5 @@ unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *const s)
return ~crc;
CRC_SAVE(s)
}
+
+#endif /* CRC32_SIMD_SSE42_PCLMUL */
diff --git a/deps/v8/third_party/zlib/deflate.c b/deps/v8/third_party/zlib/deflate.c
index 744d8558e2..1597196b08 100644
--- a/deps/v8/third_party/zlib/deflate.c
+++ b/deps/v8/third_party/zlib/deflate.c
@@ -1213,7 +1213,7 @@ ZLIB_INTERNAL unsigned deflate_read_buf(strm, buf, size)
#ifdef GZIP
if (strm->state->wrap == 2)
copy_with_crc(strm, buf, len);
- else
+ else
#endif
{
zmemcpy(buf, strm->next_in, len);
@@ -1521,7 +1521,7 @@ local void fill_window_c(deflate_state *s);
local void fill_window(deflate_state *s)
{
-#ifdef ADLER32_SIMD_SSSE3
+#ifdef DEFLATE_FILL_WINDOW_SSE2
if (x86_cpu_enable_simd) {
fill_window_sse(s);
return;
diff --git a/deps/v8/third_party/zlib/fill_window_sse.c b/deps/v8/third_party/zlib/fill_window_sse.c
index ed1e5d1d67..a841c99904 100644
--- a/deps/v8/third_party/zlib/fill_window_sse.c
+++ b/deps/v8/third_party/zlib/fill_window_sse.c
@@ -9,9 +9,10 @@
* For conditions of distribution and use, see copyright notice in zlib.h
*/
-#include <immintrin.h>
#include "deflate.h"
+#ifdef DEFLATE_FILL_WINDOW_SSE2
+
#define UPDATE_HASH(s,h,i) \
{\
if (s->level < 6) { \
@@ -28,6 +29,8 @@
extern int deflate_read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+#include <immintrin.h>
+
void fill_window_sse(deflate_state *s)
{
const __m128i xmm_wsize = _mm_set1_epi16(s->w_size);
@@ -175,3 +178,5 @@ void fill_window_sse(deflate_state *s)
Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
"not enough room for search");
}
+
+#endif /* DEFLATE_FILL_WINDOW_SSE2 */
diff --git a/deps/v8/third_party/zlib/google/OWNERS b/deps/v8/third_party/zlib/google/OWNERS
index 1ca2531463..868af3cc66 100644
--- a/deps/v8/third_party/zlib/google/OWNERS
+++ b/deps/v8/third_party/zlib/google/OWNERS
@@ -3,3 +3,4 @@ satorux@chromium.org
# compression_utils*
asvitkine@chromium.org
isherman@chromium.org
+cavalcantii@chromium.org
diff --git a/deps/v8/third_party/zlib/google/compression_utils_portable.cc b/deps/v8/third_party/zlib/google/compression_utils_portable.cc
index 191e349e31..2926810446 100644
--- a/deps/v8/third_party/zlib/google/compression_utils_portable.cc
+++ b/deps/v8/third_party/zlib/google/compression_utils_portable.cc
@@ -84,7 +84,7 @@ int CompressHelper(WrapperType wrapper_type,
int compression_level,
void* (*malloc_fn)(size_t),
void (*free_fn)(void*)) {
- if (compression_level < 1 || compression_level > 9) {
+ if (compression_level < 0 || compression_level > 9) {
compression_level = Z_DEFAULT_COMPRESSION;
}
diff --git a/deps/v8/third_party/zlib/google/zip_internal.cc b/deps/v8/third_party/zlib/google/zip_internal.cc
index 314740f544..a67b6c9435 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.cc
+++ b/deps/v8/third_party/zlib/google/zip_internal.cc
@@ -5,6 +5,7 @@
#include "third_party/zlib/google/zip_internal.h"
#include <stddef.h>
+#include <string.h>
#include <algorithm>
diff --git a/deps/v8/third_party/zlib/patches/0005-infcover-gtest.patch b/deps/v8/third_party/zlib/patches/0005-infcover-gtest.patch
new file mode 100644
index 0000000000..d963a3aa07
--- /dev/null
+++ b/deps/v8/third_party/zlib/patches/0005-infcover-gtest.patch
@@ -0,0 +1,405 @@
+From 409594639f15d825202971db7a275023e05772ff Mon Sep 17 00:00:00 2001
+From: Adenilson Cavalcanti <adenilson.cavalcanti@arm.com>
+Date: Tue, 28 Apr 2020 10:48:01 -0700
+Subject: [PATCH] Local Changes: - make C tests build as C++ code so we can
+ use gtest. - use gtest EXPECT_TRUE instead of C assert. - replace C
+ streams for C++ (portability issues).
+
+---
+ test/infcover.c | 167 ++++++++++++++++++++++++++----------------------
+ 1 file changed, 90 insertions(+), 77 deletions(-)
+
+diff --git a/test/infcover.c b/test/infcover.c
+index 2be0164..a8c51c7 100644
+--- a/test/infcover.c
++++ b/test/infcover.c
+@@ -4,11 +4,12 @@
+ */
+
+ /* to use, do: ./configure --cover && make cover */
+-
++// clang-format off
++#include "infcover.h"
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <assert.h>
++
+ #include "zlib.h"
+
+ /* get definition of internal structure so we can mess with it (see pull()),
+@@ -17,8 +18,22 @@
+ #include "inftrees.h"
+ #include "inflate.h"
+
++/* XXX: use C++ streams instead of printf/fputs/etc due to portability
++ * as type sizes can vary between platforms.
++ */
++#include <iostream>
+ #define local static
+
++/* XXX: hacking C assert and plugging into GTest. */
++#include "testing/gtest/include/gtest/gtest.h"
++#if defined(assert)
++#undef assert
++#define assert EXPECT_TRUE
++#endif
++
++/* XXX: handle what is a reserved word in C++. */
++#define try try_f
++
+ /* -- memory tracking routines -- */
+
+ /*
+@@ -72,7 +87,7 @@ local void *mem_alloc(void *mem, unsigned count, unsigned size)
+ {
+ void *ptr;
+ struct mem_item *item;
+- struct mem_zone *zone = mem;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(mem);
+ size_t len = count * (size_t)size;
+
+ /* induced allocation failure */
+@@ -87,7 +102,7 @@ local void *mem_alloc(void *mem, unsigned count, unsigned size)
+ memset(ptr, 0xa5, len);
+
+ /* create a new item for the list */
+- item = malloc(sizeof(struct mem_item));
++ item = static_cast<struct mem_item *>(malloc(sizeof(struct mem_item)));
+ if (item == NULL) {
+ free(ptr);
+ return NULL;
+@@ -112,7 +127,7 @@ local void *mem_alloc(void *mem, unsigned count, unsigned size)
+ local void mem_free(void *mem, void *ptr)
+ {
+ struct mem_item *item, *next;
+- struct mem_zone *zone = mem;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(mem);
+
+ /* if no zone, just do a free */
+ if (zone == NULL) {
+@@ -159,7 +174,7 @@ local void mem_setup(z_stream *strm)
+ {
+ struct mem_zone *zone;
+
+- zone = malloc(sizeof(struct mem_zone));
++ zone = static_cast<struct mem_zone *>(malloc(sizeof(struct mem_zone)));
+ assert(zone != NULL);
+ zone->first = NULL;
+ zone->total = 0;
+@@ -175,33 +190,33 @@ local void mem_setup(z_stream *strm)
+ /* set a limit on the total memory allocation, or 0 to remove the limit */
+ local void mem_limit(z_stream *strm, size_t limit)
+ {
+- struct mem_zone *zone = strm->opaque;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(strm->opaque);
+
+ zone->limit = limit;
+ }
+
+ /* show the current total requested allocations in bytes */
+-local void mem_used(z_stream *strm, char *prefix)
++local void mem_used(z_stream *strm, const char *prefix)
+ {
+- struct mem_zone *zone = strm->opaque;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(strm->opaque);
+
+- fprintf(stderr, "%s: %lu allocated\n", prefix, zone->total);
++ std::cout << prefix << ": " << zone->total << " allocated" << std::endl;
+ }
+
+ /* show the high water allocation in bytes */
+-local void mem_high(z_stream *strm, char *prefix)
++local void mem_high(z_stream *strm, const char *prefix)
+ {
+- struct mem_zone *zone = strm->opaque;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(strm->opaque);
+
+- fprintf(stderr, "%s: %lu high water mark\n", prefix, zone->highwater);
++ std::cout << prefix << ": " << zone->highwater << " high water mark" << std::endl;
+ }
+
+ /* release the memory allocation zone -- if there are any surprises, notify */
+-local void mem_done(z_stream *strm, char *prefix)
++local void mem_done(z_stream *strm, const char *prefix)
+ {
+ int count = 0;
+ struct mem_item *item, *next;
+- struct mem_zone *zone = strm->opaque;
++ struct mem_zone *zone = static_cast<struct mem_zone *>(strm->opaque);
+
+ /* show high water mark */
+ mem_high(strm, prefix);
+@@ -218,13 +233,20 @@ local void mem_done(z_stream *strm, char *prefix)
+
+ /* issue alerts about anything unexpected */
+ if (count || zone->total)
+- fprintf(stderr, "** %s: %lu bytes in %d blocks not freed\n",
+- prefix, zone->total, count);
++ std::cout << "** " << prefix << ": "
++ << zone->total << " bytes in "
++ << count << " blocks not freed"
++ << std::endl;
++
+ if (zone->notlifo)
+- fprintf(stderr, "** %s: %d frees not LIFO\n", prefix, zone->notlifo);
++ std::cout << "** " << prefix << ": "
++ << zone->notlifo << " frees not LIFO"
++ << std::endl;
++
+ if (zone->rogue)
+- fprintf(stderr, "** %s: %d frees not recognized\n",
+- prefix, zone->rogue);
++ std::cout << "** " << prefix << ": "
++ << zone->rogue << " frees not recognized"
++ << std::endl;
+
+ /* free the zone and delete from the stream */
+ free(zone);
+@@ -247,7 +269,7 @@ local unsigned char *h2b(const char *hex, unsigned *len)
+ unsigned char *in, *re;
+ unsigned next, val;
+
+- in = malloc((strlen(hex) + 1) >> 1);
++ in = static_cast<unsigned char *>(malloc((strlen(hex) + 1) >> 1));
+ if (in == NULL)
+ return NULL;
+ next = 0;
+@@ -268,7 +290,7 @@ local unsigned char *h2b(const char *hex, unsigned *len)
+ } while (*hex++); /* go through the loop with the terminating null */
+ if (len != NULL)
+ *len = next;
+- re = realloc(in, next);
++ re = static_cast<unsigned char *>(realloc(in, next));
+ return re == NULL ? in : re;
+ }
+
+@@ -281,7 +303,7 @@ local unsigned char *h2b(const char *hex, unsigned *len)
+ header information is collected with inflateGetHeader(). If a zlib stream
+ is looking for a dictionary, then an empty dictionary is provided.
+ inflate() is run until all of the input data is consumed. */
+-local void inf(char *hex, char *what, unsigned step, int win, unsigned len,
++local void inf(const char *hex, const char *what, unsigned step, int win, unsigned len,
+ int err)
+ {
+ int ret;
+@@ -298,7 +320,7 @@ local void inf(char *hex, char *what, unsigned step, int win, unsigned len,
+ mem_done(&strm, what);
+ return;
+ }
+- out = malloc(len); assert(out != NULL);
++ out = static_cast<unsigned char *>(malloc(len)); assert(out != NULL);
+ if (win == 47) {
+ head.extra = out;
+ head.extra_max = len;
+@@ -347,7 +369,7 @@ local void inf(char *hex, char *what, unsigned step, int win, unsigned len,
+ }
+
+ /* cover all of the lines in inflate.c up to inflate() */
+-local void cover_support(void)
++void cover_support(void)
+ {
+ int ret;
+ z_stream strm;
+@@ -381,11 +403,11 @@ local void cover_support(void)
+ strm.next_in = Z_NULL;
+ ret = inflateInit(&strm); assert(ret == Z_OK);
+ ret = inflateEnd(&strm); assert(ret == Z_OK);
+- fputs("inflate built-in memory routines\n", stderr);
++ std::cout << "inflate built-in memory routines" << std::endl;;
+ }
+
+ /* cover all inflate() header and trailer cases and code after inflate() */
+-local void cover_wrap(void)
++void cover_wrap(void)
+ {
+ int ret;
+ z_stream strm, copy;
+@@ -394,7 +416,7 @@ local void cover_wrap(void)
+ ret = inflate(Z_NULL, 0); assert(ret == Z_STREAM_ERROR);
+ ret = inflateEnd(Z_NULL); assert(ret == Z_STREAM_ERROR);
+ ret = inflateCopy(Z_NULL, Z_NULL); assert(ret == Z_STREAM_ERROR);
+- fputs("inflate bad parameters\n", stderr);
++ std::cout << "inflate bad parameters" << std::endl;
+
+ inf("1f 8b 0 0", "bad gzip method", 0, 31, 0, Z_DATA_ERROR);
+ inf("1f 8b 8 80", "bad gzip flags", 0, 31, 0, Z_DATA_ERROR);
+@@ -415,9 +437,9 @@ local void cover_wrap(void)
+ strm.next_in = Z_NULL;
+ ret = inflateInit2(&strm, -8);
+ strm.avail_in = 2;
+- strm.next_in = (void *)"\x63";
++ strm.next_in = (Bytef *)"\x63";
+ strm.avail_out = 1;
+- strm.next_out = (void *)&ret;
++ strm.next_out = (Bytef *)&ret;
+ mem_limit(&strm, 1);
+ ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_MEM_ERROR);
+ ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_MEM_ERROR);
+@@ -428,11 +450,11 @@ local void cover_wrap(void)
+ mem_limit(&strm, (sizeof(struct inflate_state) << 1) + 256);
+ ret = inflatePrime(&strm, 16, 0); assert(ret == Z_OK);
+ strm.avail_in = 2;
+- strm.next_in = (void *)"\x80";
++ strm.next_in = (Bytef *)"\x80";
+ ret = inflateSync(&strm); assert(ret == Z_DATA_ERROR);
+ ret = inflate(&strm, Z_NO_FLUSH); assert(ret == Z_STREAM_ERROR);
+ strm.avail_in = 4;
+- strm.next_in = (void *)"\0\0\xff\xff";
++ strm.next_in = (Bytef *)"\0\0\xff\xff";
+ ret = inflateSync(&strm); assert(ret == Z_OK);
+ (void)inflateSyncPoint(&strm);
+ ret = inflateCopy(&copy, &strm); assert(ret == Z_MEM_ERROR);
+@@ -454,7 +476,7 @@ local unsigned pull(void *desc, unsigned char **buf)
+ next = 0;
+ return 0; /* no input (already provided at next_in) */
+ }
+- state = (void *)((z_stream *)desc)->state;
++ state = reinterpret_cast<struct inflate_state *>(((z_stream *)desc)->state);
+ if (state != Z_NULL)
+ state->mode = SYNC; /* force an otherwise impossible situation */
+ return next < sizeof(dat) ? (*buf = dat + next++, 1) : 0;
+@@ -467,7 +489,7 @@ local int push(void *desc, unsigned char *buf, unsigned len)
+ }
+
+ /* cover inflateBack() up to common deflate data cases and after those */
+-local void cover_back(void)
++void cover_back(void)
+ {
+ int ret;
+ z_stream strm;
+@@ -479,17 +501,17 @@ local void cover_back(void)
+ ret = inflateBack(Z_NULL, Z_NULL, Z_NULL, Z_NULL, Z_NULL);
+ assert(ret == Z_STREAM_ERROR);
+ ret = inflateBackEnd(Z_NULL); assert(ret == Z_STREAM_ERROR);
+- fputs("inflateBack bad parameters\n", stderr);
++ std::cout << "inflateBack bad parameters" << std::endl;;
+
+ mem_setup(&strm);
+ ret = inflateBackInit(&strm, 15, win); assert(ret == Z_OK);
+ strm.avail_in = 2;
+- strm.next_in = (void *)"\x03";
++ strm.next_in = (Bytef *)"\x03";
+ ret = inflateBack(&strm, pull, Z_NULL, push, Z_NULL);
+ assert(ret == Z_STREAM_END);
+ /* force output error */
+ strm.avail_in = 3;
+- strm.next_in = (void *)"\x63\x00";
++ strm.next_in = (Bytef *)"\x63\x00";
+ ret = inflateBack(&strm, pull, Z_NULL, push, &strm);
+ assert(ret == Z_BUF_ERROR);
+ /* force mode error by mucking with state */
+@@ -500,11 +522,11 @@ local void cover_back(void)
+
+ ret = inflateBackInit(&strm, 15, win); assert(ret == Z_OK);
+ ret = inflateBackEnd(&strm); assert(ret == Z_OK);
+- fputs("inflateBack built-in memory routines\n", stderr);
++ std::cout << "inflateBack built-in memory routines" << std::endl;;
+ }
+
+ /* do a raw inflate of data in hexadecimal with both inflate and inflateBack */
+-local int try(char *hex, char *id, int err)
++local int try(const char *hex, const char *id, int err)
+ {
+ int ret;
+ unsigned len, size;
+@@ -518,11 +540,11 @@ local int try(char *hex, char *id, int err)
+
+ /* allocate work areas */
+ size = len << 3;
+- out = malloc(size);
++ out = static_cast<unsigned char *>(malloc(size));
+ assert(out != NULL);
+- win = malloc(32768);
++ win = static_cast<unsigned char *>(malloc(32768));
+ assert(win != NULL);
+- prefix = malloc(strlen(id) + 6);
++ prefix = static_cast<char *>(malloc(strlen(id) + 6));
+ assert(prefix != NULL);
+
+ /* first with inflate */
+@@ -578,7 +600,7 @@ local int try(char *hex, char *id, int err)
+ }
+
+ /* cover deflate data cases in both inflate() and inflateBack() */
+-local void cover_inflate(void)
++void cover_inflate(void)
+ {
+ try("0 0 0 0 0", "invalid stored block lengths", 1);
+ try("3 0", "fixed", 0);
+@@ -613,32 +635,33 @@ local void cover_inflate(void)
+ inf("63 18 5 40 c 0", "window wrap", 3, -8, 300, Z_OK);
+ }
+
++/* XXX(cavalcantii): fix linking error due inflate_table. */
+ /* cover remaining lines in inftrees.c */
+-local void cover_trees(void)
+-{
+- int ret;
+- unsigned bits;
+- unsigned short lens[16], work[16];
+- code *next, table[ENOUGH_DISTS];
+-
+- /* we need to call inflate_table() directly in order to manifest not-
+- enough errors, since zlib insures that enough is always enough */
+- for (bits = 0; bits < 15; bits++)
+- lens[bits] = (unsigned short)(bits + 1);
+- lens[15] = 15;
+- next = table;
+- bits = 15;
+- ret = inflate_table(DISTS, lens, 16, &next, &bits, work);
+- assert(ret == 1);
+- next = table;
+- bits = 1;
+- ret = inflate_table(DISTS, lens, 16, &next, &bits, work);
+- assert(ret == 1);
+- fputs("inflate_table not enough errors\n", stderr);
+-}
++/* void cover_trees(void) */
++/* { */
++/* int ret; */
++/* unsigned bits; */
++/* unsigned short lens[16], work[16]; */
++/* code *next, table[ENOUGH_DISTS]; */
++
++/* /\* we need to call inflate_table() directly in order to manifest not- */
++/* enough errors, since zlib insures that enough is always enough *\/ */
++/* for (bits = 0; bits < 15; bits++) */
++/* lens[bits] = (unsigned short)(bits + 1); */
++/* lens[15] = 15; */
++/* next = table; */
++/* bits = 15; */
++/* ret = inflate_table(DISTS, lens, 16, &next, &bits, work); */
++/* assert(ret == 1); */
++/* next = table; */
++/* bits = 1; */
++/* ret = inflate_table(DISTS, lens, 16, &next, &bits, work); */
++/* assert(ret == 1); */
++/* fputs("inflate_table not enough errors\n", stderr); */
++/* } */
+
+ /* cover remaining inffast.c decoding and window copying */
+-local void cover_fast(void)
++void cover_fast(void)
+ {
+ inf("e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68"
+ " ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, Z_DATA_ERROR);
+@@ -658,14 +681,4 @@ local void cover_fast(void)
+ Z_STREAM_END);
+ }
+
+-int main(void)
+-{
+- fprintf(stderr, "%s\n", zlibVersion());
+- cover_support();
+- cover_wrap();
+- cover_back();
+- cover_inflate();
+- cover_trees();
+- cover_fast();
+- return 0;
+-}
++// clang-format on
+--
+2.21.1 (Apple Git-122.3)
+
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index 97a6638215..2ad6cf2daf 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1107,7 +1107,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
function handleLoadText(text, append, fileName) {
- handleLoadJSON(JSON.parse(text), append, fileName);
+ try {
+ handleLoadJSON(JSON.parse(text), append, fileName);
+ } catch(e) {
+ if (!fileName.endsWith('.txt')) {
+ alert(`Error parsing "${fileName}"`);
+ console.error(e);
+ }
+ handleLoadTXT(text, append, fileName);
+ }
}
function getStateFromParams() {
@@ -1128,17 +1136,37 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (append && !isFirstLoad) {
json = createUniqueVersions(json)
}
- var state = getStateFromParams();
if (!append || isFirstLoad) {
pages = new Pages();
versions = Versions.fromJSON(json);
} else {
Versions.fromJSON(json).forEach(e => versions.add(e))
}
+ displayResultsAfterLoading(isFirstLoad)
+ }
+
+ function handleLoadTXT(txt, append, fileName) {
+ let isFirstLoad = pages === undefined;
+ // Load raw RCS output which contains a single page
+ if (!append || isFirstLoad) {
+ pages = new Pages();
+ versions = new Versions()
+ }
+ versions.add(Version.fromTXT(fileName, txt))
+ displayResultsAfterLoading()
+
+ }
+
+ function displayResultsAfterLoading(isFirstLoad) {
+ let state = getStateFromParams();
initialize()
if (isFirstLoad && !popHistoryState(state)) {
showEntry(selectedPage.total);
+ return;
}
+ selectedPage = versions.versions[0].pages[0]
+ if (selectedPage == undefined) return;
+ showPage(selectedPage);
}
function fixClusterTelemetryResults(json) {
@@ -1358,14 +1386,15 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (page !== undefined) return page;
}
}
- }
- Versions.fromJSON = function(json) {
- var versions = new Versions();
- for (var version in json) {
- versions.add(Version.fromJSON(version, json[version]));
+
+ static fromJSON(json) {
+ var versions = new Versions();
+ for (var version in json) {
+ versions.add(Version.fromJSON(version, json[version]));
+ }
+ versions.sort();
+ return versions;
}
- versions.sort();
- return versions;
}
class Version {
@@ -1475,14 +1504,22 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
sort() {
this.pages.sort(NameComparator)
}
- }
- Version.fromJSON = function(name, data) {
- var version = new Version(name);
- for (var pageName in data) {
- version.add(PageVersion.fromJSON(version, pageName, data[pageName]));
+
+ static fromJSON(name, data) {
+ var version = new Version(name);
+ for (var pageName in data) {
+ version.add(PageVersion.fromJSON(version, pageName, data[pageName]));
+ }
+ version.sort();
+ return version;
+ }
+
+ static fromTXT(name, txt) {
+ let version = new Version(name);
+ let pageName = "RAW DATA";
+ version.add(PageVersion.fromTXT(version, pageName, txt));
+ return version;
}
- version.sort();
- return version;
}
class Pages extends Map {
@@ -1610,24 +1647,39 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
getNextPage() {
return this.version.getNextPage(this);
}
- }
- PageVersion.fromJSON = function(version, name, data) {
- let page = new PageVersion(version, pages.get(name));
- // Distinguish between the legacy format which just uses Arrays,
- // or the new object style.
- if (Array.isArray(data)) {
- for (let i = 0; i < data.length; i++) {
- page.add(Entry.fromLegacyJSON(i, data[data.length - i - 1]));
+
+ static fromJSON(version, name, data) {
+ let page = new PageVersion(version, pages.get(name));
+ // Distinguish between the legacy format which just uses Arrays,
+ // or the new object style.
+ if (Array.isArray(data)) {
+ for (let i = 0; i < data.length; i++) {
+ page.add(Entry.fromLegacyJSON(i, data[data.length - i - 1]));
+ }
+ } else {
+ let position = 0;
+ for (let metric_name in data) {
+ page.add(Entry.fromJSON(position, metric_name, data[metric_name]));
+ position++;
+ }
}
- } else {
- let position = 0;
- for (let metric_name in data) {
- page.add(Entry.fromJSON(position, metric_name, data[metric_name]));
- position++;
+ page.sort();
+ return page
+ }
+
+ static fromTXT(version, name, txt) {
+ let pageVersion = new PageVersion(version, pages.get(name));
+ let lines = txt.split('\n');
+ let split = / +/g
+ // Skip the first two lines (HEADER and SEPARATOR)
+ for (let i = 2; i < lines.length; i++) {
+ let line = lines[i].trim().split(split)
+ if (line.length != 5) continue;
+ let position = i-2;
+ pageVersion.add(Entry.fromTXT(position, line));
}
+ return pageVersion;
}
- page.sort();
- return page
}
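The new TXT path expects raw RCS output: two header lines followed by rows of five whitespace-separated columns (name, time, time%, count, count%). A rough Python equivalent of that parsing step, with the column meanings assumed from the code above:

    def parse_rcs_txt(txt):
        entries = []
        for position, line in enumerate(txt.split('\n')[2:]):
            cols = line.strip().split()
            if len(cols) != 5:
                continue
            name, time, _time_pct, count, _count_pct = cols
            entries.append((position, name,
                            float(time.rstrip('ms')), int(count)))
        return entries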
@@ -1716,15 +1768,27 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
get timeVariancePercent() {
return this._timeVariancePercent
}
- }
- Entry.fromLegacyJSON = function(position, data) {
- return new Entry(position, ...data);
- }
- Entry.fromJSON = function(position, name, data) {
- let time = data.duration;
- let count = data.count;
- return new Entry(position, name, time.average, time.stddev,
- count.average, count.stddev);
+
+ static fromLegacyJSON(position, data) {
+ return new Entry(position, ...data);
+ }
+
+ static fromJSON(position, name, data) {
+ let time = data.duration;
+ let count = data.count;
+ return new Entry(position, name, time.average, time.stddev, 0,
+ count.average, count.stddev, 0);
+ }
+
+ static fromTXT(position, splitLine) {
+ let [name, time, timePercent, count, countPercent] = splitLine;
+ time = time.split('ms')
+ let timeDeviation = 0, countDeviation = 0;
+ let timeDeviationPercent = 0, countDeviationPercent = 0
+ return new Entry(position, name,
+ Number.parseFloat(time), timeDeviation, timeDeviationPercent,
+ Number.parseInt(count), countDeviation, countDeviationPercent)
+ }
}
class Group {
@@ -1899,11 +1963,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
<form name="fileForm">
<p>
<label for="uploadInput">Load File:</label>
- <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json">
+ <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json,.txt">
</p>
<p>
<label for="appendInput">Append File:</label>
- <input id="appendInput" type="file" name="files" onchange="handleAppendFile();" accept=".json">
+ <input id="appendInput" type="file" name="files" onchange="handleAppendFile();" accept=".json,.txt">
</p>
</form>
</div>
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 73db1cb0c3..e1511535bf 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index 3c105ef1b4..953071228e 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/v8_commands.py b/deps/v8/tools/clusterfuzz/v8_commands.py
index e84cd915e3..1956ef2802 100644
--- a/deps/v8/tools/clusterfuzz/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/v8_commands.py
@@ -18,6 +18,7 @@ PYTHON3 = sys.version_info >= (3, 0)
DEFAULT_FLAGS = [
'--correctness-fuzzer-suppressions',
'--expose-gc',
+ '--fuzzing',
'--allow-natives-for-differential-fuzzing',
'--invoke-weak-callbacks',
'--omit-quit',
diff --git a/deps/v8/tools/clusterfuzz/v8_mock.js b/deps/v8/tools/clusterfuzz/v8_mock.js
index 618e14c784..6372a7afe2 100644
--- a/deps/v8/tools/clusterfuzz/v8_mock.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock.js
@@ -14,13 +14,26 @@ var prettyPrinted = function prettyPrinted(msg) { return msg; };
// Mock Math.random.
(function() {
- let index = 0
+ let index = 1;
Math.random = function() {
- index = (index + 1) % 10;
- return index / 10.0;
+ const x = Math.sin(index++) * 10000;
+ return x - Math.floor(x);
+ }
+})();
+
+// Mock Math.pow. Work around an optimization for -0.5.
+(function() {
+ const origMathPow = Math.pow;
+ Math.pow = function(a, b) {
+ if (b === -0.5) {
+ return 0;
+ } else {
+ return origMathPow(a, b);
+ }
}
})();
+
// Mock Date.
(function() {
let index = 0;
@@ -159,3 +172,14 @@ Object.defineProperty(
}
};
})();
+
+// Mock Realm.
+Realm.eval = function(realm, code) { return eval(code) };
+
+// Mock the nondeterministic parts of WeakRef and FinalizationRegistry.
+WeakRef.prototype.deref = function() { };
+FinalizationRegistry = function(callback) { };
+FinalizationRegistry.prototype.register = function(target, holdings) { };
+FinalizationRegistry.prototype.unregister = function(unregisterToken) { };
+FinalizationRegistry.prototype.cleanupSome = function() { };
+FinalizationRegistry.prototype[Symbol.toStringTag] = "FinalizationRegistry";
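
The Math.random mock above swaps V8's real PRNG for a deterministic sine-hash sequence so differential-fuzzing runs are reproducible across configurations; WeakRef and FinalizationRegistry are stubbed out for the same reason. A minimal Python sketch of the same sine-hash idea (the patch implements it in JavaScript inside v8_mock.js; the names below are illustrative):

    import math

    def make_mock_random(start_index=1):
        # Deterministic stand-in for Math.random(): the fractional part of
        # sin(index) * 10000, advancing the index on every call.
        index = start_index
        def rand():
            nonlocal index
            x = math.sin(index) * 10000
            index += 1
            return x - math.floor(x)
        return rand

    rand = make_mock_random()
    print([round(rand(), 6) for _ in range(3)])  # same sequence on every run
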
diff --git a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
index f2cb8935a2..c2f0b2a4d9 100644
--- a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
+++ b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -30,3 +30,16 @@ print("https://crbug.com/985154");
}
print(Object.getOwnPropertyNames(foo().bar));
})();
+
+print("Suppresses sensitive natives");
+(function () {
+ function foo() {}
+ %PrepareFunctionForOptimization(foo);
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ print(%GetOptimizationStatus(foo));
+ const fun = new Function("f", "sync", "return %GetOptimizationStatus(f);");
+ print(fun(foo));
+})();
diff --git a/deps/v8/tools/csvparser.js b/deps/v8/tools/csvparser.js
index bd106a7a41..a4d030441a 100644
--- a/deps/v8/tools/csvparser.js
+++ b/deps/v8/tools/csvparser.js
@@ -46,14 +46,14 @@ class CsvParser {
while (nextPos !== -1) {
let escapeIdentifier = string.charAt(nextPos + 1);
pos = nextPos + 2;
- if (escapeIdentifier == 'n') {
+ if (escapeIdentifier === 'n') {
result += '\n';
nextPos = pos;
- } else if (escapeIdentifier == '\\') {
+ } else if (escapeIdentifier === '\\') {
result += '\\';
nextPos = pos;
} else {
- if (escapeIdentifier == 'x') {
+ if (escapeIdentifier === 'x') {
// \x00 ascii range escapes consume 2 chars.
nextPos = pos + 2;
} else {
@@ -71,7 +71,7 @@ class CsvParser {
// If there are no more escape sequences consume the rest of the string.
if (nextPos === -1) {
result += string.substr(pos);
- } else if (pos != nextPos) {
+ } else if (pos !== nextPos) {
result += string.substring(pos, nextPos);
}
}
diff --git a/deps/v8/tools/debug_helper/BUILD.gn b/deps/v8/tools/debug_helper/BUILD.gn
index b151e12918..a2d76d1fda 100644
--- a/deps/v8/tools/debug_helper/BUILD.gn
+++ b/deps/v8/tools/debug_helper/BUILD.gn
@@ -12,6 +12,8 @@ config("internal_config") {
defines = [ "BUILDING_V8_DEBUG_HELPER" ]
}
+ configs = [ "../..:v8_tracing_config" ]
+
include_dirs = [
".",
"../..",
@@ -25,6 +27,9 @@ config("external_config") {
if (is_component_build) {
defines = [ "USING_V8_DEBUG_HELPER" ]
}
+
+ configs = [ "../..:external_config" ]
+
include_dirs = [ "." ]
}
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index 9146dd4633..0e8fbf02a6 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -7,7 +7,8 @@
#include "debug-helper-internal.h"
#include "heap-constants.h"
#include "include/v8-internal.h"
-#include "src/common/ptr-compr-inl.h"
+#include "src/common/external-pointer.h"
+#include "src/execution/isolate-utils.h"
#include "src/objects/string-inl.h"
#include "src/strings/unicode-inl.h"
#include "torque-generated/class-debug-readers-tq.h"
@@ -323,8 +324,15 @@ class ReadStringVisitor : public TqObjectVisitor {
// require knowledge of the embedder. For now, we only read cached external
// strings.
if (IsExternalStringCached(object)) {
- uintptr_t data_address = reinterpret_cast<uintptr_t>(
- GetOrFinish(object->GetResourceDataValue(accessor_)));
+ ExternalPointer_t resource_data =
+ GetOrFinish(object->GetResourceDataValue(accessor_));
+#ifdef V8_COMPRESS_POINTERS
+ uintptr_t data_address = static_cast<uintptr_t>(DecodeExternalPointer(
+ Isolate::FromRoot(GetIsolateRoot(heap_addresses_.any_heap_pointer)),
+ resource_data));
+#else
+ uintptr_t data_address = reinterpret_cast<uintptr_t>(resource_data);
+#endif // V8_COMPRESS_POINTERS
if (done_) return;
ReadStringCharacters<TChar>(object, data_address);
} else {
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 1fddfcf83d..b36cd20221 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -263,7 +263,6 @@ extras_accessors = [
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
- 'UncompiledData, inferred_name, String, kInferredNameOffset',
'UncompiledData, start_position, int32_t, kStartPositionOffset',
'UncompiledData, end_position, int32_t, kEndPositionOffset',
'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
index ebdc55a618..ff0a71a393 100644
--- a/deps/v8/tools/logreader.js
+++ b/deps/v8/tools/logreader.js
@@ -190,7 +190,6 @@ LogReader.prototype.dispatchLogRow_ = function(fields) {
var command = fields[0];
var dispatch = this.dispatchTable_[command];
if (dispatch === undefined) return;
-
if (dispatch === null || this.skipDispatch(dispatch)) {
return;
}
@@ -241,7 +240,7 @@ LogReader.prototype.processLogLine_ = function(line) {
var fields = this.csvParser_.parseLine(line);
this.dispatchLogRow_(fields);
} catch (e) {
- this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e));
+ this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e) + '\n' + e.stack);
}
}
this.lineNum_++;
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
index a453c9a189..77e0e7b19c 100644
--- a/deps/v8/tools/map-processor.html
+++ b/deps/v8/tools/map-processor.html
@@ -16,6 +16,29 @@ h1, h2, h3, section {
padding-left: 15px;
}
+kbd {
+ background-color: #eee;
+ border-radius: 3px;
+ border: 1px solid black;
+ display: inline-block;
+ font-size: .9em;
+ font-weight: bold;
+ padding: 0px 4px 2px 4px;
+ white-space: nowrap;
+}
+dl {
+ display: grid;
+ grid-template-columns: min-content auto;
+ grid-gap: 10px;
+}
+dt {
+ text-align: right;
+ white-space: nowrap;
+}
+dd {
+ margin: 0;
+}
+
#content {
opacity: 0.0;
height: 0px;
@@ -102,25 +125,35 @@ h1, h2, h3, section {
border: 1px black solid;
}
+#stats {
+ display: flex;
+ height: 250px;
+}
+
#stats table {
- display: inline-block;
+ flex: 1;
padding-right: 50px;
+ max-height: 250px;
+ display: inline-block;
}
#stats table td {
cursor: pointer;
}
#stats .transitionTable {
- max-height: 200px;
overflow-y: scroll;
}
+#stats .transitionTable tr {
+ max-width: 200px;
+}
#stats .transitionType {
text-align: right;
+ max-width: 380px;
}
#stats .transitionType tr td:nth-child(2) {
text-align: left;
}
-#stats .transitionType tr:nth-child(1) td {
+#stats table thead td {
border-bottom: 1px black dotted;
}
@@ -452,19 +485,6 @@ function tr() {
return document.createElement("tr");
}
-function define(prototype, name, fn) {
- Object.defineProperty(prototype, name, {value:fn, enumerable:false});
-}
-
-define(Array.prototype, "max", function(fn) {
- if (this.length == 0) return undefined;
- if (fn == undefined) fn = (each) => each;
- let max = fn(this[0]);
- for (let i = 1; i < this.length; i++) {
- max = Math.max(max, fn(this[i]));
- }
- return max;
-})
define(Array.prototype, "histogram", function(mapFn) {
let histogram = [];
for (let i = 0; i < this.length; i++) {
@@ -483,9 +503,6 @@ define(Array.prototype, "histogram", function(mapFn) {
return histogram;
});
-define(Array.prototype, "first", function() { return this[0] });
-define(Array.prototype, "last", function() { return this[this.length - 1] });
-
// =========================================================================
// EventHandlers
function handleBodyLoad() {
@@ -698,6 +715,7 @@ class View {
timeNode.style.left = ((time-start) * timeToPixel) + "px";
chunksNode.appendChild(timeNode);
};
+ let backgroundTodo = [];
for (let i = 0; i < chunks.length; i++) {
let chunk = chunks[i];
let height = (chunk.size() / max * kChunkHeight);
@@ -711,10 +729,13 @@ class View {
node.addEventListener("mousemove", e => this.handleChunkMouseMove(e));
node.addEventListener("click", e => this.handleChunkClick(e));
node.addEventListener("dblclick", e => this.handleChunkDoubleClick(e));
- this.setTimelineChunkBackground(chunk, node);
+ backgroundTodo.push([chunk, node])
chunksNode.appendChild(node);
chunk.markers.forEach(marker => addTimestamp(marker.time, marker.name));
}
+
+ this.asyncSetTimelineChunkBackground(backgroundTodo)
+
// Put a time marker roughly every 20 chunks.
let expected = duration / chunks.length * 20;
let interval = (10 ** Math.floor(Math.log10(expected)));
@@ -753,6 +774,22 @@ class View {
this.transitionView.showMaps(chunk.getUniqueTransitions());
}
+ asyncSetTimelineChunkBackground(backgroundTodo) {
+ const kIncrement = 100;
+ let start = 0;
+ let delay = 1;
+ while (start < backgroundTodo.length) {
+ let end = Math.min(start+kIncrement, backgroundTodo.length);
+ setTimeout((from, to) => {
+ for (let i = from; i < to; i++) {
+ let [chunk, node] = backgroundTodo[i];
+ this.setTimelineChunkBackground(chunk, node);
+ }
+ }, delay++, start, end);
+ start = end;
+ }
+ }
+
setTimelineChunkBackground(chunk, node) {
// Render the types of transitions as bar charts
const kHeight = chunk.height;
@@ -779,7 +816,7 @@ class View {
});
}
- let imageData = this.backgroundCanvas.toDataURL("image/png");
+ let imageData = this.backgroundCanvas.toDataURL("image/webp", 0.2);
node.style.backgroundImage = "url(" + imageData + ")";
}
@@ -818,7 +855,7 @@ class View {
ctx.stroke();
ctx.closePath();
ctx.fill();
- let imageData = canvas.toDataURL("image/png");
+ let imageData = canvas.toDataURL("image/webp", 0.2);
$("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
}
@@ -1101,7 +1138,7 @@ class StatsView {
}
updateGeneralStats() {
let pairs = [
- ["Maps", null, e => true],
+ ["Total", null, e => true],
["Transitions", 'black', e => e.edge && e.edge.isTransition()],
["Fast to Slow", 'violet', e => e.edge && e.edge.isFastToSlow()],
["Slow to Fast", 'orange', e => e.edge && e.edge.isSlowToFast()],
@@ -1115,6 +1152,7 @@ class StatsView {
let text = "";
let tableNode = table("transitionType");
+ tableNode.innerHTML = "<thead><tr><td>Color</td><td>Type</td><td>Count</td><td>Percent</td></tr></thead>";
let name, filter;
let total = this.timeline.size();
pairs.forEach(([name, color, filter]) => {
@@ -1122,11 +1160,16 @@ class StatsView {
if (color !== null) {
row.appendChild(td(div(['colorbox', color])));
} else {
- row.appendChild(td(""));
+ row.appendChild(td(""));
+ }
+ row.onclick = (e) => {
+ // lazily compute the stats
+ let node = e.target.parentNode;
+ if (node.maps == undefined) {
+ node.maps = this.timeline.filterUniqueTransitions(filter);
+ }
+ this.transitionView.showMaps(node.maps);
}
- row.maps = this.timeline.filterUniqueTransitions(filter);
- row.onclick =
- (e) => this.transitionView.showMaps(e.target.parentNode.maps);
row.appendChild(td(name));
let count = this.timeline.count(filter);
row.appendChild(td(count));
@@ -1139,6 +1182,7 @@ class StatsView {
updateNamedTransitionsStats() {
let tableNode = table("transitionTable");
let nameMapPairs = Array.from(this.timeline.transitions.entries());
+    tableNode.innerHTML = "<thead><tr><td>Property Name</td><td>#</td></tr></thead>";
nameMapPairs
.sort((a,b) => b[1].length - a[1].length)
.forEach(([name, maps]) => {
@@ -1213,9 +1257,35 @@ function transitionTypeToColor(type) {
<section id="mapDetails"></section>
</div>
- <h2>Instructions</h2>
<section>
+ <h2>Instructions</h2>
<p>Visualize Map trees that have been gathered using <code>--trace-maps</code>.</p>
+ <h3>Keyboard Shortcuts</h3>
+ <dl>
+ <dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
+ <dd>Follow Map transition forward (first child)</dd>
+
+ <dt><kbd>SHIFT</kbd> + <kbd>Arrow Down</kbd></dt>
+ <dd>Follow Map transition backwards</dd>
+
+ <dt><kbd>Arrow Up</kbd></dt>
+ <dd>Go to previous Map chunk</dd>
+
+ <dt><kbd>Arrow Down</kbd></dt>
+ <dd>Go to next Map in chunk</dd>
+
+ <dt><kbd>Arrow Left</kbd></dt>
+ <dd>Go to previous chunk</dd>
+
+ <dt><kbd>Arrow Right</kbd></dt>
+ <dd>Go to next chunk</dd>
+
+ <dt><kbd>+</kbd></dt>
+ <dd>Timeline zoom in</dd>
+
+ <dt><kbd>-</kbd></dt>
+ <dd>Timeline zoom out</dd>
+ </dl>
</section>
<div id="tooltip">
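
The new asyncSetTimelineChunkBackground helper above renders the chunk backgrounds in slices of 100, each scheduled with its own setTimeout, so building the timeline no longer blocks the page while every background image is drawn up front. A rough Python sketch of the slicing idea only (the scheduling itself happens in JavaScript; batched and the batch size are illustrative):

    def batched(items, batch_size=100):
        # Yield consecutive fixed-size slices; each slice corresponds to one
        # deferred setTimeout callback in the JavaScript version.
        for start in range(0, len(items), batch_size):
            yield items[start:start + batch_size]

    for batch in batched(list(range(250))):
        print(len(batch))  # 100, 100, 50
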
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js
index 7e8572af8c..d743cba383 100644
--- a/deps/v8/tools/map-processor.js
+++ b/deps/v8/tools/map-processor.js
@@ -3,10 +3,28 @@
// found in the LICENSE file.
// ===========================================================================
+function define(prototype, name, fn) {
+ Object.defineProperty(prototype, name, {value:fn, enumerable:false});
+}
+
+define(Array.prototype, "max", function(fn) {
+ if (this.length === 0) return undefined;
+ if (fn === undefined) fn = (each) => each;
+ let max = fn(this[0]);
+ for (let i = 1; i < this.length; i++) {
+ max = Math.max(max, fn(this[i]));
+ }
+ return max;
+})
+define(Array.prototype, "first", function() { return this[0] });
+define(Array.prototype, "last", function() { return this[this.length - 1] });
+// ===========================================================================
+
class MapProcessor extends LogReader {
constructor() {
super();
this.dispatchTable_ = {
+ __proto__:null,
'code-creation': {
parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
parseString, parseVarArgs],
@@ -41,6 +59,7 @@ class MapProcessor extends LogReader {
};
this.profile_ = new Profile();
this.timeline_ = new Timeline();
+ this.formatPCRegexp_ = /(.*):[0-9]+:[0-9]+$/;
}
printError(str) {
@@ -73,9 +92,15 @@ class MapProcessor extends LogReader {
processLogFile(fileName) {
this.collectEntries = true
this.lastLogFileName_ = fileName;
+ let i = 1;
let line;
- while (line = readline()) {
- this.processLogLine(line);
+ try {
+ while (line = readline()) {
+ this.processLogLine(line);
+ i++;
+ }
+ } catch(e) {
+      console.error("Error occurred while parsing line " + i + ", trying to continue: " + e);
}
return this.finalize();
}
@@ -131,13 +156,12 @@ class MapProcessor extends LogReader {
formatPC(pc, line, column) {
let entry = this.profile_.findEntry(pc);
if (!entry) return "<unknown>"
- if (entry.type == "Builtin") {
+ if (entry.type === "Builtin") {
return entry.name;
}
let name = entry.func.getName();
- let re = /(.*):[0-9]+:[0-9]+$/;
- let array = re.exec(name);
- if (!array) {
+ let array = this.formatPCRegexp_.exec(name);
+ if (array === null) {
entry = name;
} else {
entry = entry.getState() + array[1];
@@ -146,12 +170,12 @@ class MapProcessor extends LogReader {
}
processMap(type, time, from, to, pc, line, column, reason, name) {
- time = parseInt(time);
- if (type == "Deprecate") return this.deprecateMap(type, time, from);
- from = this.getExistingMap(from, time);
- to = this.getExistingMap(to, time);
- let edge = new Edge(type, name, reason, time, from, to);
- to.filePosition = this.formatPC(pc, line, column);
+ let time_ = parseInt(time);
+ if (type === "Deprecate") return this.deprecateMap(type, time_, from);
+ let from_ = this.getExistingMap(from, time_);
+ let to_ = this.getExistingMap(to, time_);
+ let edge = new Edge(type, name, reason, time, from_, to_);
+ to_.filePosition = this.formatPC(pc, line, column);
edge.finishSetup();
}
@@ -170,7 +194,7 @@ class MapProcessor extends LogReader {
//TODO(cbruni): fix initial map logging.
let map = this.getExistingMap(id, time);
if (!map.description) {
- map.description = string;
+ //map.description = string;
}
}
@@ -212,19 +236,30 @@ class V8Map {
this.filePosition = "";
}
+ finalizeRootMap(id) {
+ let stack = [this];
+ while (stack.length > 0) {
+ let current = stack.pop();
+ if (current.leftId !== 0) {
+ console.error("Skipping potential parent loop between maps:", current)
+ continue;
+ }
+ current.finalize(id)
+ id += 1;
+ current.children.forEach(edge => stack.push(edge.to))
+ // TODO implement rightId
+ }
+ return id;
+ }
+
finalize(id) {
// Initialize preorder tree traversal Ids for fast subtree inclusion checks
if (id <= 0) throw "invalid id";
let currentId = id;
this.leftId = currentId
- this.children.forEach(edge => {
- let map = edge.to;
- currentId = map.finalize(currentId + 1);
- });
- this.rightId = currentId + 1;
- return currentId + 1;
}
+
parent() {
if (this.edge === void 0) return void 0;
return this.edge.from;
@@ -239,7 +274,7 @@ class V8Map {
}
isRoot() {
- return this.edge == void 0 || this.edge.from == void 0;
+ return this.edge === void 0 || this.edge.from === void 0;
}
contains(map) {
@@ -300,16 +335,16 @@ class V8Map {
}
static get(id) {
- if (!this.cache) return undefined;
return this.cache.get(id);
}
static set(id, map) {
- if (!this.cache) this.cache = new Map();
this.cache.set(id, map);
}
}
+V8Map.cache = new Map();
+
// ===========================================================================
class Edge {
@@ -323,21 +358,21 @@ class Edge {
}
finishSetup() {
- if (this.from) this.from.addEdge(this);
- if (this.to) {
- this.to.edge = this;
- if (this.to === this.from) throw "From and to must be distinct.";
- if (this.from) {
- if (this.to.time < this.from.time) {
- console.error("invalid time order");
- }
- let newDepth = this.from.depth + 1;
- if (this.to.depth > 0 && this.to.depth != newDepth) {
- console.error("Depth has already been initialized");
- }
- this.to.depth = newDepth;
- }
+ let from = this.from
+ if (from) from.addEdge(this);
+ let to = this.to;
+ if (to === undefined) return;
+ to.edge = this;
+ if (from === undefined ) return;
+ if (to === from) throw "From and to must be distinct.";
+ if (to.time < from.time) {
+ console.error("invalid time order");
}
+ let newDepth = from.depth + 1;
+ if (to.depth > 0 && to.depth != newDepth) {
+ console.error("Depth has already been initialized");
+ }
+ to.depth = newDepth;
}
chunkIndex(chunks) {
@@ -471,16 +506,16 @@ class Timeline {
finalize() {
let id = 0;
this.forEach(map => {
- if (map.isRoot()) id = map.finalize(id + 1);
- if (map.edge && map.edge.name) {
- let edge = map.edge;
- let list = this.transitions.get(edge.name);
- if (list === undefined) {
- this.transitions.set(edge.name, [edge]);
- } else {
- list.push(edge);
+ if (map.isRoot()) id = map.finalizeRootMap(id + 1);
+ if (map.edge && map.edge.name) {
+ let edge = map.edge;
+ let list = this.transitions.get(edge.name);
+ if (list === undefined) {
+ this.transitions.set(edge.name, [edge]);
+ } else {
+ list.push(edge);
+ }
}
- }
});
this.markers.sort((a, b) => b.time - a.time);
}
@@ -490,7 +525,7 @@ class Timeline {
}
isEmpty() {
- return this.size() == 0
+ return this.size() === 0
}
size() {
@@ -573,7 +608,7 @@ class Timeline {
count(filter) {
return this.values.reduce((sum, each) => {
- return sum + (filter(each) ? 1 : 0);
+ return sum + (filter(each) === true ? 1 : 0);
}, 0);
}
@@ -584,10 +619,10 @@ class Timeline {
filterUniqueTransitions(filter) {
// Returns a list of Maps whose parent is not in the list.
return this.values.filter(map => {
- if (!filter(map)) return false;
+ if (filter(map) === false) return false;
let parent = map.parent();
- if (!parent) return true;
- return !filter(parent);
+ if (parent === undefined) return true;
+ return filter(parent) === false;
});
}
@@ -617,7 +652,7 @@ class Chunk {
}
isEmpty() {
- return this.items.length == 0;
+ return this.items.length === 0;
}
last() {
@@ -662,7 +697,7 @@ class Chunk {
findChunk(chunks, delta) {
let i = this.index + delta;
let chunk = chunks[i];
- while (chunk && chunk.size() == 0) {
+ while (chunk && chunk.size() === 0) {
i += delta;
chunk = chunks[i]
}
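
finalizeRootMap above replaces the old recursive finalize() with an explicit stack: it walks the transition tree iteratively, hands out pre-order ids, and skips any map whose leftId is already set, which guards against parent loops as well as deep recursion. A small Python sketch of that traversal, assuming nodes with a left_id field (0 means unvisited) and children edges with a to attribute, mirroring the names in the patch:

    def finalize_root_map(root, next_id):
        # Iterative pre-order id assignment with an explicit stack.
        stack = [root]
        while stack:
            current = stack.pop()
            if current.left_id != 0:
                # Already numbered: a potential parent loop, skip it.
                continue
            current.left_id = next_id
            next_id += 1
            for edge in current.children:
                stack.append(edge.to)
        return next_id
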
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index c4036bb918..43cb639c83 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -346,9 +346,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -800,9 +797,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 7464978eb2..39c3467b1b 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -14,6 +14,7 @@ ALL_VARIANT_FLAGS = {
"interpreted_regexp": [["--regexp-interpret-all"]],
"jitless": [["--jitless"]],
"minor_mc": [["--minor-mc"]],
+ "nci": [["--turbo-nci"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
@@ -28,6 +29,7 @@ ALL_VARIANT_FLAGS = {
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
+ "stress_snapshot": [["--stress-snapshot"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
@@ -40,6 +42,7 @@ ALL_VARIANT_FLAGS = {
SLOW_VARIANTS = set([
'stress',
+ 'stress_snapshot',
'nooptimization',
])
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 1b7acc41b7..10545fa5f2 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -259,6 +259,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for v in user_variants:
if v not in ALL_VARIANTS:
print('Unknown variant: %s' % v)
+ print(' Available variants: %s' % ALL_VARIANTS)
+ print(' Available variant aliases: %s' % VARIANT_ALIASES.keys());
raise base_runner.TestRunnerError()
assert False, 'Unreachable'
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index e9aff17705..671eebb922 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -12,9 +12,9 @@ import platform
import subprocess
import sys
import time
+import util
from . import base
-from ..local import junit_output
# Base dir of the build products for Release and Debug.
@@ -317,45 +317,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
@@ -369,7 +330,14 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.arch = arch
self.mode = mode
self.results = []
- self.tests = []
+ self.duration_sum = 0
+ self.test_count = 0
+
+ def configure(self, options):
+ super(JsonTestProgressIndicator, self).configure(options)
+ self.tests = util.FixedSizeTopList(
+ self.options.slow_tests_cutoff,
+ key=lambda rec: rec['duration'])
def _on_result_for(self, test, result):
if result.is_rerun:
@@ -381,9 +349,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
for run, result in enumerate(results):
# TODO(majeski): Support for dummy/grouped results
output = result.output
- # Buffer all tests for sorting the durations in the end.
- # TODO(machenbach): Running average + buffer only slowest 20 tests.
- self.tests.append((test, output.duration, result.cmd))
+
+ self._buffer_slow_tests(test, result, output, run)
# Omit tests that run as expected on the first try.
# Everything that happens after the first run is included in the output
@@ -391,15 +358,36 @@ class JsonTestProgressIndicator(ProgressIndicator):
if not result.has_unexpected_output and run == 0:
continue
- self.results.append({
+ record = self._test_record(test, result, output, run)
+ record.update({
+ "result": test.output_proc.get_outcome(output),
+ "stdout": output.stdout,
+ "stderr": output.stderr,
+ })
+ self.results.append(record)
+
+ def _buffer_slow_tests(self, test, result, output, run):
+ def result_value(test, result, output):
+ if not result.has_unexpected_output:
+ return ""
+ return test.output_proc.get_outcome(output)
+
+ record = self._test_record(test, result, output, run)
+ record.update({
+ "result": result_value(test, result, output),
+ "marked_slow": test.is_slow,
+ })
+ self.tests.add(record)
+ self.duration_sum += record['duration']
+ self.test_count += 1
+
+ def _test_record(self, test, result, output, run):
+ return {
"name": str(test),
"flags": result.cmd.args,
"command": result.cmd.to_string(relative=True),
"run": run + 1,
- "stdout": output.stdout,
- "stderr": output.stderr,
"exit_code": output.exit_code,
- "result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
"random_seed": test.random_seed,
@@ -407,7 +395,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"variant": test.variant,
"variant_flags": test.variant_flags,
"framework_name": self.framework_name,
- })
+ }
def finished(self):
complete_results = []
@@ -417,36 +405,17 @@ class JsonTestProgressIndicator(ProgressIndicator):
complete_results = json.loads(f.read() or "[]")
duration_mean = None
- if self.tests:
- # Get duration mean.
- duration_mean = (
- sum(duration for (_, duration, cmd) in self.tests) /
- float(len(self.tests)))
-
- # Sort tests by duration.
- self.tests.sort(key=lambda __duration_cmd: __duration_cmd[1], reverse=True)
- cutoff = self.options.slow_tests_cutoff
- slowest_tests = self._test_records(self.tests[:cutoff])
+ if self.test_count:
+ duration_mean = self.duration_sum / self.test_count
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
- "slowest_tests": slowest_tests,
+ "slowest_tests": self.tests.as_list(),
"duration_mean": duration_mean,
- "test_total": len(self.tests),
+ "test_total": self.test_count,
})
with open(self.options.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
-
- def _test_records(self, tests):
- return [
- {
- "name": str(test),
- "flags": cmd.args,
- "command": cmd.to_string(relative=True),
- "duration": duration,
- "marked_slow": test.is_slow,
- } for (test, duration, cmd) in tests
- ]
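
The rewritten JsonTestProgressIndicator above no longer buffers every test: it keeps a running duration sum and count for the mean, and feeds each record into a FixedSizeTopList sized by --slow-tests-cutoff so only the slowest entries are retained. A hedged sketch of that bookkeeping (test_records and the report layout are illustrative; FixedSizeTopList is the helper added in util.py below):

    from util import FixedSizeTopList

    def summarize(test_records, cutoff=100):
        slowest = FixedSizeTopList(cutoff, key=lambda rec: rec['duration'])
        duration_sum = 0.0
        test_count = 0
        for rec in test_records:           # each record carries at least a 'duration'
            slowest.add(rec)
            duration_sum += rec['duration']
            test_count += 1
        mean = duration_sum / test_count if test_count else None
        return {
            'slowest_tests': slowest.as_list(),
            'duration_mean': mean,
            'test_total': test_count,
        }
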
diff --git a/deps/v8/tools/testrunner/testproc/util.py b/deps/v8/tools/testrunner/testproc/util.py
new file mode 100644
index 0000000000..8c1024cc81
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/util.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import heapq
+import random
+
+
+class FixedSizeTopList():
+ """Utility collection for gathering a fixed number of elements with the
+ biggest value for the given key. It employs a heap from which we pop the
+ smallest element when the collection is 'full'.
+
+ If you need a reversed behaviour (collect min values) just provide an
+ inverse key."""
+
+ def __init__(self, size, key=None):
+ self.size = size
+ self.key = key or (lambda x: x)
+ self.data = []
+ self.discriminator = 0
+
+ def add(self, elem):
+ elem_k = self.key(elem)
+ heapq.heappush(self.data, (elem_k, self.extra_key(), elem))
+ if len(self.data) > self.size:
+ heapq.heappop(self.data)
+
+ def extra_key(self):
+ # Avoid key clash in tuples sent to the heap.
+ # We want to avoid comparisons on the last element of the tuple
+ # since those elements might not be comparable.
+ self.discriminator += 1
+ return self.discriminator
+
+ def as_list(self):
+ original_data = [rec for (_, _, rec) in self.data]
+ return sorted(original_data, key=self.key, reverse=True)
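
A short usage example for the FixedSizeTopList just added: only the `size` records with the largest key survive, and as_list() returns them largest first. The (key, discriminator, element) tuples pushed onto the heap matter because, when two keys tie, heapq would otherwise fall back to comparing the payloads themselves, which raises TypeError for dicts in Python 3. The record fields below are illustrative:

    from util import FixedSizeTopList

    top = FixedSizeTopList(2, key=lambda rec: rec['duration'])
    top.add({'name': 'a', 'duration': 3.0})
    top.add({'name': 'b', 'duration': 1.0})
    top.add({'name': 'c', 'duration': 2.5})
    print([rec['name'] for rec in top.as_list()])  # ['a', 'c'] -- the two slowest
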
diff --git a/deps/v8/tools/testrunner/testproc/util_unittest.py b/deps/v8/tools/testrunner/testproc/util_unittest.py
new file mode 100644
index 0000000000..243bf9789a
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/util_unittest.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from util import FixedSizeTopList
+import unittest
+
+class TestOrderedFixedSizeList(unittest.TestCase):
+ def test_empty(self):
+ ofsl = FixedSizeTopList(3)
+ self.assertEqual(ofsl.as_list(), [])
+
+ def test_12(self):
+ ofsl = FixedSizeTopList(3)
+ ofsl.add(1)
+ ofsl.add(2)
+ self.assertEqual(ofsl.as_list(), [2,1])
+
+ def test_4321(self):
+ ofsl = FixedSizeTopList(3)
+ ofsl.add(4)
+ ofsl.add(3)
+ ofsl.add(2)
+ ofsl.add(1)
+ data = ofsl.as_list()
+ self.assertEqual(data, [4,3,2])
+
+ def test_544321(self):
+ ofsl = FixedSizeTopList(4)
+ ofsl.add(5)
+ ofsl.add(4)
+ ofsl.add(4)
+ ofsl.add(3)
+ ofsl.add(2)
+ ofsl.add(1)
+ data = ofsl.as_list()
+ self.assertEqual(data, [5, 4, 4, 3])
+
+ def test_withkey(self):
+ ofsl = FixedSizeTopList(3,key=lambda x: x['val'])
+ ofsl.add({'val':4, 'something': 'four'})
+ ofsl.add({'val':3, 'something': 'three'})
+ ofsl.add({'val':-1, 'something': 'minusone'})
+ ofsl.add({'val':5, 'something': 'five'})
+ ofsl.add({'val':0, 'something': 'zero'})
+ data = [e['something'] for e in ofsl.as_list()]
+ self.assertEqual(data, ['five', 'four', 'three'])
+
+ def test_withkeyclash(self):
+    # Test that a key clash does not throw an exception
+ ofsl = FixedSizeTopList(2,key=lambda x: x['val'])
+ ofsl.add({'val':2, 'something': 'two'})
+ ofsl.add({'val':2, 'something': 'two'})
+ ofsl.add({'val':0, 'something': 'zero'})
+ data = [e['something'] for e in ofsl.as_list()]
+ self.assertEqual(data, ['two', 'two'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index fd51fa2eaf..96eb3a1909 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -35,12 +35,18 @@ def preprocess(input):
input = re.sub(r'@if\(', r'@iF(', input)
input = re.sub(r'@export', r'@eXpOrT', input)
input = re.sub(r'js-implicit[ \n]+', r'jS_iMpLiCiT_', input)
+ input = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)(\s*)$', r'\1}\2', input, flags = re.MULTILINE);
# Special handing of '%' for intrinsics, turn the percent
# into a unicode character so that it gets treated as part of the
# intrinsic's name if it's already adjacent to it.
input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
+ # includes are not recognized, change them into comments so that the
+ # formatter ignores them first, until we can figure out a way to format cpp
+ # includes within a JS file.
+ input = re.sub(r'^#include', r'// InClUdE', input, flags=re.MULTILINE)
+
return input
def postprocess(output):
@@ -68,9 +74,13 @@ def postprocess(output):
r"@export", output)
output = re.sub(r'jS_iMpLiCiT_',
r"js-implicit ", output)
+ output = re.sub(r'}\n *label ', r'} label ', output);
+ output = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)}(\s*)$', r'\1\2', output, flags = re.MULTILINE);
output = re.sub(kPercentEscape, r'%', output)
+ output = re.sub( r'^// InClUdE',r'#include', output, flags=re.MULTILINE)
+
return output
def process(filename, lint, should_format):
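
The format-torque changes above follow a hide-and-restore pattern: constructs the formatter cannot parse (#include lines, empty namespace blocks, % intrinsic prefixes) are rewritten into harmless sentinel tokens in preprocess() and mapped back in postprocess(). A minimal Python sketch of that round trip for the #include case, using the same regexes as the patch:

    import re

    def preprocess(src):
        # Hide #include directives behind a comment token the formatter keeps verbatim.
        return re.sub(r'^#include', r'// InClUdE', src, flags=re.MULTILINE)

    def postprocess(src):
        # Restore the original directive after formatting.
        return re.sub(r'^// InClUdE', r'#include', src, flags=re.MULTILINE)

    assert postprocess(preprocess('#include "foo.h"\n')) == '#include "foo.h"\n'
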
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 14d999b5f4..3fc91b8e90 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -253,7 +253,6 @@ class SystemTest(unittest.TestCase):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
- pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
@@ -268,10 +267,15 @@ class SystemTest(unittest.TestCase):
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
+ # We need lexicographic sorting here to avoid non-deterministic behaviour
+ # The original sorting key is duration, but in our fake test we have
+ # non-deterministic durations before we reset them to 1
+ json_output['slowest_tests'].sort(key= lambda x: str(x))
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
+ pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 31fac89ec8..d1fdb49525 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -1,124 +1,157 @@
{
- "arch": "x64",
- "duration_mean": 1,
- "mode": "release",
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 2,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/strawberries",
- "random_seed": 123,
- "result": "FAIL",
- "run": 3,
- "stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "stderr": "",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
- "slowest_tests": [
+ ],
+ "slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "marked_slow": true,
- "name": "sweet/strawberries"
- },
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
+ "variant_flags": []
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "marked_slow": true,
- "name": "sweet/strawberries"
- },
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 2,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
+ "variant_flags": []
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
"flags": [
- "--test",
- "strawberries",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "marked_slow": true,
- "name": "sweet/strawberries"
+ "--test",
+ "strawberries",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": true,
+ "name": "sweet/strawberries",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 3,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
+ "variant_flags": []
}
- ],
+ ],
"test_total": 3
-}
+} \ No newline at end of file
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index fd17972798..ac9ab9cc59 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -1,82 +1,104 @@
{
- "arch": "x64",
- "duration_mean": 1,
- "mode": "release",
+ "arch": "x64",
+ "duration_mean": 1,
+ "mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "FAIL",
- "run": 1,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
- },
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
- "exit_code": 0,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 0,
"expected": [
"PASS"
- ],
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "framework_name": "standard_runner",
- "name": "sweet/bananaflakes",
- "random_seed": 123,
- "result": "PASS",
- "run": 2,
- "stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
- "target_name": "d8_mocked.py",
- "variant": "default",
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "PASS",
+ "run": 2,
+ "stderr": "",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+ "target_name": "d8_mocked.py",
+ "variant": "default",
"variant_flags": []
}
- ],
+ ],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 0,
+ "expected": [
+ "PASS"
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "marked_slow": false,
- "name": "sweet/bananaflakes"
- },
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": false,
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "",
+ "run": 2,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
+ "variant_flags": []
+ },
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
- "duration": 1,
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+ "duration": 1,
+ "exit_code": 1,
+ "expected": [
+ "PASS"
+ ],
"flags": [
- "bananaflakes",
- "--random-seed=123",
- "--nohard-abort",
- "--testing-d8-test-runner"
- ],
- "marked_slow": false,
- "name": "sweet/bananaflakes"
+ "bananaflakes",
+ "--random-seed=123",
+ "--nohard-abort",
+ "--testing-d8-test-runner"
+ ],
+ "framework_name": "standard_runner",
+ "marked_slow": false,
+ "name": "sweet/bananaflakes",
+ "random_seed": 123,
+ "result": "FAIL",
+ "run": 1,
+ "target_name": "d8_mocked.py",
+ "variant": "default",
+ "variant_flags": []
}
- ],
+ ],
"test_total": 2
}
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 07c4ebf8cb..ac69cfb836 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -29,8 +29,8 @@ INSTANCE_TYPES = {
65: "BIG_INT_BASE_TYPE",
66: "HEAP_NUMBER_TYPE",
67: "ODDBALL_TYPE",
- 68: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 69: "EXPORTED_SUB_CLASS_TYPE",
+ 68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
+ 69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
70: "FOREIGN_TYPE",
71: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
72: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
@@ -77,71 +77,76 @@ INSTANCE_TYPES = {
113: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
115: "WASM_JS_FUNCTION_DATA_TYPE",
- 116: "FIXED_ARRAY_TYPE",
- 117: "HASH_TABLE_TYPE",
- 118: "EPHEMERON_HASH_TABLE_TYPE",
- 119: "GLOBAL_DICTIONARY_TYPE",
- 120: "NAME_DICTIONARY_TYPE",
- 121: "NUMBER_DICTIONARY_TYPE",
- 122: "ORDERED_HASH_MAP_TYPE",
- 123: "ORDERED_HASH_SET_TYPE",
- 124: "ORDERED_NAME_DICTIONARY_TYPE",
- 125: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 126: "STRING_TABLE_TYPE",
- 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 129: "SCOPE_INFO_TYPE",
- 130: "SCRIPT_CONTEXT_TABLE_TYPE",
- 131: "BYTE_ARRAY_TYPE",
- 132: "BYTECODE_ARRAY_TYPE",
- 133: "FIXED_DOUBLE_ARRAY_TYPE",
- 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 135: "AWAIT_CONTEXT_TYPE",
- 136: "BLOCK_CONTEXT_TYPE",
- 137: "CATCH_CONTEXT_TYPE",
- 138: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 139: "EVAL_CONTEXT_TYPE",
- 140: "FUNCTION_CONTEXT_TYPE",
- 141: "MODULE_CONTEXT_TYPE",
- 142: "NATIVE_CONTEXT_TYPE",
- 143: "SCRIPT_CONTEXT_TYPE",
- 144: "WITH_CONTEXT_TYPE",
- 145: "SMALL_ORDERED_HASH_MAP_TYPE",
- 146: "SMALL_ORDERED_HASH_SET_TYPE",
- 147: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 148: "SOURCE_TEXT_MODULE_TYPE",
- 149: "SYNTHETIC_MODULE_TYPE",
- 150: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 151: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 152: "WEAK_FIXED_ARRAY_TYPE",
- 153: "TRANSITION_ARRAY_TYPE",
- 154: "CELL_TYPE",
- 155: "CODE_TYPE",
- 156: "CODE_DATA_CONTAINER_TYPE",
- 157: "COVERAGE_INFO_TYPE",
- 158: "DESCRIPTOR_ARRAY_TYPE",
- 159: "EMBEDDER_DATA_ARRAY_TYPE",
- 160: "FEEDBACK_METADATA_TYPE",
- 161: "FEEDBACK_VECTOR_TYPE",
- 162: "FILLER_TYPE",
- 163: "FREE_SPACE_TYPE",
- 164: "INTERNAL_CLASS_TYPE",
- 165: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 166: "MAP_TYPE",
- 167: "PREPARSE_DATA_TYPE",
- 168: "PROPERTY_ARRAY_TYPE",
- 169: "PROPERTY_CELL_TYPE",
- 170: "SHARED_FUNCTION_INFO_TYPE",
- 171: "SMI_BOX_TYPE",
- 172: "SMI_PAIR_TYPE",
- 173: "SORT_STATE_TYPE",
- 174: "WEAK_ARRAY_LIST_TYPE",
- 175: "WEAK_CELL_TYPE",
- 176: "JS_PROXY_TYPE",
+ 116: "WASM_VALUE_TYPE",
+ 117: "FIXED_ARRAY_TYPE",
+ 118: "HASH_TABLE_TYPE",
+ 119: "EPHEMERON_HASH_TABLE_TYPE",
+ 120: "GLOBAL_DICTIONARY_TYPE",
+ 121: "NAME_DICTIONARY_TYPE",
+ 122: "NUMBER_DICTIONARY_TYPE",
+ 123: "ORDERED_HASH_MAP_TYPE",
+ 124: "ORDERED_HASH_SET_TYPE",
+ 125: "ORDERED_NAME_DICTIONARY_TYPE",
+ 126: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 127: "STRING_TABLE_TYPE",
+ 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 129: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 130: "SCOPE_INFO_TYPE",
+ 131: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 132: "BYTE_ARRAY_TYPE",
+ 133: "BYTECODE_ARRAY_TYPE",
+ 134: "FIXED_DOUBLE_ARRAY_TYPE",
+ 135: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 147: "SMALL_ORDERED_HASH_SET_TYPE",
+ 148: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 149: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 150: "EXPORTED_SUB_CLASS_TYPE",
+ 151: "SOURCE_TEXT_MODULE_TYPE",
+ 152: "SYNTHETIC_MODULE_TYPE",
+ 153: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 154: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 155: "WEAK_FIXED_ARRAY_TYPE",
+ 156: "TRANSITION_ARRAY_TYPE",
+ 157: "CELL_TYPE",
+ 158: "CODE_TYPE",
+ 159: "CODE_DATA_CONTAINER_TYPE",
+ 160: "COVERAGE_INFO_TYPE",
+ 161: "DESCRIPTOR_ARRAY_TYPE",
+ 162: "EMBEDDER_DATA_ARRAY_TYPE",
+ 163: "FEEDBACK_METADATA_TYPE",
+ 164: "FEEDBACK_VECTOR_TYPE",
+ 165: "FILLER_TYPE",
+ 166: "FREE_SPACE_TYPE",
+ 167: "INTERNAL_CLASS_TYPE",
+ 168: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 169: "MAP_TYPE",
+ 170: "PREPARSE_DATA_TYPE",
+ 171: "PROPERTY_ARRAY_TYPE",
+ 172: "PROPERTY_CELL_TYPE",
+ 173: "SHARED_FUNCTION_INFO_TYPE",
+ 174: "SMI_BOX_TYPE",
+ 175: "SMI_PAIR_TYPE",
+ 176: "SORT_STATE_TYPE",
+ 177: "WASM_ARRAY_TYPE",
+ 178: "WASM_STRUCT_TYPE",
+ 179: "WEAK_ARRAY_LIST_TYPE",
+ 180: "WEAK_CELL_TYPE",
+ 181: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 177: "JS_GLOBAL_OBJECT_TYPE",
- 178: "JS_GLOBAL_PROXY_TYPE",
- 179: "JS_MODULE_NAMESPACE_TYPE",
+ 182: "JS_GLOBAL_OBJECT_TYPE",
+ 183: "JS_GLOBAL_PROXY_TYPE",
+ 184: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_MAP_KEY_ITERATOR_TYPE",
@@ -159,19 +164,19 @@ INSTANCE_TYPES = {
1054: "JS_WEAK_MAP_TYPE",
1055: "JS_WEAK_SET_TYPE",
1056: "JS_API_OBJECT_TYPE",
- 1058: "JS_ARGUMENTS_OBJECT_TYPE",
- 1059: "JS_ARRAY_TYPE",
- 1060: "JS_ARRAY_BUFFER_TYPE",
- 1061: "JS_ARRAY_ITERATOR_TYPE",
- 1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 1063: "JS_COLLATOR_TYPE",
- 1064: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 1065: "JS_DATE_TYPE",
- 1066: "JS_DATE_TIME_FORMAT_TYPE",
- 1067: "JS_DISPLAY_NAMES_TYPE",
- 1068: "JS_ERROR_TYPE",
- 1069: "JS_FINALIZATION_REGISTRY_TYPE",
- 1070: "JS_FINALIZATION_REGISTRY_CLEANUP_ITERATOR_TYPE",
+ 1058: "JS_AGGREGATE_ERROR_TYPE",
+ 1059: "JS_ARGUMENTS_OBJECT_TYPE",
+ 1060: "JS_ARRAY_TYPE",
+ 1061: "JS_ARRAY_BUFFER_TYPE",
+ 1062: "JS_ARRAY_ITERATOR_TYPE",
+ 1063: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 1064: "JS_COLLATOR_TYPE",
+ 1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 1066: "JS_DATE_TYPE",
+ 1067: "JS_DATE_TIME_FORMAT_TYPE",
+ 1068: "JS_DISPLAY_NAMES_TYPE",
+ 1069: "JS_ERROR_TYPE",
+ 1070: "JS_FINALIZATION_REGISTRY_TYPE",
1071: "JS_LIST_FORMAT_TYPE",
1072: "JS_LOCALE_TYPE",
1073: "JS_MESSAGE_OBJECT_TYPE",
@@ -198,78 +203,78 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x00121): (163, "FreeSpaceMap"),
- ("read_only_space", 0x00149): (166, "MetaMap"),
+ ("read_only_space", 0x00121): (166, "FreeSpaceMap"),
+ ("read_only_space", 0x00149): (169, "MetaMap"),
("read_only_space", 0x0018d): (67, "NullMap"),
- ("read_only_space", 0x001c5): (158, "DescriptorArrayMap"),
- ("read_only_space", 0x001f5): (152, "WeakFixedArrayMap"),
- ("read_only_space", 0x0021d): (162, "OnePointerFillerMap"),
- ("read_only_space", 0x00245): (162, "TwoPointerFillerMap"),
+ ("read_only_space", 0x001c5): (161, "DescriptorArrayMap"),
+ ("read_only_space", 0x001f5): (155, "WeakFixedArrayMap"),
+ ("read_only_space", 0x0021d): (165, "OnePointerFillerMap"),
+ ("read_only_space", 0x00245): (165, "TwoPointerFillerMap"),
("read_only_space", 0x00289): (67, "UninitializedMap"),
("read_only_space", 0x002cd): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x00329): (67, "UndefinedMap"),
("read_only_space", 0x0035d): (66, "HeapNumberMap"),
("read_only_space", 0x003a1): (67, "TheHoleMap"),
("read_only_space", 0x00401): (67, "BooleanMap"),
- ("read_only_space", 0x00489): (131, "ByteArrayMap"),
- ("read_only_space", 0x004b1): (116, "FixedArrayMap"),
- ("read_only_space", 0x004d9): (116, "FixedCOWArrayMap"),
- ("read_only_space", 0x00501): (117, "HashTableMap"),
+ ("read_only_space", 0x00489): (132, "ByteArrayMap"),
+ ("read_only_space", 0x004b1): (117, "FixedArrayMap"),
+ ("read_only_space", 0x004d9): (117, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00501): (118, "HashTableMap"),
("read_only_space", 0x00529): (64, "SymbolMap"),
("read_only_space", 0x00551): (40, "OneByteStringMap"),
- ("read_only_space", 0x00579): (129, "ScopeInfoMap"),
- ("read_only_space", 0x005a1): (170, "SharedFunctionInfoMap"),
- ("read_only_space", 0x005c9): (155, "CodeMap"),
- ("read_only_space", 0x005f1): (154, "CellMap"),
- ("read_only_space", 0x00619): (169, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00579): (130, "ScopeInfoMap"),
+ ("read_only_space", 0x005a1): (173, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x005c9): (158, "CodeMap"),
+ ("read_only_space", 0x005f1): (157, "CellMap"),
+ ("read_only_space", 0x00619): (172, "GlobalPropertyCellMap"),
("read_only_space", 0x00641): (70, "ForeignMap"),
- ("read_only_space", 0x00669): (153, "TransitionArrayMap"),
+ ("read_only_space", 0x00669): (156, "TransitionArrayMap"),
("read_only_space", 0x00691): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x006b9): (161, "FeedbackVectorMap"),
+ ("read_only_space", 0x006b9): (164, "FeedbackVectorMap"),
("read_only_space", 0x0070d): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x0076d): (67, "ExceptionMap"),
("read_only_space", 0x007c9): (67, "TerminationExceptionMap"),
("read_only_space", 0x00831): (67, "OptimizedOutMap"),
("read_only_space", 0x00891): (67, "StaleRegisterMap"),
- ("read_only_space", 0x008d5): (130, "ScriptContextTableMap"),
- ("read_only_space", 0x008fd): (127, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x00925): (160, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x0094d): (116, "ArrayListMap"),
+ ("read_only_space", 0x008d5): (131, "ScriptContextTableMap"),
+ ("read_only_space", 0x008fd): (128, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x00925): (163, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x0094d): (117, "ArrayListMap"),
("read_only_space", 0x00975): (65, "BigIntMap"),
- ("read_only_space", 0x0099d): (128, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x009c5): (132, "BytecodeArrayMap"),
- ("read_only_space", 0x009ed): (156, "CodeDataContainerMap"),
- ("read_only_space", 0x00a15): (157, "CoverageInfoMap"),
- ("read_only_space", 0x00a3d): (133, "FixedDoubleArrayMap"),
- ("read_only_space", 0x00a65): (119, "GlobalDictionaryMap"),
+ ("read_only_space", 0x0099d): (129, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x009c5): (133, "BytecodeArrayMap"),
+ ("read_only_space", 0x009ed): (159, "CodeDataContainerMap"),
+ ("read_only_space", 0x00a15): (160, "CoverageInfoMap"),
+ ("read_only_space", 0x00a3d): (134, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x00a65): (120, "GlobalDictionaryMap"),
("read_only_space", 0x00a8d): (96, "ManyClosuresCellMap"),
- ("read_only_space", 0x00ab5): (116, "ModuleInfoMap"),
- ("read_only_space", 0x00add): (120, "NameDictionaryMap"),
+ ("read_only_space", 0x00ab5): (117, "ModuleInfoMap"),
+ ("read_only_space", 0x00add): (121, "NameDictionaryMap"),
("read_only_space", 0x00b05): (96, "NoClosuresCellMap"),
- ("read_only_space", 0x00b2d): (121, "NumberDictionaryMap"),
+ ("read_only_space", 0x00b2d): (122, "NumberDictionaryMap"),
("read_only_space", 0x00b55): (96, "OneClosureCellMap"),
- ("read_only_space", 0x00b7d): (122, "OrderedHashMapMap"),
- ("read_only_space", 0x00ba5): (123, "OrderedHashSetMap"),
- ("read_only_space", 0x00bcd): (124, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x00bf5): (167, "PreparseDataMap"),
- ("read_only_space", 0x00c1d): (168, "PropertyArrayMap"),
+ ("read_only_space", 0x00b7d): (123, "OrderedHashMapMap"),
+ ("read_only_space", 0x00ba5): (124, "OrderedHashSetMap"),
+ ("read_only_space", 0x00bcd): (125, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x00bf5): (170, "PreparseDataMap"),
+ ("read_only_space", 0x00c1d): (171, "PropertyArrayMap"),
("read_only_space", 0x00c45): (92, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x00c6d): (92, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x00c95): (92, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x00cbd): (125, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x00ce5): (116, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x00d0d): (145, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x00d35): (146, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x00d5d): (147, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x00d85): (148, "SourceTextModuleMap"),
- ("read_only_space", 0x00dad): (126, "StringTableMap"),
- ("read_only_space", 0x00dd5): (149, "SyntheticModuleMap"),
- ("read_only_space", 0x00dfd): (151, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x00e25): (150, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x00e4d): (174, "WeakArrayListMap"),
- ("read_only_space", 0x00e75): (118, "EphemeronHashTableMap"),
- ("read_only_space", 0x00e9d): (159, "EmbedderDataArrayMap"),
- ("read_only_space", 0x00ec5): (175, "WeakCellMap"),
+ ("read_only_space", 0x00cbd): (126, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x00ce5): (117, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x00d0d): (146, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x00d35): (147, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x00d5d): (148, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x00d85): (151, "SourceTextModuleMap"),
+ ("read_only_space", 0x00dad): (127, "StringTableMap"),
+ ("read_only_space", 0x00dd5): (152, "SyntheticModuleMap"),
+ ("read_only_space", 0x00dfd): (154, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x00e25): (153, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x00e4d): (179, "WeakArrayListMap"),
+ ("read_only_space", 0x00e75): (119, "EphemeronHashTableMap"),
+ ("read_only_space", 0x00e9d): (162, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x00ec5): (180, "WeakCellMap"),
("read_only_space", 0x00eed): (32, "StringMap"),
("read_only_space", 0x00f15): (41, "ConsOneByteStringMap"),
("read_only_space", 0x00f3d): (33, "ConsStringMap"),
@@ -289,60 +294,63 @@ KNOWN_MAPS = {
("read_only_space", 0x01179): (95, "EnumCacheMap"),
("read_only_space", 0x011c9): (86, "ArrayBoilerplateDescriptionMap"),
("read_only_space", 0x012c5): (98, "InterceptorInfoMap"),
- ("read_only_space", 0x032e5): (71, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x0330d): (72, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x03335): (73, "CallableTaskMap"),
- ("read_only_space", 0x0335d): (74, "CallbackTaskMap"),
- ("read_only_space", 0x03385): (75, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x033ad): (78, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x033d5): (79, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x033fd): (80, "AccessCheckInfoMap"),
- ("read_only_space", 0x03425): (81, "AccessorInfoMap"),
- ("read_only_space", 0x0344d): (82, "AccessorPairMap"),
- ("read_only_space", 0x03475): (83, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x0349d): (84, "AllocationMementoMap"),
- ("read_only_space", 0x034c5): (87, "AsmWasmDataMap"),
- ("read_only_space", 0x034ed): (88, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x03515): (89, "BreakPointMap"),
- ("read_only_space", 0x0353d): (90, "BreakPointInfoMap"),
- ("read_only_space", 0x03565): (91, "CachedTemplateObjectMap"),
- ("read_only_space", 0x0358d): (93, "ClassPositionsMap"),
- ("read_only_space", 0x035b5): (94, "DebugInfoMap"),
- ("read_only_space", 0x035dd): (97, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x03605): (99, "InterpreterDataMap"),
- ("read_only_space", 0x0362d): (100, "PromiseCapabilityMap"),
- ("read_only_space", 0x03655): (101, "PromiseReactionMap"),
- ("read_only_space", 0x0367d): (102, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x036a5): (103, "PrototypeInfoMap"),
- ("read_only_space", 0x036cd): (104, "ScriptMap"),
- ("read_only_space", 0x036f5): (105, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x0371d): (106, "StackFrameInfoMap"),
- ("read_only_space", 0x03745): (107, "StackTraceFrameMap"),
- ("read_only_space", 0x0376d): (108, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x03795): (109, "Tuple2Map"),
- ("read_only_space", 0x037bd): (110, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x037e5): (111, "WasmDebugInfoMap"),
- ("read_only_space", 0x0380d): (112, "WasmExceptionTagMap"),
- ("read_only_space", 0x03835): (113, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x0385d): (114, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x03885): (115, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x038ad): (134, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x038d5): (165, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x038fd): (164, "InternalClassMap"),
- ("read_only_space", 0x03925): (172, "SmiPairMap"),
- ("read_only_space", 0x0394d): (171, "SmiBoxMap"),
- ("read_only_space", 0x03975): (68, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x0399d): (69, "ExportedSubClassMap"),
- ("read_only_space", 0x039c5): (173, "SortStateMap"),
- ("read_only_space", 0x039ed): (85, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x03a15): (85, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x03a3d): (76, "LoadHandler1Map"),
- ("read_only_space", 0x03a65): (76, "LoadHandler2Map"),
- ("read_only_space", 0x03a8d): (76, "LoadHandler3Map"),
- ("read_only_space", 0x03ab5): (77, "StoreHandler0Map"),
- ("read_only_space", 0x03add): (77, "StoreHandler1Map"),
- ("read_only_space", 0x03b05): (77, "StoreHandler2Map"),
- ("read_only_space", 0x03b2d): (77, "StoreHandler3Map"),
+ ("read_only_space", 0x03335): (71, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x0335d): (72, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x03385): (73, "CallableTaskMap"),
+ ("read_only_space", 0x033ad): (74, "CallbackTaskMap"),
+ ("read_only_space", 0x033d5): (75, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x033fd): (78, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x03425): (79, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x0344d): (80, "AccessCheckInfoMap"),
+ ("read_only_space", 0x03475): (81, "AccessorInfoMap"),
+ ("read_only_space", 0x0349d): (82, "AccessorPairMap"),
+ ("read_only_space", 0x034c5): (83, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x034ed): (84, "AllocationMementoMap"),
+ ("read_only_space", 0x03515): (87, "AsmWasmDataMap"),
+ ("read_only_space", 0x0353d): (88, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x03565): (89, "BreakPointMap"),
+ ("read_only_space", 0x0358d): (90, "BreakPointInfoMap"),
+ ("read_only_space", 0x035b5): (91, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x035dd): (93, "ClassPositionsMap"),
+ ("read_only_space", 0x03605): (94, "DebugInfoMap"),
+ ("read_only_space", 0x0362d): (97, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x03655): (99, "InterpreterDataMap"),
+ ("read_only_space", 0x0367d): (100, "PromiseCapabilityMap"),
+ ("read_only_space", 0x036a5): (101, "PromiseReactionMap"),
+ ("read_only_space", 0x036cd): (102, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x036f5): (103, "PrototypeInfoMap"),
+ ("read_only_space", 0x0371d): (104, "ScriptMap"),
+ ("read_only_space", 0x03745): (105, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x0376d): (106, "StackFrameInfoMap"),
+ ("read_only_space", 0x03795): (107, "StackTraceFrameMap"),
+ ("read_only_space", 0x037bd): (108, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x037e5): (109, "Tuple2Map"),
+ ("read_only_space", 0x0380d): (110, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x03835): (111, "WasmDebugInfoMap"),
+ ("read_only_space", 0x0385d): (112, "WasmExceptionTagMap"),
+ ("read_only_space", 0x03885): (113, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x038ad): (114, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x038d5): (115, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x038fd): (116, "WasmValueMap"),
+ ("read_only_space", 0x03925): (167, "InternalClassMap"),
+ ("read_only_space", 0x0394d): (175, "SmiPairMap"),
+ ("read_only_space", 0x03975): (174, "SmiBoxMap"),
+ ("read_only_space", 0x0399d): (149, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x039c5): (150, "ExportedSubClassMap"),
+ ("read_only_space", 0x039ed): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x03a15): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x03a3d): (135, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x03a65): (168, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x03a8d): (176, "SortStateMap"),
+ ("read_only_space", 0x03ab5): (85, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x03add): (85, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x03b05): (76, "LoadHandler1Map"),
+ ("read_only_space", 0x03b2d): (76, "LoadHandler2Map"),
+ ("read_only_space", 0x03b55): (76, "LoadHandler3Map"),
+ ("read_only_space", 0x03b7d): (77, "StoreHandler0Map"),
+ ("read_only_space", 0x03ba5): (77, "StoreHandler1Map"),
+ ("read_only_space", 0x03bcd): (77, "StoreHandler2Map"),
+ ("read_only_space", 0x03bf5): (77, "StoreHandler3Map"),
("map_space", 0x00121): (1057, "ExternalMap"),
("map_space", 0x00149): (1073, "JSMessageObjectMap"),
}
@@ -414,19 +422,41 @@ KNOWN_OBJECTS = {
("old_space", 0x004e1): "ArraySpeciesProtector",
("old_space", 0x004f5): "TypedArraySpeciesProtector",
("old_space", 0x00509): "PromiseSpeciesProtector",
- ("old_space", 0x0051d): "StringLengthProtector",
- ("old_space", 0x00531): "ArrayIteratorProtector",
- ("old_space", 0x00545): "ArrayBufferDetachingProtector",
- ("old_space", 0x00559): "PromiseHookProtector",
- ("old_space", 0x0056d): "PromiseResolveProtector",
- ("old_space", 0x00581): "MapIteratorProtector",
- ("old_space", 0x00595): "PromiseThenProtector",
- ("old_space", 0x005a9): "SetIteratorProtector",
- ("old_space", 0x005bd): "StringIteratorProtector",
- ("old_space", 0x005d1): "SingleCharacterStringCache",
- ("old_space", 0x009d9): "StringSplitCache",
- ("old_space", 0x00de1): "RegExpMultipleCache",
- ("old_space", 0x011e9): "BuiltinsConstantsTable",
+ ("old_space", 0x0051d): "RegExpSpeciesProtector",
+ ("old_space", 0x00531): "StringLengthProtector",
+ ("old_space", 0x00545): "ArrayIteratorProtector",
+ ("old_space", 0x00559): "ArrayBufferDetachingProtector",
+ ("old_space", 0x0056d): "PromiseHookProtector",
+ ("old_space", 0x00581): "PromiseResolveProtector",
+ ("old_space", 0x00595): "MapIteratorProtector",
+ ("old_space", 0x005a9): "PromiseThenProtector",
+ ("old_space", 0x005bd): "SetIteratorProtector",
+ ("old_space", 0x005d1): "StringIteratorProtector",
+ ("old_space", 0x005e5): "SingleCharacterStringCache",
+ ("old_space", 0x009ed): "StringSplitCache",
+ ("old_space", 0x00df5): "RegExpMultipleCache",
+ ("old_space", 0x011fd): "BuiltinsConstantsTable",
+ ("old_space", 0x015a1): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x015c9): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x015f1): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x01619): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x01641): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x01669): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x01691): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x016b9): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x016e1): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x01709): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x01731): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x01759): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x01781): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x017a9): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x017d1): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x017f9): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x01821): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x01849): "PromiseThenFinallySharedFun",
+ ("old_space", 0x01871): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x01899): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x018c1): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
@@ -442,10 +472,9 @@ FRAME_MARKERS = (
"CONSTRUCT_ENTRY",
"EXIT",
"OPTIMIZED",
- "WASM_COMPILED",
+ "WASM",
"WASM_TO_JS",
"JS_TO_WASM",
- "WASM_INTERPRETER_ENTRY",
"WASM_DEBUG_BREAK",
"C_WASM_ENTRY",
"WASM_EXIT",
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 0b8eb6790d..b3e9185c4d 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -71,7 +71,7 @@ log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
# Generate the proposal tests.
###############################################################################
-repos='bulk-memory-operations reference-types js-types JS-BigInt-integration multi-value'
+repos='bulk-memory-operations reference-types js-types JS-BigInt-integration'
for repo in ${repos}; do
echo "Process ${repo}"
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 482c695b65..e246f93f14 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,10 +7,10 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles......................
+The bartender starts to shake the bottles.....................
I can't add trailing whitespaces, so I'm adding this line.....
I'm starting to think that just adding trailing whitespaces might not be bad.
-Because whitespaces are not that funny......
+Because whitespaces are not that funny.....
Today's answer to life the universe and everything is 12950!
-Today's answer to life the universe and everything is 6727!
+Today's answer to life the universe and everything is 6728!