summaryrefslogtreecommitdiff
path: root/deps/v8/test
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2021-06-08 14:04:59 +0200
committerMichaël Zasso <targos@protonmail.com>2021-06-10 11:10:13 +0200
commita7cbf19a82c75e9a65e90fb8ba4947e2fc52ef39 (patch)
treedadfaa9c63c5d8db997b7c7aacc313b04131157c /deps/v8/test
parent8834ec9f5c522f7d800d85b245a9806418515b7c (diff)
downloadnode-new-a7cbf19a82c75e9a65e90fb8ba4947e2fc52ef39.tar.gz
deps: update V8 to 9.1.269.36
PR-URL: https://github.com/nodejs/node/pull/38273 Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com> Reviewed-By: Michael Dawson <midawson@redhat.com> Reviewed-By: Mary Marchini <oss@mmarchini.me>
Diffstat (limited to 'deps/v8/test')
-rw-r--r--deps/v8/test/BUILD.gn46
-rw-r--r--deps/v8/test/cctest/BUILD.gn112
-rw-r--r--deps/v8/test/cctest/OWNERS2
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.h13
-rw-r--r--deps/v8/test/cctest/cctest.status12
-rw-r--r--deps/v8/test/cctest/compiler/node-observer-tester.h1
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc27
-rw-r--r--deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc182
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc111
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc33
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc31
-rw-r--r--deps/v8/test/cctest/compiler/test-sloppy-equality.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc8
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc109
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc16
-rw-r--r--deps/v8/test/cctest/heap/test-write-barrier.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden30
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc6
-rw-r--r--deps/v8/test/cctest/test-accessors.cc38
-rw-r--r--deps/v8/test/cctest/test-api-array-buffer.cc190
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc129
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc282
-rw-r--r--deps/v8/test/cctest/test-api-typed-array.cc4
-rw-r--r--deps/v8/test/cctest/test-api.cc290
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc57
-rw-r--r--deps/v8/test/cctest/test-code-pages.cc27
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc184
-rw-r--r--deps/v8/test/cctest/test-compiler.cc7
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc76
-rw-r--r--deps/v8/test/cctest/test-debug-helper.cc25
-rw-r--r--deps/v8/test/cctest/test-debug.cc76
-rw-r--r--deps/v8/test/cctest/test-descriptor-array.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc18
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc206
-rw-r--r--deps/v8/test/cctest/test-flags.cc2
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc3
-rw-r--r--deps/v8/test/cctest/test-hashcode.cc12
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc8
-rw-r--r--deps/v8/test/cctest/test-icache.cc7
-rw-r--r--deps/v8/test/cctest/test-js-to-wasm.cc40
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc19
-rw-r--r--deps/v8/test/cctest/test-log.cc140
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc10
-rw-r--r--deps/v8/test/cctest/test-object.cc95
-rw-r--r--deps/v8/test/cctest/test-parsing.cc13
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc61
-rw-r--r--deps/v8/test/cctest/test-serialize.cc36
-rw-r--r--deps/v8/test/cctest/test-strings.cc78
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc466
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc139
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-infra.h321
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h942
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary.cc150
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc24
-rw-r--r--deps/v8/test/cctest/test-verifiers.cc4
-rw-r--r--deps/v8/test/cctest/test-web-snapshots.cc131
-rw-r--r--deps/v8/test/cctest/wasm/test-backing-store.cc (renamed from deps/v8/test/cctest/test-backing-store.cc)5
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc167
-rw-r--r--deps/v8/test/cctest/wasm/test-grow-memory.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-inspection.cc82
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc177
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc20
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc239
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc1461
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc55
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc1
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc19
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc19
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h20
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.cc752
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.h177
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h4
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc194
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h2
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc7
-rw-r--r--deps/v8/test/debugger/debug/debug-break-class-fields.js100
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js2
-rw-r--r--deps/v8/test/debugger/debugger.status6
-rw-r--r--deps/v8/test/debugger/regress/regress-crbug-1199681.js52
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js4
-rw-r--r--deps/v8/test/fuzzer/BUILD.gn17
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc2
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status6
-rw-r--r--deps/v8/test/fuzzer/inspector-fuzzer.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc167
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc25
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h2
-rw-r--r--deps/v8/test/fuzzer/wasm.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm/regress-1191853.wasmbin0 -> 25 bytes
-rw-r--r--deps/v8/test/inspector/BUILD.gn1
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-await-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js60
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/regress-1190290-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/regress-1190290.js42
-rw-r--r--deps/v8/test/inspector/debugger/regression-1185540-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/regression-1185540.js34
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt14
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js9
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt66
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js75
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt11
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js31
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint.js16
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt31
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js214
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js50
-rw-r--r--deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js9
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt34
-rw-r--r--deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js59
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt37
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scripts.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt53
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-source.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-check.js13
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js11
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js13
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping.js4
-rw-r--r--deps/v8/test/inspector/inspector-test.cc5
-rw-r--r--deps/v8/test/inspector/inspector.status4
-rw-r--r--deps/v8/test/inspector/isolate-data.cc9
-rw-r--r--deps/v8/test/inspector/isolate-data.h3
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt19
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1183664.js39
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt9
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1199919.js44
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt3
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js4
-rw-r--r--deps/v8/test/inspector/task-runner.cc2
-rw-r--r--deps/v8/test/inspector/wasm-inspector-test.js8
-rw-r--r--deps/v8/test/intl/displaynames/getoptionsobject.js20
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/intl/list-format/getoptionsobject.js20
-rw-r--r--deps/v8/test/intl/regress-11595.js23
-rw-r--r--deps/v8/test/intl/segmenter/getoptionsobject.js20
-rw-r--r--deps/v8/test/js-perf-test/OWNERS2
-rw-r--r--deps/v8/test/message/fail/await-non-async.out4
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry1.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-finalizationregistry2.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register1.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-register2.out4
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.js2
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.out4
-rw-r--r--deps/v8/test/message/message.status9
-rw-r--r--deps/v8/test/message/weakref-finalizationregistry-error.js2
-rw-r--r--deps/v8/test/mjsunit/array-bounds-check-removal.js6
-rw-r--r--deps/v8/test/mjsunit/array-sort.js16
-rw-r--r--deps/v8/test/mjsunit/array-store-and-grow.js12
-rw-r--r--deps/v8/test/mjsunit/baseline/cross-realm.js55
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline-module.mjs2
-rw-r--r--deps/v8/test/mjsunit/baseline/test-baseline.js32
-rw-r--r--deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js37
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/fast-api-calls.js148
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1215514.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-accessors.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js7
-rw-r--r--deps/v8/test/mjsunit/const-dict-tracking.js472
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking-2.js3
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking.js3
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js2
-rw-r--r--deps/v8/test/mjsunit/ensure-growing-store-learns.js6
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js2
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js2
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js7
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt.js22
-rw-r--r--deps/v8/test/mjsunit/field-type-tracking.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15.mjs19
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js10
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status73
-rw-r--r--deps/v8/test/mjsunit/promise-hooks.js275
-rw-r--r--deps/v8/test/mjsunit/proto-accessor-not-accessible.js43
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-673297.js (renamed from deps/v8/test/mjsunit/regress/regress-673297.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-743622.js (renamed from deps/v8/test/mjsunit/regress/regress-743622.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1067270.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1146880.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-11491.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-11519.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1181240.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1185072.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1187170.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1193903.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-673241.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7115.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-923723.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-992389.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-chromium-1194026.js69
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1191886.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1195331.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9534.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js49
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1027410.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1034394.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1074586.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1075953.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10831.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-10898.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1101304.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1145135.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1146861.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1153442.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1161654.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1179182.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1184964.js11
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1185464.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1187831.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1188825.js28
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1188975.js21
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1189454.js218
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1197393.js35
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1201340.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-5800.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7366.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-782280.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-791810.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-793551.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-842501.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8533.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-854050.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-905815.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-913804.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917412.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-917588b.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-919533.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-922933.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-924843.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-968078.js4
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9759.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9832.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress1192313.js30
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/tools/foozzie.js9
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange-stress.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-rethrow.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-shared.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-simd.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js119
-rw-r--r--deps/v8/test/mjsunit/wasm/externref.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-call.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-rotation.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/loop-unrolling.js49
-rw-r--r--deps/v8/test/mjsunit/wasm/memory64.js25
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/multiple-code-spaces.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-tables.js157
-rw-r--r--deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js39
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-error-position.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable-validation.js14
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js149
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js433
-rw-r--r--deps/v8/test/mkgrokdump/BUILD.gn1
-rw-r--r--deps/v8/test/test262/test262.status17
-rw-r--r--deps/v8/test/test262/testcfg.py3
-rw-r--r--deps/v8/test/unittests/BUILD.gn59
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc1
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc25
-rw-r--r--deps/v8/test/unittests/base/vlq-unittest.cc123
-rw-r--r--deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc175
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc266
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc155
-rw-r--r--deps/v8/test/unittests/compiler/frame-unittest.cc242
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc44
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc110
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc105
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h11
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc288
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc1
-rw-r--r--deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc12
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc73
-rw-r--r--deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc194
-rw-r--r--deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc184
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc79
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc60
-rw-r--r--deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/cppgc/testing-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h2
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc76
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc306
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc9
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc38
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc138
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h3
-rw-r--r--deps/v8/test/unittests/numbers/conversions-unittest.cc5
-rw-r--r--deps/v8/test/unittests/objects/object-unittest.cc6
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc33
-rw-r--r--deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc (renamed from deps/v8/test/unittests/objects/backing-store-unittest.cc)3
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc109
-rw-r--r--deps/v8/test/unittests/wasm/liftoff-register-unittests.cc41
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc77
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc44
-rw-r--r--deps/v8/test/wasm-api-tests/BUILD.gn6
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-tests.status6
-rw-r--r--deps/v8/test/wasm-js/testcfg.py11
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status15
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py12
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status16
405 files changed, 12750 insertions, 4394 deletions
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index fb872ad39f..d90a4c670f 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -15,17 +15,22 @@ group("gn_all") {
"mjsunit:v8_mjsunit",
"mozilla:v8_mozilla",
"test262:v8_test262",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
deps = [
"inspector:inspector-test",
"mkgrokdump:mkgrokdump",
- "wasm-api-tests:wasm_api_tests",
]
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ deps += [ "wasm-api-tests:wasm_api_tests" ]
+ }
+
if (v8_fuzzilli) {
deps += [ "fuzzilli:v8_fuzzilli_test" ]
}
@@ -79,11 +84,16 @@ group("v8_bot_default") {
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
"unittests:unittests",
- "wasm-api-tests:wasm_api_tests",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
]
+
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-api-tests:wasm_api_tests",
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ }
}
group("v8_default") {
@@ -99,10 +109,15 @@ group("v8_default") {
"mjsunit:v8_mjsunit",
"mkgrokdump:mkgrokdump",
"unittests:unittests",
- "wasm-api-tests:wasm_api_tests",
- "wasm-js:v8_wasm_js",
- "wasm-spec-tests:v8_wasm_spec_tests",
]
+
+ if (v8_enable_webassembly) {
+ data_deps += [
+ "wasm-api-tests:wasm_api_tests",
+ "wasm-js:v8_wasm_js",
+ "wasm-spec-tests:v8_wasm_spec_tests",
+ ]
+ }
}
group("v8_optimize_for_size") {
@@ -146,7 +161,7 @@ v8_header_set("common_test_headers") {
configs = []
public_deps = [
- "../:v8_for_testing",
+ "../:v8_internal_headers",
"../:v8_libbase",
]
@@ -154,8 +169,13 @@ v8_header_set("common_test_headers") {
"common/assembler-tester.h",
"common/flag-utils.h",
"common/types-fuzz.h",
- "common/wasm/flag-utils.h",
- "common/wasm/test-signatures.h",
- "common/wasm/wasm-macro-gen.h",
]
+
+ if (v8_enable_webassembly) {
+ sources += [
+ "common/wasm/flag-utils.h",
+ "common/wasm/test-signatures.h",
+ "common/wasm/wasm-macro-gen.h",
+ ]
+ }
}
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index e63fe5ed35..ffa4e3a136 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -16,10 +16,7 @@ v8_executable("cctest") {
sources = [ "cctest.cc" ]
- deps = [
- ":cctest_sources",
- "../..:v8_wrappers",
- ]
+ deps = [ ":cctest_sources" ]
data_deps = [ "../../tools:v8_testrunner" ]
@@ -60,7 +57,11 @@ v8_header_set("cctest_headers") {
"../..:internal_config_base",
]
- deps = [ "../..:v8_config_headers" ]
+ deps = [
+ "../..:v8_internal_headers",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ ]
sources = [ "cctest.h" ]
}
@@ -78,9 +79,6 @@ v8_source_set("cctest_sources") {
### gcmole(all) ###
"../common/assembler-tester.h",
"../common/flag-utils.h",
- "../common/wasm/flag-utils.h",
- "../common/wasm/test-signatures.h",
- "../common/wasm/wasm-macro-gen.h",
"cctest-utils.h",
"collector.h",
"compiler/c-signature.h",
@@ -109,7 +107,6 @@ v8_source_set("cctest_sources") {
"compiler/test-linkage.cc",
"compiler/test-loop-analysis.cc",
"compiler/test-machine-operator-reducer.cc",
- "compiler/test-multiple-return.cc",
"compiler/test-node.cc",
"compiler/test-operator.cc",
"compiler/test-representation-change.cc",
@@ -124,7 +121,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-jsops.cc",
"compiler/test-run-load-store.cc",
"compiler/test-run-machops.cc",
- "compiler/test-run-native-calls.cc",
"compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
"compiler/test-run-tail-calls.cc",
@@ -198,12 +194,10 @@ v8_source_set("cctest_sources") {
"test-api-interceptors.cc",
"test-api-stack-traces.cc",
"test-api-typed-array.cc",
- "test-api-wasm.cc",
"test-api.cc",
"test-api.h",
"test-array-list.cc",
"test-atomicops.cc",
- "test-backing-store.cc",
"test-bignum-dtoa.cc",
"test-bignum.cc",
"test-bit-vector.cc",
@@ -251,7 +245,6 @@ v8_source_set("cctest_sources") {
"test-inobject-slack-tracking.cc",
"test-inspector.cc",
"test-intl.cc",
- "test-js-to-wasm.cc",
"test-js-weak-refs.cc",
"test-liveedit.cc",
"test-local-handles.cc",
@@ -276,6 +269,8 @@ v8_source_set("cctest_sources") {
"test-smi-lexicographic-compare.cc",
"test-strings.cc",
"test-strtod.cc",
+ "test-swiss-name-dictionary-csa.cc",
+ "test-swiss-name-dictionary-infra.cc",
"test-swiss-name-dictionary.cc",
"test-symbols.cc",
"test-thread-termination.cc",
@@ -294,45 +289,12 @@ v8_source_set("cctest_sources") {
"test-version.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
+ "test-web-snapshots.cc",
"torque/test-torque.cc",
"trace-extension.cc",
"trace-extension.h",
"unicode-helpers.cc",
"unicode-helpers.h",
- "wasm/test-c-wasm-entry.cc",
- "wasm/test-compilation-cache.cc",
- "wasm/test-gc.cc",
- "wasm/test-grow-memory.cc",
- "wasm/test-jump-table-assembler.cc",
- "wasm/test-liftoff-inspection.cc",
- "wasm/test-run-wasm-64.cc",
- "wasm/test-run-wasm-asmjs.cc",
- "wasm/test-run-wasm-atomics.cc",
- "wasm/test-run-wasm-atomics64.cc",
- "wasm/test-run-wasm-bulk-memory.cc",
- "wasm/test-run-wasm-exceptions.cc",
- "wasm/test-run-wasm-interpreter.cc",
- "wasm/test-run-wasm-js.cc",
- "wasm/test-run-wasm-memory64.cc",
- "wasm/test-run-wasm-module.cc",
- "wasm/test-run-wasm-sign-extension.cc",
- "wasm/test-run-wasm-simd-liftoff.cc",
- "wasm/test-run-wasm-simd-scalar-lowering.cc",
- "wasm/test-run-wasm-simd.cc",
- "wasm/test-run-wasm-wrappers.cc",
- "wasm/test-run-wasm.cc",
- "wasm/test-streaming-compilation.cc",
- "wasm/test-wasm-breakpoints.cc",
- "wasm/test-wasm-codegen.cc",
- "wasm/test-wasm-import-wrapper-cache.cc",
- "wasm/test-wasm-metrics.cc",
- "wasm/test-wasm-serialization.cc",
- "wasm/test-wasm-shared-engine.cc",
- "wasm/test-wasm-stack.cc",
- "wasm/test-wasm-trap-position.cc",
- "wasm/wasm-atomics-utils.h",
- "wasm/wasm-run-utils.cc",
- "wasm/wasm-run-utils.h",
]
if (v8_current_cpu == "arm") {
@@ -441,12 +403,63 @@ v8_source_set("cctest_sources") {
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
- "../..:wasm_test_common",
"../../tools/debug_helper:v8_debug_helper",
"//build/win:default_exe_manifest",
]
+ if (v8_enable_webassembly) {
+ sources += [
+ "../common/wasm/flag-utils.h",
+ "../common/wasm/test-signatures.h",
+ "../common/wasm/wasm-macro-gen.h",
+ "compiler/test-multiple-return.cc",
+
+ # test-run-native-calls uses wasm's LinkageAllocator.
+ "compiler/test-run-native-calls.cc",
+ "test-api-wasm.cc",
+ "test-js-to-wasm.cc",
+ "wasm/test-backing-store.cc",
+ "wasm/test-c-wasm-entry.cc",
+ "wasm/test-compilation-cache.cc",
+ "wasm/test-gc.cc",
+ "wasm/test-grow-memory.cc",
+ "wasm/test-jump-table-assembler.cc",
+ "wasm/test-liftoff-inspection.cc",
+ "wasm/test-run-wasm-64.cc",
+ "wasm/test-run-wasm-asmjs.cc",
+ "wasm/test-run-wasm-atomics.cc",
+ "wasm/test-run-wasm-atomics64.cc",
+ "wasm/test-run-wasm-bulk-memory.cc",
+ "wasm/test-run-wasm-exceptions.cc",
+ "wasm/test-run-wasm-interpreter.cc",
+ "wasm/test-run-wasm-js.cc",
+ "wasm/test-run-wasm-memory64.cc",
+ "wasm/test-run-wasm-module.cc",
+ "wasm/test-run-wasm-relaxed-simd.cc",
+ "wasm/test-run-wasm-sign-extension.cc",
+ "wasm/test-run-wasm-simd-liftoff.cc",
+ "wasm/test-run-wasm-simd-scalar-lowering.cc",
+ "wasm/test-run-wasm-simd.cc",
+ "wasm/test-run-wasm-wrappers.cc",
+ "wasm/test-run-wasm.cc",
+ "wasm/test-streaming-compilation.cc",
+ "wasm/test-wasm-breakpoints.cc",
+ "wasm/test-wasm-codegen.cc",
+ "wasm/test-wasm-import-wrapper-cache.cc",
+ "wasm/test-wasm-metrics.cc",
+ "wasm/test-wasm-serialization.cc",
+ "wasm/test-wasm-shared-engine.cc",
+ "wasm/test-wasm-stack.cc",
+ "wasm/test-wasm-trap-position.cc",
+ "wasm/wasm-atomics-utils.h",
+ "wasm/wasm-run-utils.cc",
+ "wasm/wasm-run-utils.h",
+ "wasm/wasm-simd-utils.cc",
+ "wasm/wasm-simd-utils.h",
+ ]
+ public_deps += [ "../..:wasm_test_common" ]
+ }
+
defines = []
deps = [
"../..:run_torque",
@@ -516,7 +529,6 @@ v8_executable("generate-bytecode-expectations") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index d2464494fa..827fb0a2ef 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,3 +1,3 @@
-per-file *profile*=alph@chromium.org
+per-file *profile*=file:../../src/inspector/OWNERS
per-file test-debug-helper.cc=seth.brenith@microsoft.com
per-file test-v8windbg.cc=seth.brenith@microsoft.com
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 49969a0508..7d5115697a 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -341,10 +341,12 @@ int main(int argc, char* argv[]) {
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv[0]);
+#if V8_ENABLE_WEBASSEMBLY
if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
constexpr bool use_default_signal_handler = true;
CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_signal_handler));
}
+#endif // V8_ENABLE_WEBASSEMBLY
CcTest::set_array_buffer_allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index e2fe217fd1..862f347aa4 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -89,6 +89,19 @@ class JSHeapBroker;
static void Test##Name()
#endif
+// Similar to TEST, but used when test definitions appear as members of a
+// (probably parameterized) class. This allows re-using the given tests multiple
+// times. For this to work, the following conditions must hold:
+// 1. The class has a template parameter named kTestFileName of type char
+// const*, which is instantiated with __FILE__ at the *use site*, in order
+// to correctly associate the tests with the test suite using them.
+// 2. To actually execute the tests, create an instance of the class
+// containing the MEMBER_TESTs.
+#define MEMBER_TEST(Name) \
+ CcTest register_test_##Name = \
+ CcTest(Test##Name, kTestFileName, #Name, true, true); \
+ static void Test##Name()
+
#define EXTENSION_LIST(V) \
V(GC_EXTENSION, "v8/gc") \
V(PRINT_EXTENSION, "v8/print") \
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 03898b82c5..7b1bf8caa5 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -39,7 +39,6 @@
# These tests are expected to hit a CHECK (i.e. a FAIL result actually means
# the test passed).
- 'test-api/RegExpInterruptAndReenterIrregexp': [FAIL, CRASH],
'test-verifiers/Fail*': [FAIL, CRASH],
# This test always fails. It tests that LiveEdit causes abort when turned off.
@@ -171,7 +170,6 @@
'test-api/ExternalArrays': [PASS, SLOW],
'test-api/Threading*': [SKIP],
'test-cpu-profiler/MultipleIsolates': [PASS, ['not pointer_compression', SLOW]],
- 'test-debug/DebugBreakStackTrace': [PASS, SLOW],
}], # 'arch == arm64 and simulator_run'
['arch == arm64 and system == macos and not simulator_run', {
@@ -278,7 +276,6 @@
['arch == arm and simulator_run', {
# Pass but take too long with the simulator.
'test-api/Threading*': [SKIP],
- 'test-cpu-profiler/MultipleIsolates': [PASS, SLOW],
}], # 'arch == arm and simulator_run'
##############################################################################
@@ -387,6 +384,8 @@
# SIMD not fully implemented yet
'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-run-wasm-simd-scalar-lowering/*':[SKIP],
+ 'test-run-wasm-simd/*':[SKIP],
# Some wasm functionality is not implemented yet
'test-run-wasm-atomics64/*': [SKIP],
@@ -598,6 +597,7 @@
'test-run-variables/*': [SKIP],
'test-serialize/*': [SKIP],
'test-sloppy-equality/*' : [SKIP],
+ 'test-swiss-name-dictionary-csa/*': [SKIP],
'test-torque/*': [SKIP],
'test-unwinder-code-pages/PCIsInV8_LargeCodeObject_CodePagesAPI': [SKIP],
@@ -702,4 +702,10 @@
'test-run-wasm-simd-scalar-lowering/*': [SKIP],
}], # no_simd_sse == True
+################################################################################
+['variant == stress_concurrent_inlining', {
+ # BUG(11524): Crashing flakily.
+ 'test-cpu-profiler/TracingCpuProfiler': [PASS, FAIL],
+}], # variant == stress_concurrent_inlining
+
]
diff --git a/deps/v8/test/cctest/compiler/node-observer-tester.h b/deps/v8/test/cctest/compiler/node-observer-tester.h
index 253eba230e..202a925050 100644
--- a/deps/v8/test/cctest/compiler/node-observer-tester.h
+++ b/deps/v8/test/cctest/compiler/node-observer-tester.h
@@ -9,7 +9,6 @@
#include "src/compiler/simplified-operator.h"
#include "src/objects/type-hints.h"
#include "test/cctest/cctest.h"
-#include "test/common/wasm/flag-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 5bbfb1492b..d52515ec5d 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -10,7 +10,6 @@
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/objects-inl.h"
@@ -20,6 +19,11 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -229,12 +233,12 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
TNode<FixedArray> vector =
__ Cast(__ LoadFixedArrayElement(result_array, i));
for (int lane = 0; lane < 4; lane++) {
- TNode<Smi> lane_value =
- __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
+ TNode<Smi> lane_value = __ SmiFromInt32(__ UncheckedCast<Int32T>(
+ tester.raw_assembler_for_testing()->AddNode(
tester.raw_assembler_for_testing()
->machine()
->I32x4ExtractLane(lane),
- param));
+ param)));
__ StoreFixedArrayElement(vector, lane, lane_value,
UNSAFE_SKIP_WRITE_BARRIER);
}
@@ -1058,9 +1062,9 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE, -1), // poison index.
- ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
- ImmediateOperand(ImmediateOperand::INLINE, stack_slot_delta)};
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, stack_slot_delta)};
Instruction* tail_call =
Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
arraysize(callee), callee, 0, nullptr);
@@ -1147,9 +1151,10 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE, -1), // poison index.
- ImmediateOperand(ImmediateOperand::INLINE, optional_padding_slot),
- ImmediateOperand(ImmediateOperand::INLINE, first_unused_stack_slot)};
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32,
+ first_unused_stack_slot)};
Instruction* tail_call =
Instruction::New(zone_, kArchTailCallCodeObject, 0, nullptr,
arraysize(callee), callee, 0, nullptr);
@@ -1432,6 +1437,7 @@ TEST(AssembleTailCallGap) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
@@ -1531,6 +1537,7 @@ TEST(Regress_1171759) {
CHECK_EQ(0, mt.Call());
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index 7a9460a688..163477d6fc 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -8,6 +8,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline.h"
+#include "src/debug/debug.h"
#include "src/handles/handles.h"
#include "src/logging/counters.h"
#include "src/objects/js-function.h"
@@ -33,11 +34,11 @@ void ExpectSharedFunctionInfoState(SharedFunctionInfo sfi,
HeapObject script_or_debug_info = sfi.script_or_debug_info(kAcquireLoad);
switch (expectedState) {
case SfiState::Compiled:
- CHECK(function_data.IsBytecodeArray());
+ CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
CHECK(script_or_debug_info.IsScript());
break;
case SfiState::DebugInfo:
- CHECK(function_data.IsBytecodeArray());
+ CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
CHECK(script_or_debug_info.IsDebugInfo());
{
DebugInfo debug_info = DebugInfo::cast(script_or_debug_info);
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 37e12d9ffc..a66bfb207f 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -14,12 +14,14 @@ namespace compiler {
class TestCode : public HandleAndZoneScope {
public:
- TestCode()
+ explicit TestCode(size_t block_count)
: HandleAndZoneScope(),
blocks_(main_zone()),
sequence_(main_isolate(), main_zone(), &blocks_),
rpo_number_(RpoNumber::FromInt(0)),
- current_(nullptr) {}
+ current_(nullptr) {
+ sequence_.IncreaseRpoForTesting(block_count);
+ }
ZoneVector<InstructionBlock*> blocks_;
InstructionSequence sequence_;
@@ -138,7 +140,8 @@ void VerifyForwarding(TestCode* code, int count, int* expected) {
}
TEST(FwEmpty1) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -148,13 +151,14 @@ TEST(FwEmpty1) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwEmptyN) {
+ constexpr size_t kBlockCount = 3;
for (int i = 0; i < 9; i++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -165,36 +169,39 @@ TEST(FwEmptyN) {
code.End();
static int expected[] = {2, 2, 2};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
TEST(FwNone1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.End();
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.RedundantMoves();
code.End();
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.RedundantMoves();
@@ -203,12 +210,13 @@ TEST(FwMoves2) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.NonRedundantMoves();
@@ -217,12 +225,13 @@ TEST(FwMoves2b) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwOther2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Other();
@@ -231,12 +240,13 @@ TEST(FwOther2) {
code.End();
static int expected[] = {0, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwNone2a) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -244,12 +254,13 @@ TEST(FwNone2a) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwNone2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Jump(1);
@@ -257,23 +268,25 @@ TEST(FwNone2b) {
code.End();
static int expected[] = {1, 1};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop1) {
- TestCode code;
+ constexpr size_t kBlockCount = 1;
+ TestCode code(kBlockCount);
// B0
code.Jump(0);
static int expected[] = {0};
- VerifyForwarding(&code, 1, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -281,12 +294,13 @@ TEST(FwLoop2) {
code.Jump(0);
static int expected[] = {0, 0};
- VerifyForwarding(&code, 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -296,12 +310,13 @@ TEST(FwLoop3) {
code.Jump(0);
static int expected[] = {0, 0, 0};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop1b) {
- TestCode code;
+ constexpr size_t kBlockCount = 2;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -314,7 +329,8 @@ TEST(FwLoop1b) {
TEST(FwLoop2b) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -324,12 +340,13 @@ TEST(FwLoop2b) {
code.Jump(1);
static int expected[] = {1, 1, 1};
- VerifyForwarding(&code, 3, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3b) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -341,12 +358,13 @@ TEST(FwLoop3b) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1};
- VerifyForwarding(&code, 4, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1a) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -360,12 +378,13 @@ TEST(FwLoop2_1a) {
code.Jump(2);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1b) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -379,12 +398,13 @@ TEST(FwLoop2_1b) {
code.Jump(2);
static int expected[] = {2, 2, 2, 2, 2};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1c) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -398,12 +418,13 @@ TEST(FwLoop2_1c) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop2_1d) {
- TestCode code;
+ constexpr size_t kBlockCount = 5;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -417,12 +438,13 @@ TEST(FwLoop2_1d) {
code.Jump(1);
static int expected[] = {1, 1, 1, 1, 1};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop3_1a) {
- TestCode code;
+ constexpr size_t kBlockCount = 6;
+ TestCode code(kBlockCount);
// B0
code.Fallthru();
@@ -438,14 +460,16 @@ TEST(FwLoop3_1a) {
code.Jump(0);
static int expected[] = {2, 2, 2, 2, 2, 2};
- VerifyForwarding(&code, 6, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwDiamonds) {
+ constexpr size_t kBlockCount = 4;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
- TestCode code;
+ TestCode code(kBlockCount);
+
// B0
code.Branch(1, 2);
// B1
@@ -458,17 +482,18 @@ TEST(FwDiamonds) {
code.End();
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3};
- VerifyForwarding(&code, 4, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
TEST(FwDiamonds2) {
+ constexpr size_t kBlockCount = 5;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
// B1
@@ -485,7 +510,7 @@ TEST(FwDiamonds2) {
int merge = k ? 3 : 4;
int expected[] = {0, i ? 1 : merge, j ? 2 : merge, merge, 4};
- VerifyForwarding(&code, 5, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
@@ -493,11 +518,12 @@ TEST(FwDiamonds2) {
TEST(FwDoubleDiamonds) {
+ constexpr size_t kBlockCount = 7;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int x = 0; x < 2; x++) {
for (int y = 0; y < 2; y++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
// B1
@@ -519,7 +545,7 @@ TEST(FwDoubleDiamonds) {
int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3,
x ? 4 : 6, y ? 5 : 6, 6};
- VerifyForwarding(&code, 7, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
}
}
@@ -572,7 +598,8 @@ int find(int x, int* permutation, int size) {
void RunPermutedChain(int* permutation, int size) {
- TestCode code;
+ const int kBlockCount = size + 2;
+ TestCode code(kBlockCount);
int cur = -1;
for (int i = 0; i < size; i++) {
code.Jump(find(cur + 1, permutation, size) + 1);
@@ -583,7 +610,7 @@ void RunPermutedChain(int* permutation, int size) {
int expected[] = {size + 1, size + 1, size + 1, size + 1,
size + 1, size + 1, size + 1};
- VerifyForwarding(&code, size + 2, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
@@ -595,7 +622,8 @@ TEST(FwPermuted_chain) {
void RunPermutedDiamond(int* permutation, int size) {
- TestCode code;
+ constexpr size_t kBlockCount = 6;
+ TestCode code(kBlockCount);
int br = 1 + find(0, permutation, size);
code.Jump(br);
for (int i = 0; i < size; i++) {
@@ -619,7 +647,7 @@ void RunPermutedDiamond(int* permutation, int size) {
int expected[] = {br, 5, 5, 5, 5, 5};
expected[br] = br;
- VerifyForwarding(&code, 6, expected);
+ VerifyForwarding(&code, kBlockCount, expected);
}
@@ -676,7 +704,8 @@ void CheckAssemblyOrder(TestCode* code, int size, int* expected) {
}
TEST(Rewire1) {
- TestCode code;
+ constexpr size_t kBlockCount = 3;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
@@ -686,17 +715,18 @@ TEST(Rewire1) {
code.End();
static int forward[] = {2, 2, 2};
- ApplyForwarding(&code, 3, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 2);
CheckNop(&code, j2);
static int assembly[] = {0, 1, 1};
- CheckAssemblyOrder(&code, 3, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire1_deferred) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
@@ -709,18 +739,19 @@ TEST(Rewire1_deferred) {
code.End();
static int forward[] = {3, 3, 3, 3};
- ApplyForwarding(&code, 4, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 3);
CheckNop(&code, j2);
CheckNop(&code, j3);
static int assembly[] = {0, 1, 2, 1};
- CheckAssemblyOrder(&code, 4, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire2_deferred) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Other();
@@ -735,19 +766,20 @@ TEST(Rewire2_deferred) {
code.End();
static int forward[] = {0, 1, 2, 3};
- ApplyForwarding(&code, 4, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 1);
CheckJump(&code, j2, 3);
static int assembly[] = {0, 2, 3, 1};
- CheckAssemblyOrder(&code, 4, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(Rewire_diamond) {
+ constexpr size_t kBlockCount = 5;
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
- TestCode code;
+ TestCode code(kBlockCount);
// B0
int j1 = code.Jump(1);
// B1
@@ -760,7 +792,7 @@ TEST(Rewire_diamond) {
code.End();
int forward[] = {0, 1, i ? 4 : 2, j ? 4 : 3, 4};
- ApplyForwarding(&code, 5, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckJump(&code, j1, 1);
CheckBranch(&code, b1, i ? 4 : 2, j ? 4 : 3);
if (i) {
@@ -781,13 +813,14 @@ TEST(Rewire_diamond) {
if (j) {
for (int k = 4; k < 5; k++) assembly[k]--;
}
- CheckAssemblyOrder(&code, 5, assembly);
+ CheckAssemblyOrder(&code, kBlockCount, assembly);
}
}
}
TEST(RewireRet) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -807,7 +840,8 @@ TEST(RewireRet) {
}
TEST(RewireRet1) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -819,15 +853,16 @@ TEST(RewireRet1) {
code.End();
int forward[] = {0, 1, 2, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckRet(&code, j2);
}
TEST(RewireRet2) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -839,15 +874,16 @@ TEST(RewireRet2) {
code.End();
int forward[] = {0, 1, 1, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckNop(&code, j2);
}
TEST(DifferentSizeRet) {
- TestCode code;
+ constexpr size_t kBlockCount = 4;
+ TestCode code(kBlockCount);
// B0
code.Branch(1, 2);
@@ -859,8 +895,8 @@ TEST(DifferentSizeRet) {
code.End();
int forward[] = {0, 1, 2, 3};
- VerifyForwarding(&code, 4, forward);
- ApplyForwarding(&code, 4, forward);
+ VerifyForwarding(&code, kBlockCount, forward);
+ ApplyForwarding(&code, kBlockCount, forward);
CheckRet(&code, j1);
CheckRet(&code, j2);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 73aa806479..bd6d0a5f0f 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -114,7 +114,7 @@ TEST(TestLinkageStubCall) {
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
CHECK(call_descriptor);
- CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(0, static_cast<int>(call_descriptor->ParameterSlotCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
CHECK_EQ(false, call_descriptor->IsJSFunctionCall());
@@ -124,6 +124,7 @@ TEST(TestLinkageStubCall) {
// TODO(titzer): test linkage creation for outgoing stub calls.
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(TestFPLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -135,7 +136,7 @@ TEST(TestFPLinkageStubCall) {
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
CHECK(call_descriptor);
- CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(0, static_cast<int>(call_descriptor->ParameterSlotCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ParameterCount()));
CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
@@ -148,6 +149,7 @@ TEST(TestFPLinkageStubCall) {
CHECK_EQ(call_descriptor->GetReturnLocation(0).GetLocation(),
kReturnRegister0.code());
}
+#endif // V8_ENABLE_WEBASSEMBLY
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
index 7a76839081..d62a187a1c 100644
--- a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -4,11 +4,14 @@
#include "src/codegen/external-reference.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-external-refs.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-external-refs.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -199,6 +202,7 @@ void TestExternalReference_BinOpWithReturn(
}
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(RunCallF32Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_trunc();
@@ -373,6 +377,7 @@ TEST(RunCallFloat64Pow) {
TestExternalReference_BinOp<double>(&m, ref, wasm::float64_pow_wrapper,
ValueHelper::float64_vector());
}
+#endif // V8_ENABLE_WEBASSEMBLY
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 5f7b6eed88..3160848b68 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -400,6 +400,117 @@ TEST(RunWord64Popcnt) {
#endif // V8_TARGET_ARCH_64_BIT
+TEST(RunFloat32SelectRegFloatCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ Node* cmp = m.Float32Equal(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float32Select(cmp, m.Parameter(0), m.Parameter(1)));
+
+ FOR_FLOAT32_INPUTS(pl) {
+ FOR_FLOAT32_INPUTS(pr) {
+ float expected_result = pl == pr ? pl : pr;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat64SelectRegFloatCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ Node* cmp = m.Float64LessThan(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Parameter(0), m.Parameter(1)));
+
+ FOR_FLOAT64_INPUTS(pl) {
+ FOR_FLOAT64_INPUTS(pr) {
+ double expected_result = pl < pr ? pl : pr;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat32SelectImmediateOnLeftFloatCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ const float pl = -5.0;
+ Node* a = m.Float32Constant(pl);
+ Node* cmp = m.Float32LessThan(a, m.Parameter(0));
+ m.Return(m.Float32Select(cmp, a, m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(pr) {
+ float expected_result = pl < pr ? pl : pr;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pr));
+ }
+}
+
+TEST(RunFloat64SelectImmediateOnRightFloatCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ double pr = 5.0;
+ Node* b = m.Float64Constant(pr);
+ Node* cmp = m.Float64LessThanOrEqual(m.Parameter(0), b);
+ m.Return(m.Float64Select(cmp, m.Parameter(0), b));
+
+ FOR_FLOAT64_INPUTS(pl) {
+ double expected_result = pl <= pr ? pl : pr;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl));
+ }
+}
+
+TEST(RunFloat32SelectImmediateIntCompare) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Int32(),
+ MachineType::Int32());
+ if (!m.machine()->Float32Select().IsSupported()) {
+ return;
+ }
+
+ float tval = -1.0;
+ float fval = 1.0;
+ Node* cmp = m.Int32LessThanOrEqual(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Float32Constant(tval),
+ m.Float32Constant(fval)));
+
+ FOR_INT32_INPUTS(pl) {
+ FOR_INT32_INPUTS(pr) {
+ float expected_result = pl <= pr ? tval : fval;
+ CHECK_FLOAT_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
+
+TEST(RunFloat64SelectImmediateIntCompare) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Int64(),
+ MachineType::Int64());
+ if (!m.machine()->Float64Select().IsSupported()) {
+ return;
+ }
+
+ double tval = -1.0;
+ double fval = 1.0;
+ Node* cmp = m.Int64LessThan(m.Parameter(0), m.Parameter(1));
+ m.Return(m.Float64Select(cmp, m.Float64Constant(tval),
+ m.Float64Constant(fval)));
+
+ FOR_INT64_INPUTS(pl) {
+ FOR_INT64_INPUTS(pr) {
+ double expected_result = pl < pr ? tval : fval;
+ CHECK_DOUBLE_EQ(expected_result, m.Call(pl, pr));
+ }
+ }
+}
static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
switch (index) {
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index c0a8324286..090351bc38 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -24,9 +24,9 @@ namespace {
Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
- for (int i = 0; i < param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
TNode<IntPtrT> product = __ Signed(__ IntPtrMul(
__ UncheckedParameter<IntPtrT>(i), __ IntPtrConstant(i + 1)));
sum = __ IntPtrAdd(sum, product);
@@ -58,17 +58,17 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
__ BIND(&end);
params.push_back(target_var.value());
- int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
params.push_back(__ IntPtrConstant(i));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
if (tail) {
tester.raw_assembler_for_testing()->TailCallN(
- callee_descriptor, param_count + 1, params.data());
+ callee_descriptor, param_slots + 1, params.data());
} else {
Node* result = tester.raw_assembler_for_testing()->CallN(
- callee_descriptor, param_count + 1, params.data());
+ callee_descriptor, param_slots + 1, params.data());
__ Return(__ UncheckedCast<IntPtrT>(result));
}
return tester.GenerateCodeCloseAndEscape();
@@ -85,31 +85,30 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ HeapConstant(
BuildCaller(isolate, caller_descriptor, callee_descriptor, tail)));
// Set up arguments for "Caller".
- int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
// Use values that are different from the ones we will pass to this
// function's callee later.
params.push_back(__ IntPtrConstant(i + 42));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
TNode<IntPtrT> intptr_result =
__ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data()));
+ caller_descriptor, param_slots + 1, params.data()));
__ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
-CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
- int stack_param_count) {
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
LocationSignature::Builder locations(zone, 1,
- static_cast<size_t>(stack_param_count));
+ static_cast<size_t>(param_slots));
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr()));
- for (int i = 0; i < stack_param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- i - stack_param_count, MachineType::IntPtr()));
+ i - param_slots, MachineType::IntPtr()));
}
return zone->New<CallDescriptor>(
@@ -118,7 +117,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
locations.Build(), // location_sig
- stack_param_count, // stack_parameter_count
+ param_slots, // stack parameter slots
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 0601c161c1..06b2529ad6 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -25,9 +25,9 @@ namespace {
Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(call_descriptor->StackParameterCount());
+ int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
- for (int i = 0; i < param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
TNode<WordT> product = __ IntPtrMul(__ UncheckedParameter<IntPtrT>(i),
__ IntPtrConstant(i + 1));
sum = __ Signed(__ IntPtrAdd(sum, product));
@@ -45,13 +45,13 @@ Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ HeapConstant(BuildCallee(isolate, callee_descriptor)));
- int param_count = static_cast<int>(callee_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
params.push_back(__ IntPtrConstant(i));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
tester.raw_assembler_for_testing()->TailCallN(callee_descriptor,
- param_count + 1, params.data());
+ param_slots + 1, params.data());
return tester.GenerateCodeCloseAndEscape();
}
@@ -66,31 +66,30 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
params.push_back(__ HeapConstant(
BuildCaller(isolate, caller_descriptor, callee_descriptor)));
// Set up arguments for "Caller".
- int param_count = static_cast<int>(caller_descriptor->StackParameterCount());
- for (int i = 0; i < param_count; ++i) {
+ int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
+ for (int i = 0; i < param_slots; ++i) {
// Use values that are different from the ones we will pass to this
// function's callee later.
params.push_back(__ IntPtrConstant(i + 42));
}
- DCHECK_EQ(param_count + 1, params.size());
+ DCHECK_EQ(param_slots + 1, params.size());
TNode<IntPtrT> intptr_result =
__ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_count + 1, params.data()));
+ caller_descriptor, param_slots + 1, params.data()));
__ Return(__ SmiTag(intptr_result));
return tester.GenerateCodeCloseAndEscape();
}
-CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
- int stack_param_count) {
+CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
LocationSignature::Builder locations(zone, 1,
- static_cast<size_t>(stack_param_count));
+ static_cast<size_t>(param_slots));
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr()));
- for (int i = 0; i < stack_param_count; ++i) {
+ for (int i = 0; i < param_slots; ++i) {
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- i - stack_param_count, MachineType::IntPtr()));
+ i - param_slots, MachineType::IntPtr()));
}
return zone->New<CallDescriptor>(
@@ -99,7 +98,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
LinkageLocation::ForAnyRegister(
MachineType::AnyTagged()), // target location
locations.Build(), // location_sig
- stack_param_count, // stack_parameter_count
+ param_slots, // stack parameter slots
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
diff --git a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
index 82450abb7c..7533000afb 100644
--- a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
+++ b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "test/cctest/compiler/node-observer-tester.h"
+#include "test/common/flag-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index c8fffb987d..67040e4d60 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -151,7 +151,7 @@ TEST(StressJS) {
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map(), isolate);
Handle<DescriptorArray> instance_descriptors(
- map->instance_descriptors(kRelaxedLoad), isolate);
+ map->instance_descriptors(isolate), isolate);
CHECK_EQ(0, instance_descriptors->number_of_descriptors());
PropertyAttributes attrs = NONE;
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 24ddbb4cfc..cad0d900ae 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -43,7 +43,7 @@ void CheckAllObjectsOnPage(const std::vector<Handle<FixedArray>>& handles,
} // namespace
HEAP_TEST(CompactionFullAbortedPage) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and the whole page
// is aborted.
@@ -106,7 +106,7 @@ int GetObjectSize(int objects_per_page) {
} // namespace
HEAP_TEST(CompactionPartiallyAbortedPage) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one.
@@ -186,7 +186,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
}
HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test evacuating a page partially when it contains recorded
// slots and invalidated objects.
@@ -269,7 +269,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
}
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
- if (FLAG_never_compact) return;
+ if (FLAG_never_compact || FLAG_crash_on_aborted_evacuation) return;
// Test the scenario where we reach OOM during compaction and parts of the
// page have already been migrated to a new one. Objects on the aborted page
// are linked together. This test makes sure that intra-aborted page pointers
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index 5450e0358d..1a664b9562 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -65,7 +65,8 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
- explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
+ explicit ConcurrentAllocationThread(Heap* heap,
+ std::atomic<int>* pending = nullptr)
: v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
heap_(heap),
pending_(pending) {}
@@ -74,7 +75,7 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap);
AllocateSomeObjects(&local_heap);
- pending_->fetch_sub(1);
+ if (pending_) pending_->fetch_sub(1);
}
Heap* heap_;
@@ -128,6 +129,108 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
isolate->Dispose();
}
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ isolate->Dispose();
+}
+
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+ FLAG_incremental_marking = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ for (int i = 0; i < 300'000; i++) {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+ }
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ isolate->Dispose();
+}
+
+UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
+ FLAG_max_old_space_size = 4;
+ FLAG_stress_concurrent_allocation = false;
+ FLAG_incremental_marking = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
+ const int kThreads = 4;
+
+ for (int i = 0; i < kThreads; i++) {
+ auto thread =
+ std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
+ CHECK(thread->Start());
+ threads.push_back(std::move(thread));
+ }
+
+ // Some of the following Safepoint() invocations are supposed to perform a GC.
+ for (int i = 0; i < 1'000'000; i++) {
+ i_isolate->main_thread_local_heap()->Safepoint();
+ }
+
+ {
+ ParkedScope scope(i_isolate->main_thread_local_isolate());
+
+ for (auto& thread : threads) {
+ thread->Join();
+ }
+ }
+
+ i_isolate->main_thread_local_heap()->Safepoint();
+ isolate->Dispose();
+}
+
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit LargeObjectConcurrentAllocationThread(Heap* heap,
@@ -146,7 +249,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
if (result.IsRetry()) {
- local_heap.PerformCollection();
+ local_heap.TryPerformCollection();
} else {
Address address = result.ToAddress();
CreateFixedArray(heap_, address, kLargeObjectSize);
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 96cb22827a..d8ad6e6554 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -1095,6 +1095,9 @@ TEST(TestBytecodeFlushing) {
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
#endif // V8_LITE_MODE
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
@@ -1156,6 +1159,9 @@ HEAP_TEST(Regress10560) {
// Disable flags that allocate a feedback vector eagerly.
i::FLAG_opt = false;
i::FLAG_always_opt = false;
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_lazy_feedback_allocation = true;
ManualGCScope manual_gc_scope;
@@ -1320,6 +1326,9 @@ TEST(Regress10774) {
TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
FLAG_opt = true;
FLAG_always_opt = false;
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_optimize_for_size = false;
i::FLAG_incremental_marking = true;
i::FLAG_flush_bytecode = true;
@@ -1464,7 +1473,6 @@ TEST(CompilationCacheCachingBehavior) {
"};"
"foo();";
Handle<String> source = factory->InternalizeUtf8String(raw_source);
- Handle<Context> native_context = isolate->native_context();
{
v8::HandleScope scope(CcTest::isolate());
@@ -1477,7 +1485,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(!cached_script.is_null());
}
@@ -1488,7 +1496,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(!cached_script.is_null());
// Progress code age until it's old and ready for GC.
@@ -1508,7 +1516,7 @@ TEST(CompilationCacheCachingBehavior) {
MaybeHandle<SharedFunctionInfo> cached_script =
compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
+ language_mode);
CHECK(cached_script.is_null());
}
}
diff --git a/deps/v8/test/cctest/heap/test-write-barrier.cc b/deps/v8/test/cctest/heap/test-write-barrier.cc
index f12e3fe8f0..9ed3eb668e 100644
--- a/deps/v8/test/cctest/heap/test-write-barrier.cc
+++ b/deps/v8/test/cctest/heap/test-write-barrier.cc
@@ -5,6 +5,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/spaces.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index a127da4b7f..30baf4afad 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -75,9 +75,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 22 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 22 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 22 S> */ B(Return),
]
@@ -180,9 +180,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 31 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 31 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 31 S> */ B(Return),
]
@@ -352,9 +352,9 @@ bytecodes: [
B(Mov), R(0), R(7),
B(Mov), R(5), R(8),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(7), U8(3),
- /* 50 S> */ B(Return),
+ B(Return),
B(Ldar), R(5),
- /* 50 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 50 S> */ B(Return),
]
@@ -569,9 +569,9 @@ bytecodes: [
B(Mov), R(0), R(4),
B(Mov), R(2), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
- /* 60 S> */ B(Return),
+ B(Return),
B(Ldar), R(2),
- /* 60 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 60 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
index a1cfdc3bc4..71e54f421e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncModules.golden
@@ -29,7 +29,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 10 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
B(Mov), R(context), R(2),
@@ -67,7 +67,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 10 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [20],
@@ -101,7 +101,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 21 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
B(Mov), R(context), R(2),
@@ -142,7 +142,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 21 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [20],
@@ -184,7 +184,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 54 S> */ B(Return),
+ B(Return),
B(Mov), R(3), R(2),
B(Ldar), R(2),
B(Mov), R(context), R(3),
@@ -223,7 +223,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 54 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [27],
@@ -264,7 +264,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 49 S> */ B(Return),
+ B(Return),
B(Mov), R(3), R(2),
B(Ldar), R(2),
B(Mov), R(context), R(3),
@@ -305,7 +305,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 49 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [30],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 12a5b13aa0..a11a4aa405 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -141,7 +141,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 57 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -282,7 +282,7 @@ bytecodes: [
B(Mov), R(0), R(14),
B(Mov), R(9), R(15),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(14), U8(3),
- /* 68 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
B(Star6),
B(LdaTrue),
@@ -303,7 +303,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 68 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -469,7 +469,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 114 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [85],
@@ -578,7 +578,7 @@ bytecodes: [
B(Mov), R(0), R(11),
B(Mov), R(7), R(12),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionResolve), R(11), U8(3),
- /* 96 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
B(Star4),
B(LdaFalse),
@@ -599,7 +599,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 96 S> */ B(Return),
+ B(Return),
]
constant pool: [
OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 8794a29087..0a954c76b8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -162,7 +162,7 @@ bytecodes: [
B(Ldar), R(7),
B(ReThrow),
B(Ldar), R(7),
- /* 85 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 85 S> */ B(Return),
]
@@ -351,7 +351,7 @@ bytecodes: [
B(Ldar), R(5),
B(ReThrow),
B(Ldar), R(5),
- /* 105 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 105 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 7ccbd17f62..d456c0c931 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -429,7 +429,7 @@ bytecodes: [
B(Ldar), R(5),
/* 11 E> */ B(Throw),
B(Ldar), R(5),
- /* 55 S> */ B(Return),
+ B(Return),
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -531,7 +531,7 @@ bytecodes: [
B(Ldar), R(4),
/* 11 E> */ B(Throw),
B(Ldar), R(4),
- /* 49 S> */ B(Return),
+ B(Return),
/* 35 S> */ B(GetIterator), R(arg0), U8(0), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
@@ -607,7 +607,7 @@ bytecodes: [
B(Ldar), R(8),
B(ReThrow),
B(Ldar), R(8),
- /* 49 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
@@ -726,7 +726,7 @@ bytecodes: [
B(Star9),
B(Mov), R(0), R(7),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(7), U8(3),
- /* 60 S> */ B(Return),
+ B(Return),
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
@@ -850,7 +850,7 @@ bytecodes: [
B(Star8),
B(Mov), R(0), R(6),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(6), U8(3),
- /* 54 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [88],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index f28a4e70e0..e5f1c46c66 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -28,7 +28,7 @@ bytecodes: [
B(Ldar), R(1),
/* 11 E> */ B(Throw),
B(Ldar), R(1),
- /* 16 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 16 S> */ B(Return),
]
@@ -62,7 +62,7 @@ bytecodes: [
B(Ldar), R(1),
/* 11 E> */ B(Throw),
B(Ldar), R(1),
- /* 25 S> */ B(Return),
+ B(Return),
/* 16 S> */ B(LdaSmi), I8(42),
B(Star1),
B(LdaFalse),
@@ -76,7 +76,7 @@ bytecodes: [
B(Ldar), R(1),
/* 16 E> */ B(Throw),
B(Ldar), R(1),
- /* 25 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 25 S> */ B(Return),
]
@@ -113,7 +113,7 @@ bytecodes: [
B(Ldar), R(4),
/* 11 E> */ B(Throw),
B(Ldar), R(4),
- /* 44 S> */ B(Return),
+ B(Return),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star6),
B(GetIterator), R(6), U8(1), U8(3),
@@ -191,7 +191,7 @@ bytecodes: [
B(Ldar), R(8),
B(ReThrow),
B(Ldar), R(8),
- /* 44 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
@@ -238,7 +238,7 @@ bytecodes: [
B(Ldar), R(1),
/* 38 E> */ B(Throw),
B(Ldar), R(1),
- /* 54 S> */ B(Return),
+ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
B(Star5),
/* 50 E> */ B(CallUndefinedReceiver0), R(5), U8(2),
@@ -263,7 +263,7 @@ bytecodes: [
B(CallProperty1), R(6), R(3), R(4), U8(14),
B(Jump), U8(45),
B(Ldar), R(4),
- /* 54 S> */ B(Return),
+ B(Return),
B(LdaNamedProperty), R(3), U8(9), U8(16),
B(JumpIfUndefinedOrNull), U8(10),
B(Star6),
@@ -296,7 +296,7 @@ bytecodes: [
B(TestReferenceEqual), R(2),
B(JumpIfFalse), U8(5),
B(Ldar), R(3),
- /* 54 S> */ B(Return),
+ B(Return),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 149140d4a7..670b9c4e7b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -28,7 +28,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 14 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 14 S> */ B(Return),
@@ -62,7 +62,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 25 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 25 S> */ B(Return),
@@ -98,7 +98,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 65 S> */ B(Return),
+ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(3),
B(Star3),
@@ -148,7 +148,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 50 S> */ B(Return),
+ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -201,7 +201,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 50 S> */ B(Return),
+ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -255,7 +255,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 52 S> */ B(Return),
+ B(Return),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 23 S> */ B(LdaModuleVariable), I8(1), U8(0),
@@ -307,7 +307,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 33 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(CreateClosure), U8(4), U8(0), U8(0),
B(StaModuleVariable), I8(1), U8(0),
@@ -350,7 +350,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 27 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(LdaTheHole),
B(Star5),
@@ -398,7 +398,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 31 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 31 S> */ B(Return),
@@ -432,7 +432,7 @@ bytecodes: [
B(Ldar), R(2),
/* 0 E> */ B(Throw),
B(Ldar), R(2),
- /* 20 S> */ B(Return),
+ B(Return),
B(Mov), R(2), R(1),
B(Ldar), R(1),
/* 20 S> */ B(Return),
@@ -472,7 +472,7 @@ bytecodes: [
B(Ldar), R(3),
/* 0 E> */ B(Throw),
B(Ldar), R(3),
- /* 46 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaNamedProperty), R(1), U8(3), U8(0),
B(Star3),
/* 42 E> */ B(LdaNamedProperty), R(1), U8(4), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index 07a3cffaa0..cc75b76a47 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -90,7 +90,7 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 110
+bytecode array length: 117
bytecodes: [
/* 30 E> */ B(CreateBlockContext), U8(0),
B(PushContext), R(1),
@@ -105,35 +105,40 @@ bytecodes: [
B(Star3),
B(PopContext), R(1),
B(Mov), R(4), R(0),
- /* 89 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
+ /* 89 S> */ B(CreateEmptyArrayLiteral), U8(0),
B(Star3),
- B(LdaSmi), I8(1),
+ B(LdaZero),
+ B(Star2),
+ B(LdaZero),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
+ B(Ldar), R(2),
+ B(Inc), U8(3),
B(Star2),
- /* 101 S> */ B(CreateArrayLiteral), U8(4), U8(1), U8(37),
+ /* 101 S> */ B(CreateArrayLiteral), U8(3), U8(4), U8(37),
B(Star6),
- /* 101 E> */ B(GetIterator), R(6), U8(2), U8(4),
+ /* 101 E> */ B(GetIterator), R(6), U8(5), U8(7),
B(Mov), R(4), R(1),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star5),
- B(LdaNamedProperty), R(5), U8(5), U8(6),
+ B(LdaNamedProperty), R(5), U8(4), U8(9),
B(Star4),
B(CallProperty0), R(4), R(5), U8(15),
B(Star6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(6), U8(17),
+ B(LdaNamedProperty), R(6), U8(5), U8(17),
B(JumpIfToBooleanTrue), U8(18),
- B(LdaNamedProperty), R(6), U8(7), U8(8),
- B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(LdaNamedProperty), R(6), U8(6), U8(11),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
B(Ldar), R(2),
- B(Inc), U8(12),
+ B(Inc), U8(3),
B(Star2),
B(JumpLoop), U8(31), I8(0),
B(LdaSmi), I8(4),
- B(StaInArrayLiteral), R(3), R(2), U8(13),
+ B(StaInArrayLiteral), R(3), R(2), U8(1),
B(Mov), R(3), R(2),
- B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
+ /* 89 E> */ B(CallJSRuntime), U8(%reflect_construct), R(1), U8(2),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
@@ -142,7 +147,6 @@ constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index 8906df4536..c62a6489e7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -265,7 +265,7 @@ bytecodes: [
B(Ldar), R(3),
/* 11 E> */ B(Throw),
B(Ldar), R(3),
- /* 62 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaZero),
B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
@@ -311,7 +311,7 @@ bytecodes: [
B(Ldar), R(2),
/* 11 E> */ B(Throw),
B(Ldar), R(2),
- /* 56 S> */ B(Return),
+ B(Return),
/* 31 S> */ B(LdaZero),
B(Star1),
/* 36 S> */ B(LdaSmi), I8(10),
@@ -329,7 +329,7 @@ bytecodes: [
B(Ldar), R(2),
/* 47 E> */ B(Throw),
B(Ldar), R(2),
- /* 56 S> */ B(Return),
+ B(Return),
/* 44 S> */ B(Ldar), R(1),
B(Inc), U8(1),
B(Star1),
@@ -394,7 +394,7 @@ bytecodes: [
B(Star7),
B(Mov), R(0), R(5),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(5), U8(3),
- /* 67 S> */ B(Return),
+ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
@@ -462,7 +462,7 @@ bytecodes: [
B(Star6),
B(Mov), R(0), R(4),
B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionReject), R(4), U8(3),
- /* 61 S> */ B(Return),
+ B(Return),
]
constant pool: [
Smi [42],
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index ccb8710865..450c45fb24 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -5,7 +5,6 @@
#include <fstream>
#include "src/init/v8.h"
-
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 5e0f7d5d98..3596b03c25 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -4,14 +4,13 @@
#include <tuple>
-#include "src/init/v8.h"
-
#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
#include "src/codegen/compiler.h"
#include "src/execution/execution.h"
#include "src/handles/handles.h"
#include "src/heap/heap-inl.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -5054,6 +5053,9 @@ TEST(InterpreterGenerators) {
#ifndef V8_TARGET_ARCH_ARM
TEST(InterpreterWithNativeStack) {
+ // "Always sparkplug" messes with this test.
+ if (FLAG_always_sparkplug) return;
+
i::FLAG_interpreted_frames_native_stack = true;
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index d30be37923..79c773b22a 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -527,13 +527,14 @@ THREADED_TEST(Gc) {
static void StackCheck(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::StackFrameIterator iter(reinterpret_cast<i::Isolate*>(info.GetIsolate()));
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ i::StackFrameIterator iter(isolate);
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
i::Code code = frame->LookupCode();
CHECK(code.IsCode());
- CHECK(code.contains(frame->pc()));
+ CHECK(code.contains(isolate, frame->pc()));
iter.Advance();
}
}
@@ -902,3 +903,36 @@ TEST(ObjectSetLazyDataPropertyForIndex) {
CHECK_EQ(1, getter_call_count);
}
}
+
+TEST(ObjectTemplateSetLazyPropertySurvivesIC) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ static int getter_call_count = 0;
+ templ->SetLazyDataProperty(
+ v8_str("foo"), [](v8::Local<v8::Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ getter_call_count++;
+ info.GetReturnValue().Set(getter_call_count);
+ });
+
+ v8::Local<v8::Function> f = CompileRun(
+ "function f(obj) {"
+ " obj.foo;"
+ " obj.foo;"
+ "};"
+ "%PrepareFunctionForOptimization(f);"
+ "f")
+ .As<v8::Function>();
+ v8::Local<v8::Value> obj = templ->NewInstance(context).ToLocalChecked();
+ f->Call(context, context->Global(), 1, &obj).ToLocalChecked();
+ CHECK_EQ(getter_call_count, 1);
+
+ obj = templ->NewInstance(context).ToLocalChecked();
+ f->Call(context, context->Global(), 1, &obj).ToLocalChecked();
+ CHECK_EQ(getter_call_count, 2);
+}
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index a55644a2e3..9875098d1f 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "test/cctest/test-api.h"
using ::v8::Array;
@@ -46,31 +47,11 @@ Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab, int byteOffset,
std::shared_ptr<v8::BackingStore> Externalize(Local<v8::ArrayBuffer> ab) {
std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- ab->Externalize(backing_store);
- CHECK(ab->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
return backing_store;
}
std::shared_ptr<v8::BackingStore> Externalize(Local<v8::SharedArrayBuffer> ab) {
std::shared_ptr<v8::BackingStore> backing_store = ab->GetBackingStore();
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- ab->Externalize(backing_store);
- CHECK(ab->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
return backing_store;
}
@@ -149,46 +130,6 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(ArrayBuffer_External) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- // Keep the tests until the deprecated functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- Local<v8::ArrayBuffer> ab3 =
- v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, ab3->ByteLength());
- CHECK(ab3->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
THREADED_TEST(ArrayBuffer_DisableDetach) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -292,37 +233,6 @@ THREADED_TEST(ArrayBuffer_DetachingScript) {
CheckDataViewIsDetached(dv);
}
-// TODO(v8:9380) the Contents data structure should be deprecated.
-THREADED_TEST(ArrayBuffer_AllocationInformation) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, ab_size);
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- v8::ArrayBuffer::Contents contents(ab->GetContents());
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -341,7 +251,6 @@ THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
// marked as is_external or not.
USE(u8a->Buffer());
- CHECK(ab->IsExternal());
CHECK_EQ(2, backing_store->ByteLength());
}
@@ -380,35 +289,6 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(ArrayBuffer_ExternalReused) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> data(100);
- Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
- ab1->Detach();
- Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
- CHECK_EQ(bs1->Data(), bs2->Data());
-}
-
-THREADED_TEST(SharedArrayBuffer_ExternalReused) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> data(100);
- Local<v8::SharedArrayBuffer> ab1 =
- v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs1 = ab1->GetBackingStore();
- Local<v8::SharedArrayBuffer> ab2 =
- v8::SharedArrayBuffer::New(isolate, data.begin(), 100);
- std::shared_ptr<v8::BackingStore> bs2 = ab2->GetBackingStore();
- CHECK_EQ(bs1->Data(), bs2->Data());
-}
-
THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
i::FLAG_harmony_sharedarraybuffer = true;
LocalContext env;
@@ -450,64 +330,6 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
-THREADED_TEST(SharedArrayBuffer_External) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- Local<v8::SharedArrayBuffer> ab3 =
- v8::SharedArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
- CHECK(ab3->IsExternal());
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-// TODO(v8:9380) the Contents data structure should be deprecated.
-THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::SharedArrayBuffer> ab =
- v8::SharedArrayBuffer::New(isolate, ab_size);
- v8::SharedArrayBuffer::Contents contents(ab->GetContents());
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -515,9 +337,12 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
// Make sure the pointer looks like a heap object
uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
+ auto backing_store = v8::ArrayBuffer::NewBackingStore(
+ store_ptr, 8, [](void*, size_t, void*) {}, nullptr);
// Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, std::move(backing_store));
// Should not crash
CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -538,12 +363,15 @@ THREADED_TEST(SkipArrayBufferDuringScavenge) {
Local<v8::Object> tmp = v8::Object::New(isolate);
uint8_t* store_ptr =
reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
+ auto backing_store = v8::ArrayBuffer::NewBackingStore(
+ store_ptr, 8, [](void*, size_t, void*) {}, nullptr);
// Make `store_ptr` point to from space
CcTest::CollectGarbage(i::NEW_SPACE);
// Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, std::move(backing_store));
// Should not crash,
// i.e. backing store pointer should not be treated as a heap object pointer
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index af5858eaef..305840f29b 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -862,9 +862,11 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
CHECK(!value->BooleanValue(isolate));
}
-static void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
- v8::GenericNamedPropertyQueryCallback query,
- const char* source, int expected) {
+namespace {
+
+void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
+ v8::GenericNamedPropertyQueryCallback query,
+ const char* source, int expected) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
@@ -879,14 +881,13 @@ static void CheckInterceptorIC(v8::GenericNamedPropertyGetterCallback getter,
CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
}
-static void CheckInterceptorLoadIC(
- v8::GenericNamedPropertyGetterCallback getter, const char* source,
- int expected) {
+void CheckInterceptorLoadIC(v8::GenericNamedPropertyGetterCallback getter,
+ const char* source, int expected) {
CheckInterceptorIC(getter, nullptr, source, expected);
}
-static void InterceptorLoadICGetter(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+void InterceptorLoadICGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
@@ -896,6 +897,7 @@ static void InterceptorLoadICGetter(
info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}
+} // namespace
// This test should hit the load IC for the interceptor case.
THREADED_TEST(InterceptorLoadIC) {
@@ -912,9 +914,23 @@ THREADED_TEST(InterceptorLoadIC) {
// configurations of interceptor and explicit fields works fine
// (those cases are special cased to get better performance).
-static void InterceptorLoadXICGetter(
+namespace {
+
+void InterceptorLoadXICGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ info.GetReturnValue().Set(
+ v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust()
+ ? v8::Local<v8::Value>(v8::Integer::New(info.GetIsolate(), 42))
+ : v8::Local<v8::Value>());
+}
+
+void InterceptorLoadXICGetterWithSideEffects(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
+ CompileRun("interceptor_getter_side_effect()");
info.GetReturnValue().Set(
v8_str("x")
->Equals(info.GetIsolate()->GetCurrentContext(), name)
@@ -923,6 +939,7 @@ static void InterceptorLoadXICGetter(
: v8::Local<v8::Value>());
}
+} // namespace
THREADED_TEST(InterceptorLoadICWithFieldOnHolder) {
CheckInterceptorLoadIC(InterceptorLoadXICGetter,
@@ -1447,6 +1464,18 @@ void HasICQueryToggle(TKey name,
isolate, toggle ? v8::internal::ABSENT : v8::internal::NONE));
}
+template <typename TKey, v8::internal::PropertyAttributes attribute>
+void HasICQuerySideEffect(TKey name,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = CcTest::isolate();
+ CHECK_EQ(isolate, info.GetIsolate());
+ CompileRun("interceptor_query_side_effect()");
+ if (attribute != v8::internal::ABSENT) {
+ info.GetReturnValue().Set(v8::Integer::New(isolate, attribute));
+ }
+}
+
int named_query_counter = 0;
void NamedQueryCallback(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Integer>& info) {
@@ -1512,6 +1541,42 @@ THREADED_TEST(InterceptorHasICQueryToggle) {
500);
}
+THREADED_TEST(InterceptorStoreICWithSideEffectfulCallbacks) {
+ CheckInterceptorIC(EmptyInterceptorGetter,
+ HasICQuerySideEffect<Local<Name>, v8::internal::ABSENT>,
+ "let r;"
+ "let inside_side_effect = false;"
+ "let interceptor_query_side_effect = function() {"
+ " if (!inside_side_effect) {"
+ " inside_side_effect = true;"
+ " r.x = 153;"
+ " inside_side_effect = false;"
+ " }"
+ "};"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " r.x = i;"
+ "}",
+ 19);
+
+ CheckInterceptorIC(InterceptorLoadXICGetterWithSideEffects,
+ nullptr, // query callback is not provided
+ "let r;"
+ "let inside_side_effect = false;"
+ "let interceptor_getter_side_effect = function() {"
+ " if (!inside_side_effect) {"
+ " inside_side_effect = true;"
+ " r.y = 153;"
+ " inside_side_effect = false;"
+ " }"
+ "};"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " r.y = i;"
+ "}",
+ 19);
+}
+
static void InterceptorStoreICSetter(
Local<Name> key, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -1561,6 +1626,52 @@ THREADED_TEST(InterceptorStoreICWithNoSetter) {
CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
}
+THREADED_TEST(EmptyInterceptorDoesNotShadowReadOnlyProperty) {
+ // Interceptor should not shadow readonly property 'x' on the prototype, and
+ // attempt to store to 'x' must throw.
+ CheckInterceptorIC(EmptyInterceptorGetter,
+ HasICQuery<Local<Name>, v8::internal::ABSENT>,
+ "'use strict';"
+ "let p = {};"
+ "Object.defineProperty(p, 'x', "
+ " {value: 153, writable: false});"
+ "o.__proto__ = p;"
+ "let result = 0;"
+ "let r;"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " try {"
+ " r.x = i;"
+ " } catch (e) {"
+ " result++;"
+ " }"
+ "}"
+ "result",
+ 20);
+}
+
+THREADED_TEST(InterceptorShadowsReadOnlyProperty) {
+ // Interceptor claims that it has a writable property 'x', so the existence
+ // of the readonly property 'x' on the prototype should not cause exceptions.
+ CheckInterceptorIC(InterceptorLoadXICGetter,
+ nullptr, // query callback
+ "'use strict';"
+ "let p = {};"
+ "Object.defineProperty(p, 'x', "
+ " {value: 153, writable: false});"
+ "o.__proto__ = p;"
+ "let result = 0;"
+ "let r;"
+ "for (var i = 0; i < 20; i++) {"
+ " r = { __proto__: o };"
+ " try {"
+ " r.x = i;"
+ " result++;"
+ " } catch (e) {}"
+ "}"
+ "result",
+ 20);
+}
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index edfaa98dd6..4a55b47e2b 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -179,6 +179,8 @@ TEST(StackTrace) {
// Checks that a StackFrame has certain expected values.
static void checkStackFrame(const char* expected_script_name,
+ const char* expected_script_source,
+ const char* expected_script_source_mapping_url,
const char* expected_func_name,
int expected_line_number, int expected_column,
bool is_eval, bool is_constructor,
@@ -186,12 +188,24 @@ static void checkStackFrame(const char* expected_script_name,
v8::HandleScope scope(CcTest::isolate());
v8::String::Utf8Value func_name(CcTest::isolate(), frame->GetFunctionName());
v8::String::Utf8Value script_name(CcTest::isolate(), frame->GetScriptName());
+ v8::String::Utf8Value script_source(CcTest::isolate(),
+ frame->GetScriptSource());
+ v8::String::Utf8Value script_source_mapping_url(
+ CcTest::isolate(), frame->GetScriptSourceMappingURL());
if (*script_name == nullptr) {
// The situation where there is no associated script, like for evals.
CHECK_NULL(expected_script_name);
} else {
CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
}
+ CHECK_NOT_NULL(strstr(*script_source, expected_script_source));
+ if (*script_source_mapping_url == nullptr) {
+ CHECK_NULL(expected_script_source_mapping_url);
+ } else {
+ CHECK_NOT_NULL(expected_script_source_mapping_url);
+ CHECK_NOT_NULL(
+ strstr(*script_source_mapping_url, expected_script_source_mapping_url));
+ }
if (!frame->GetFunctionName().IsEmpty()) {
CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
}
@@ -202,6 +216,67 @@ static void checkStackFrame(const char* expected_script_name,
CHECK(frame->IsUserJavaScript());
}
+// Tests the C++ StackTrace API.
+
+// Test getting OVERVIEW information. Should ignore information that is not
+// script name, function name, line number, and column offset.
+const char* overview_source_eval = "new foo();";
+const char* overview_source =
+ "function bar() {\n"
+ " var y; AnalyzeStackInNativeCode(1);\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "//# sourceMappingURL=http://foobar.com/overview.ts\n"
+ "var x;eval('new foo();');";
+
+// Test getting DETAILED information.
+const char* detailed_source =
+ "function bat() {AnalyzeStackInNativeCode(2);\n"
+ "}\n"
+ "\n"
+ "function baz() {\n"
+ " bat();\n"
+ "}\n"
+ "eval('new baz();');";
+
+// Test using function.name and function.displayName in stack trace
+const char function_name_source[] =
+ "function bar(function_name, display_name, testGroup) {\n"
+ " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
+ " if (function_name) {\n"
+ " Object.defineProperty(f, 'name', { value: function_name });\n"
+ " }\n"
+ " if (display_name) {\n"
+ " f.displayName = display_name;"
+ " }\n"
+ " f()\n"
+ "}\n"
+ "bar('function.name', undefined, 3);\n"
+ "bar('function.name', 'function.displayName', 4);\n"
+ "bar(239, undefined, 5);\n";
+
+// Maybe it's a bit pathological to depend on the exact format of the wrapper
+// the Function constructor puts around it's input string. If this becomes a
+// hassle, maybe come up with some regex matching approach?
+const char function_name_source_anon3[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(3);\n"
+ "})";
+const char function_name_source_anon4[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(4);\n"
+ "})";
+const char function_name_source_anon5[] =
+ "(function anonymous(\n"
+ ") {\n"
+ "AnalyzeStackInNativeCode(5);\n"
+ "})";
+
static void AnalyzeStackInNativeCode(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
@@ -221,53 +296,55 @@ static void AnalyzeStackInNativeCode(
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bar", 2, 10, false, false,
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "bar",
+ 2, 10, false, false,
stackTrace->GetFrame(args.GetIsolate(), 0));
- checkStackFrame(origin, "foo", 6, 3, false, true,
- stackTrace->GetFrame(isolate, 1));
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "foo",
+ 6, 3, false, true, stackTrace->GetFrame(isolate, 1));
// This is the source string inside the eval which has the call to foo.
- checkStackFrame(nullptr, "", 1, 1, true, false,
+ checkStackFrame(nullptr, "new foo();", nullptr, "", 1, 1, true, false,
stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 8, 7, false, false,
- stackTrace->GetFrame(isolate, 3));
+ checkStackFrame(origin, overview_source, "//foobar.com/overview.ts", "", 9,
+ 7, false, false, stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kDetailedTest) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bat", 4, 22, false, false,
- stackTrace->GetFrame(isolate, 0));
- checkStackFrame(origin, "baz", 8, 3, false, true,
+ checkStackFrame(origin, detailed_source, nullptr, "bat", 4, 22, false,
+ false, stackTrace->GetFrame(isolate, 0));
+ checkStackFrame(origin, detailed_source, nullptr, "baz", 8, 3, false, true,
stackTrace->GetFrame(isolate, 1));
bool is_eval = true;
// This is the source string inside the eval which has the call to baz.
- checkStackFrame(nullptr, "", 1, 1, is_eval, false,
+ checkStackFrame(nullptr, "new baz();", nullptr, "", 1, 1, is_eval, false,
stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 10, 1, false, false,
+ checkStackFrame(origin, detailed_source, nullptr, "", 10, 1, false, false,
stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kFunctionName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ checkStackFrame(nullptr, function_name_source_anon3, nullptr,
+ "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameAndDisplayName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ checkStackFrame(nullptr, function_name_source_anon4, nullptr,
+ "function.name", 3, 1, true, false,
stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameIsNotString) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
+ checkStackFrame(nullptr, function_name_source_anon5, nullptr, "", 3, 1,
+ true, false, stackTrace->GetFrame(isolate, 0));
}
}
-// Tests the C++ StackTrace API.
// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
// THREADED_TEST(CaptureStackTrace) {
TEST(CaptureStackTrace) {
@@ -279,17 +356,6 @@ TEST(CaptureStackTrace) {
v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
LocalContext context(nullptr, templ);
- // Test getting OVERVIEW information. Should ignore information that is not
- // script name, function name, line number, and column offset.
- const char* overview_source =
- "function bar() {\n"
- " var y; AnalyzeStackInNativeCode(1);\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "var x;eval('new foo();');";
v8::Local<v8::String> overview_src = v8_str(overview_source);
v8::ScriptCompiler::Source script_source(overview_src,
v8::ScriptOrigin(isolate, origin));
@@ -302,15 +368,6 @@ TEST(CaptureStackTrace) {
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
- // Test getting DETAILED information.
- const char* detailed_source =
- "function bat() {AnalyzeStackInNativeCode(2);\n"
- "}\n"
- "\n"
- "function baz() {\n"
- " bat();\n"
- "}\n"
- "eval('new baz();');";
v8::Local<v8::String> detailed_src = v8_str(detailed_source);
// Make the script using a non-zero line and column offset.
v8::ScriptOrigin detailed_origin(isolate, origin, 3, 5);
@@ -324,21 +381,6 @@ TEST(CaptureStackTrace) {
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
- // Test using function.name and function.displayName in stack trace
- const char function_name_source[] =
- "function bar(function_name, display_name, testGroup) {\n"
- " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
- " if (function_name) {\n"
- " Object.defineProperty(f, 'name', { value: function_name });\n"
- " }\n"
- " if (display_name) {\n"
- " f.displayName = display_name;"
- " }\n"
- " f()\n"
- "}\n"
- "bar('function.name', undefined, 3);\n"
- "bar('function.name', 'function.displayName', 4);\n"
- "bar(239, undefined, 5);\n";
v8::Local<v8::String> function_name_src =
v8::String::NewFromUtf8Literal(isolate, function_name_source);
v8::ScriptCompiler::Source script_source3(function_name_src,
@@ -353,33 +395,37 @@ TEST(CaptureStackTrace) {
}
static int report_count = 0;
+
+// Test uncaught exception
+const char uncaught_exception_source[] =
+ "function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};";
+
static void StackTraceForUncaughtExceptionListener(
v8::Local<v8::Message> message, v8::Local<Value>) {
report_count++;
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(2, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo", 2, 3, false, false,
+ checkStackFrame("origin", uncaught_exception_source, nullptr, "foo", 2, 3,
+ false, false,
stack_trace->GetFrame(message->GetIsolate(), 0));
- checkStackFrame("origin", "bar", 5, 3, false, false,
+ checkStackFrame("origin", uncaught_exception_source, nullptr, "bar", 5, 3,
+ false, false,
stack_trace->GetFrame(message->GetIsolate(), 1));
}
TEST(CaptureStackTraceForUncaughtException) {
- report_count = 0;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin(
- "function foo() {\n"
- " throw 1;\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};",
- "origin");
+ CompileRunWithOrigin(uncaught_exception_source, "origin");
v8::Local<v8::Object> global = env->Global();
Local<Value> trouble =
global->Get(env.local(), v8_str("bar")).ToLocalChecked();
@@ -392,40 +438,100 @@ TEST(CaptureStackTraceForUncaughtException) {
CHECK_EQ(1, report_count);
}
+// Test uncaught exception in a setter
+const char uncaught_setter_exception_source[] =
+ "var setters = ['column', 'lineNumber', 'scriptName',\n"
+ " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
+ " 'isConstructor'];\n"
+ "for (let i = 0; i < setters.length; i++) {\n"
+ " let prop = setters[i];\n"
+ " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
+ "}\n";
+
+static void StackTraceForUncaughtExceptionAndSettersListener(
+ v8::Local<v8::Message> message, v8::Local<Value> value) {
+ CHECK(value->IsObject());
+ v8::Isolate* isolate = message->GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ report_count++;
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK_EQ(1, stack_trace->GetFrameCount());
+ checkStackFrame(nullptr, "throw 'exception';", nullptr, nullptr, 1, 1, false,
+ false, stack_trace->GetFrame(isolate, 0));
+ v8::Local<v8::StackFrame> stack_frame = stack_trace->GetFrame(isolate, 0);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ CHECK(object
+ ->Set(context,
+ v8::String::NewFromUtf8Literal(isolate, "lineNumber"),
+ v8::Integer::New(isolate, stack_frame->GetLineNumber()))
+ .IsNothing());
+}
+
TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
+ report_count = 0;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> object = v8::Object::New(isolate);
+ isolate->AddMessageListener(StackTraceForUncaughtExceptionAndSettersListener,
+ object);
isolate->SetCaptureStackTraceForUncaughtExceptions(true, 1024,
v8::StackTrace::kDetailed);
- CompileRun(
- "var setters = ['column', 'lineNumber', 'scriptName',\n"
- " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
- " 'isConstructor'];\n"
- "for (var i = 0; i < setters.length; i++) {\n"
- " var prop = setters[i];\n"
- " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
- "}\n");
+ CompileRun(uncaught_setter_exception_source);
CompileRun("throw 'exception';");
isolate->SetCaptureStackTraceForUncaughtExceptions(false);
-}
+ isolate->RemoveMessageListeners(
+ StackTraceForUncaughtExceptionAndSettersListener);
+ CHECK(object
+ ->Get(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8Literal(isolate, "lineNumber"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK_EQ(report_count, 1);
+}
+
+const char functions_with_function_name[] =
+ "function gen(name, counter) {\n"
+ " var f = function foo() {\n"
+ " if (counter === 0)\n"
+ " throw 1;\n"
+ " gen(name, counter - 1)();\n"
+ " };\n"
+ " if (counter == 3) {\n"
+ " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
+ " } else {\n"
+ " Object.defineProperty(f, 'name', {writable:true});\n"
+ " if (counter == 2)\n"
+ " f.name = 42;\n"
+ " else\n"
+ " f.name = name + ':' + counter;\n"
+ " }\n"
+ " return f;\n"
+ "};"
+ "//# sourceMappingURL=local/functional.sc";
+
+const char functions_with_function_name_caller[] = "gen('foo', 3)();";
static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
v8::Isolate* isolate = message->GetIsolate();
CHECK_EQ(5, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo:0", 4, 7, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo:0", 4, 7, false, false,
stack_trace->GetFrame(isolate, 0));
- checkStackFrame("origin", "foo:1", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo:1", 5, 27, false, false,
stack_trace->GetFrame(isolate, 1));
- checkStackFrame("origin", "foo", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo", 5, 27, false, false,
stack_trace->GetFrame(isolate, 2));
- checkStackFrame("origin", "foo", 5, 27, false, false,
+ checkStackFrame("origin", functions_with_function_name, "local/functional.sc",
+ "foo", 5, 27, false, false,
stack_trace->GetFrame(isolate, 3));
- checkStackFrame("origin", "", 1, 14, false, false,
- stack_trace->GetFrame(isolate, 4));
+ checkStackFrame("origin", functions_with_function_name_caller, nullptr, "", 1,
+ 14, false, false, stack_trace->GetFrame(isolate, 4));
}
TEST(GetStackTraceContainsFunctionsWithFunctionName) {
@@ -433,29 +539,11 @@ TEST(GetStackTraceContainsFunctionsWithFunctionName) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- CompileRunWithOrigin(
- "function gen(name, counter) {\n"
- " var f = function foo() {\n"
- " if (counter === 0)\n"
- " throw 1;\n"
- " gen(name, counter - 1)();\n"
- " };\n"
- " if (counter == 3) {\n"
- " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
- " } else {\n"
- " Object.defineProperty(f, 'name', {writable:true});\n"
- " if (counter == 2)\n"
- " f.name = 42;\n"
- " else\n"
- " f.name = name + ':' + counter;\n"
- " }\n"
- " return f;\n"
- "};",
- "origin");
+ CompileRunWithOrigin(functions_with_function_name, "origin");
isolate->AddMessageListener(StackTraceFunctionNameListener);
isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin("gen('foo', 3)();", "origin");
+ CompileRunWithOrigin(functions_with_function_name_caller, "origin");
isolate->SetCaptureStackTraceForUncaughtExceptions(false);
isolate->RemoveMessageListeners(StackTraceFunctionNameListener);
}
diff --git a/deps/v8/test/cctest/test-api-typed-array.cc b/deps/v8/test/cctest/test-api-typed-array.cc
index 59c7137525..a35aad8e37 100644
--- a/deps/v8/test/cctest/test-api-typed-array.cc
+++ b/deps/v8/test/cctest/test-api-typed-array.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/cctest/test-api.h"
-
#include "src/api/api-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "test/cctest/test-api.h"
using ::v8::Array;
using ::v8::Context;
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index c01c1ea791..5eafa420bc 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -71,9 +71,12 @@
#include "src/utils/utils.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
-#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/flag-utils.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#endif // V8_ENABLE_WEBASSEMBLY
static const bool kLogThreading = false;
@@ -378,6 +381,45 @@ THREADED_TEST(ReceiverSignature) {
}
}
+static void DoNothingCallback(const v8::FunctionCallbackInfo<v8::Value>&) {}
+
+// Regression test for issue chromium:1188563.
+THREADED_TEST(Regress1188563) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Set up some data for CallHandlerInfo.
+ v8::Local<v8::FunctionTemplate> data_constructor_templ =
+ v8::FunctionTemplate::New(isolate);
+ v8::Local<Function> data_constructor =
+ data_constructor_templ->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> data =
+ data_constructor->NewInstance(env.local()).ToLocalChecked();
+
+ // Setup templates and instance with accessor property.
+ v8::Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> callback =
+ v8::FunctionTemplate::New(isolate, DoNothingCallback, data);
+ v8::Local<v8::ObjectTemplate> instance_templ = fun->InstanceTemplate();
+ instance_templ->SetAccessorProperty(v8_str("accessor"), callback, callback);
+ Local<Value> test_object =
+ instance_templ->NewInstance(env.local()).ToLocalChecked();
+ // Setup global variables.
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("test_object"), test_object)
+ .FromJust());
+ CompileRun(
+ "function test() {"
+ " test_object.accessor;"
+ "};"
+ "%PrepareFunctionForOptimization(test);"
+ "try { test() } catch(e) {}"
+ "try { test() } catch(e) {}"
+ "%OptimizeFunctionOnNextCall(test);"
+ "test()");
+}
THREADED_TEST(HulIgennem) {
LocalContext env;
@@ -514,6 +556,7 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CHECK(source->IsExternalTwoByte());
+ CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -541,6 +584,8 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
String::NewExternalOneByte(env->GetIsolate(), resource)
.ToLocalChecked();
CHECK(source->IsExternalOneByte());
+ CHECK(source->IsExternal());
+ CHECK(!source->IsExternalTwoByte());
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalOneByteStringResource());
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -574,6 +619,7 @@ THREADED_TEST(ScriptMakingExternalString) {
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK(!source->IsExternalTwoByte());
CHECK(!source->IsExternalOneByte());
+ CHECK(!source->IsExternal());
String::Encoding encoding = String::UNKNOWN_ENCODING;
CHECK(!source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
@@ -2655,20 +2701,17 @@ static void ThrowingSymbolAccessorGetter(
THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope scope(isolate);
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- CHECK_EQ(
- 1,
- a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
+ CHECK_EQ(1, a->map().instance_descriptors(i_isolate).number_of_descriptors());
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
- CHECK_EQ(
- 0,
- a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
+ CHECK_EQ(0, a->map().instance_descriptors(i_isolate).number_of_descriptors());
// But we should still have an AccessorInfo.
- i::Handle<i::String> name = CcTest::i_isolate()->factory()->length_string();
- i::LookupIterator it(CcTest::i_isolate(), a, name,
+ i::Handle<i::String> name = i_isolate->factory()->length_string();
+ i::LookupIterator it(i_isolate, a, name,
i::LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
CHECK(it.GetAccessors()->IsAccessorInfo());
@@ -4728,6 +4771,13 @@ namespace {
// some particular way by calling the supplied |tester| function. The tests that
// use this purposely test only a single getter as the getter updates the cached
// state of the object which could affect the results of other functions.
+const char message_attributes_script[] =
+ R"javascript(
+ (function() {
+ throw new Error();
+ })();
+ )javascript";
+
void CheckMessageAttributes(std::function<void(v8::Local<v8::Context> context,
v8::Local<v8::Message> message)>
tester) {
@@ -4735,12 +4785,7 @@ void CheckMessageAttributes(std::function<void(v8::Local<v8::Context> context,
v8::HandleScope scope(context->GetIsolate());
TryCatch try_catch(context->GetIsolate());
- CompileRun(
- R"javascript(
- (function() {
- throw new Error();
- })();
- )javascript");
+ CompileRun(message_attributes_script);
CHECK(try_catch.HasCaught());
v8::Local<v8::Value> error = try_catch.Exception();
@@ -4763,38 +4808,47 @@ TEST(MessageGetLineNumber) {
TEST(MessageGetStartColumn) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(14, message->GetStartColumn(context).FromJust());
+ CHECK_EQ(12, message->GetStartColumn(context).FromJust());
});
}
TEST(MessageGetEndColumn) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(15, message->GetEndColumn(context).FromJust());
+ CHECK_EQ(13, message->GetEndColumn(context).FromJust());
});
}
TEST(MessageGetStartPosition) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(35, message->GetStartPosition());
+ CHECK_EQ(31, message->GetStartPosition());
});
}
TEST(MessageGetEndPosition) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
- CHECK_EQ(36, message->GetEndPosition());
+ CHECK_EQ(32, message->GetEndPosition());
});
}
+TEST(MessageGetSource) {
+ CheckMessageAttributes([](v8::Local<v8::Context> context,
+ v8::Local<v8::Message> message) {
+ std::string result(*v8::String::Utf8Value(
+ context->GetIsolate(), message->GetSource(context).ToLocalChecked()));
+ CHECK_EQ(message_attributes_script, result);
+ });
+}
+
TEST(MessageGetSourceLine) {
CheckMessageAttributes(
[](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
std::string result(*v8::String::Utf8Value(
context->GetIsolate(),
message->GetSourceLine(context).ToLocalChecked()));
- CHECK_EQ(" throw new Error();", result);
+ CHECK_EQ(" throw new Error();", result);
});
}
@@ -15463,7 +15517,7 @@ THREADED_TEST(ScriptContextDependence) {
101);
}
-
+#if V8_ENABLE_WEBASSEMBLY
static int asm_warning_triggered = 0;
static void AsmJsWarningListener(v8::Local<v8::Message> message,
@@ -15490,14 +15544,11 @@ TEST(AsmJsWarning) {
" return {};\n"
"}\n"
"module();");
-#if V8_ENABLE_WEBASSEMBLY
int kExpectedWarnings = 1;
-#else
- int kExpectedWarnings = 0;
-#endif
CHECK_EQ(kExpectedWarnings, asm_warning_triggered);
isolate->RemoveMessageListeners(AsmJsWarningListener);
}
+#endif // V8_ENABLE_WEBASSEMBLY
static int error_level_message_count = 0;
static int expected_error_level = 0;
@@ -16803,6 +16854,7 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
}
~VisitorImpl() override = default;
void VisitExternalString(v8::Local<v8::String> string) override {
+ CHECK(string->IsExternal());
if (string->IsExternalOneByte()) {
CHECK(!string->IsExternalTwoByte());
return;
@@ -16847,6 +16899,7 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
cons->MakeExternal(resource);
CHECK(cons->IsExternalTwoByte());
+ CHECK(cons->IsExternal());
CHECK_EQ(resource, cons->GetExternalStringResource());
String::Encoding encoding;
CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
@@ -16884,7 +16937,7 @@ TEST(VisitExternalStrings) {
v8::HandleScope scope(isolate);
const char string[] = "Some string";
uint16_t* two_byte_string = AsciiToTwoByteString(string);
- TestResource* resource[4];
+ TestResource* resource[5];
resource[0] = new TestResource(two_byte_string);
v8::Local<v8::String> string0 =
v8::String::NewExternalTwoByte(env->GetIsolate(), resource[0])
@@ -16912,11 +16965,29 @@ TEST(VisitExternalStrings) {
string3_i).is_null());
CHECK(string3_i->IsInternalizedString());
+ // Externalized one-byte string.
+ auto one_byte_resource =
+ new TestOneByteResource(i::StrDup(string), nullptr, 0);
+ v8::Local<v8::String> string4 =
+ String::NewExternalOneByte(env->GetIsolate(), one_byte_resource)
+ .ToLocalChecked();
+
// We need to add usages for string* to avoid warnings in GCC 4.7
CHECK(string0->IsExternalTwoByte());
CHECK(string1->IsExternalTwoByte());
CHECK(string2->IsExternalTwoByte());
CHECK(string3->IsExternalTwoByte());
+ CHECK(!string4->IsExternalTwoByte());
+ CHECK(string0->IsExternal());
+ CHECK(string1->IsExternal());
+ CHECK(string2->IsExternal());
+ CHECK(string3->IsExternal());
+ CHECK(string4->IsExternal());
+ CHECK(!string0->IsExternalOneByte());
+ CHECK(!string1->IsExternalOneByte());
+ CHECK(!string2->IsExternalOneByte());
+ CHECK(!string3->IsExternalOneByte());
+ CHECK(string4->IsExternalOneByte());
VisitorImpl visitor(resource);
isolate->VisitExternalResources(&visitor);
@@ -21391,13 +21462,6 @@ class RegExpInterruptTest {
string->MakeExternal(&two_byte_string_resource);
}
- static void ReenterIrregexp(v8::Isolate* isolate, void* data) {
- v8::HandleScope scope(isolate);
- v8::TryCatch try_catch(isolate);
- // Irregexp is not reentrant. This should crash.
- CompileRun("/((a*)*)*b/.exec('aaaaab')");
- }
-
private:
static void SignalSemaphore(v8::Isolate* isolate, void* data) {
reinterpret_cast<RegExpInterruptTest*>(data)->sem_.Signal();
@@ -21524,21 +21588,6 @@ TEST(RegExpInterruptAndMakeSubjectTwoByteExternal) {
test.RunTest(RegExpInterruptTest::MakeSubjectTwoByteExternal);
}
-TEST(RegExpInterruptAndReenterIrregexp) {
- // We only check in the runtime entry to irregexp, so make sure we don't hit
- // an interpreter.
- i::FLAG_regexp_tier_up_ticks = 0;
- i::FLAG_regexp_interpret_all = false;
- i::FLAG_enable_experimental_regexp_engine = false;
- // We want to be stuck in regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
- i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
- RegExpInterruptTest test;
- test.RunTest(RegExpInterruptTest::ReenterIrregexp);
-}
-
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -21800,7 +21849,6 @@ class RequestInterruptTestWithMathAbs
}
};
-
TEST(RequestInterruptTestWithFunctionCall) {
RequestInterruptTestWithFunctionCall().RunTest();
}
@@ -21830,7 +21878,6 @@ TEST(RequestInterruptTestWithMathAbs) {
RequestInterruptTestWithMathAbs().RunTest();
}
-
class RequestMultipleInterrupts : public RequestInterruptTestBase {
public:
RequestMultipleInterrupts() : i_thread(this), counter_(0) {}
@@ -23571,7 +23618,14 @@ void RunStreamingTest(const char** chunks, v8::ScriptType type,
if (i::FLAG_harmony_top_level_await) {
v8::Local<v8::Promise> promise = result.As<v8::Promise>();
CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
- CHECK_EQ(13, promise->Result()->Int32Value(env.local()).FromJust());
+ CHECK(promise->Result()->IsUndefined());
+ // Fulfilled top-level await promises always resolve to undefined. Check
+ // the test result via a global variable.
+ CHECK_EQ(13, env->Global()
+ ->Get(env.local(), v8_str("Result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
} else {
CHECK(!result.IsEmpty());
CHECK_EQ(13, result->Int32Value(env.local()).FromJust());
@@ -23599,17 +23653,20 @@ void RunStreamingTest(const char** chunks,
TEST(StreamingSimpleScript) {
// This script is unrealistically small, since no one chunk is enough to fill
// the backing buffer of Scanner, let alone overflow it.
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
- nullptr};
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f", "oo(); ", nullptr};
RunStreamingTest(chunks);
}
TEST(StreamingScriptConstantArray) {
// When run with Ignition, tests that the streaming parser canonicalizes
// handles so that they are only added to the constant pool array once.
- const char* chunks[] = {
- "var a = {};", "var b = {};", "var c = 'testing';",
- "var d = 'testing';", "13;", nullptr};
+ const char* chunks[] = {"var a = {};",
+ "var b = {};",
+ "var c = 'testing';",
+ "var d = 'testing';",
+ "globalThis.Result = 13;",
+ nullptr};
RunStreamingTest(chunks);
}
@@ -23624,7 +23681,7 @@ TEST(StreamingScriptEvalShadowing) {
" function g() {\n"
" return y\n"
" }\n"
- " return g();\n"
+ " return (globalThis.Result = g());\n"
" })()\n"
"})()\n";
const char* chunks[] = {chunk1, nullptr};
@@ -23648,7 +23705,7 @@ TEST(StreamingBiggerScript) {
" for (i = 0; i < 13; ++i) { result = result + 1; }\n"
" return result;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", nullptr};
+ const char* chunks[] = {chunk1, "globalThis.Result = foo(); ", nullptr};
RunStreamingTest(chunks);
}
@@ -23660,7 +23717,8 @@ TEST(StreamingScriptWithParseError) {
" // This will result in a parse error.\n"
" var if else then foo";
char chunk2[] = " 13\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE,
false);
@@ -23671,7 +23729,8 @@ TEST(StreamingScriptWithParseError) {
" // This will be parsed successfully.\n"
" function foo() { return ";
char chunk2[] = " 13; }\n";
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks);
}
@@ -23688,7 +23747,7 @@ TEST(StreamingUtf8Script) {
" var foob\xec\x92\x81r = 13;\n"
" return foob\xec\x92\x81r;\n"
"}\n";
- const char* chunks[] = {chunk1, "foo(); ", nullptr};
+ const char* chunks[] = {chunk1, "globalThis.Result = foo(); ", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23712,7 +23771,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
for (int i = 0; i < 3; ++i) {
chunk2[i] = reference[i];
}
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23733,7 +23793,8 @@ TEST(StreamingUtf8ScriptWithSplitCharacters) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23760,7 +23821,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk2[0] = reference[0];
chunk2[1] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3,
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// The small chunk is at the end of a character
@@ -23778,7 +23840,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3,
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
// Case 2: the script ends with a multi-byte character. Make sure that it's
@@ -23786,7 +23849,7 @@ TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
{
char chunk1[] =
"var foob\xec\x92\x81 = 13;\n"
- "foob\xec\x92\x81";
+ "globalThis.Result = foob\xec\x92\x81";
const char* chunks[] = {chunk1, nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23811,7 +23874,8 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk3[0] = reference[2];
- const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, chunk3, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23882,7 +23946,8 @@ TEST(StreamingScriptWithInvalidUtf8) {
"}\n";
for (int i = 0; i < 5; ++i) chunk1[strlen(chunk1) - 5 + i] = reference[i];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
}
@@ -23903,7 +23968,8 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -23924,7 +23990,8 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit2) {
chunk1[strlen(chunk1) - 1] = reference[0];
chunk2[0] = reference[1];
chunk2[1] = reference[2];
- const char* chunks[] = {chunk1, chunk2, "foo();", nullptr};
+ const char* chunks[] = {chunk1, chunk2, "globalThis.Result = foo();",
+ nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
}
@@ -24784,7 +24851,8 @@ TEST(ClassPrototypeCreationContext) {
TEST(SimpleStreamingScriptWithSourceURL) {
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo();\n",
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f", "oo();\n",
"//# sourceURL=bar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
@@ -24792,7 +24860,8 @@ TEST(SimpleStreamingScriptWithSourceURL) {
TEST(StreamingScriptWithSplitSourceURL) {
- const char* chunks[] = {"function foo() { ret", "urn 13; } f",
+ const char* chunks[] = {"function foo() { ret",
+ "urn 13; } globalThis.Result = f",
"oo();\n//# sourceURL=b", "ar2.js\n", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
"bar2.js");
@@ -24801,7 +24870,8 @@ TEST(StreamingScriptWithSplitSourceURL) {
TEST(StreamingScriptWithSourceMappingURLInTheMiddle) {
const char* chunks[] = {"function foo() { ret", "urn 13; }\n//#",
- " sourceMappingURL=bar2.js\n", "foo();", nullptr};
+ " sourceMappingURL=bar2.js\n",
+ "globalThis.Result = foo();", nullptr};
RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, true,
nullptr, "bar2.js");
}
@@ -25127,10 +25197,10 @@ THREADED_TEST(ReceiverConversionForAccessors) {
CHECK(CompileRun("acc.call(undefined) == 42")->BooleanValue(isolate));
}
-class FutexInterruptionThread : public v8::base::Thread {
+class TerminateExecutionThread : public v8::base::Thread {
public:
- explicit FutexInterruptionThread(v8::Isolate* isolate)
- : Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
+ explicit TerminateExecutionThread(v8::Isolate* isolate)
+ : Thread(Options("TerminateExecutionThread")), isolate_(isolate) {}
void Run() override {
// Wait a bit before terminating.
@@ -25142,14 +25212,13 @@ class FutexInterruptionThread : public v8::base::Thread {
v8::Isolate* isolate_;
};
-
TEST(FutexInterruption) {
i::FLAG_harmony_sharedarraybuffer = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
- FutexInterruptionThread timeout_thread(isolate);
+ TerminateExecutionThread timeout_thread(isolate);
v8::TryCatch try_catch(CcTest::isolate());
CHECK(timeout_thread.Start());
@@ -25162,6 +25231,28 @@ TEST(FutexInterruption) {
timeout_thread.Join();
}
+TEST(StackCheckTermination) {
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ TerminateExecutionThread timeout_thread(isolate);
+
+ v8::TryCatch try_catch(isolate);
+ CHECK(timeout_thread.Start());
+ auto should_continue = [i_isolate]() {
+ using StackLimitCheck = i::StackLimitCheck;
+ STACK_CHECK(i_isolate, false);
+ return true;
+ };
+ while (should_continue()) {
+ }
+ if (i_isolate->has_pending_exception()) i_isolate->ReportPendingMessages();
+ CHECK(try_catch.HasTerminated());
+ timeout_thread.Join();
+}
+
static int nb_uncaught_exception_callback_calls = 0;
@@ -26705,6 +26796,7 @@ TEST(AtomicsWaitCallback) {
AtomicsWaitCallbackCommon(isolate, CompileRun(init), 4, 4);
}
+#if V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
namespace wasm {
@@ -26783,6 +26875,7 @@ TEST(WasmI64AtomicWaitCallback) {
} // namespace wasm
} // namespace internal
} // namespace v8
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(BigIntAPI) {
LocalContext env;
@@ -27937,37 +28030,28 @@ void CallWithUnexpectedObjectType(v8::Local<v8::Value> receiver) {
}
class TestCFunctionInfo : public v8::CFunctionInfo {
- const v8::CTypeInfo& ReturnInfo() const override {
- static v8::CTypeInfo return_info =
- v8::CTypeInfo(v8::CTypeInfo::Type::kVoid);
- return return_info;
- }
-
- unsigned int ArgumentCount() const override { return 2; }
-
- const v8::CTypeInfo& ArgumentInfo(unsigned int index) const override {
- static v8::CTypeInfo type_info0 =
- v8::CTypeInfo(v8::CTypeInfo::Type::kV8Value);
- static v8::CTypeInfo type_info1 = v8::CTypeInfo(v8::CTypeInfo::Type::kBool);
- switch (index) {
- case 0:
- return type_info0;
- case 1:
- return type_info1;
- default:
- UNREACHABLE();
- }
- }
+ static constexpr unsigned int kArgCount = 2u;
- bool HasOptions() const override { return false; }
+ public:
+ TestCFunctionInfo()
+ : v8::CFunctionInfo(v8::CTypeInfo(v8::CTypeInfo::Type::kVoid), kArgCount,
+ arg_info_storage_),
+ arg_info_storage_{
+ v8::CTypeInfo(v8::CTypeInfo::Type::kV8Value),
+ v8::CTypeInfo(v8::CTypeInfo::Type::kBool),
+ } {}
+
+ private:
+ const v8::CTypeInfo arg_info_storage_[kArgCount];
};
void CheckDynamicTypeInfo() {
LocalContext env;
static TestCFunctionInfo type_info;
- v8::CFunction c_func =
- v8::CFunction::Make(ApiNumberChecker<bool>::FastCallback, &type_info);
+ v8::CFunction c_func = v8::CFunction(
+ reinterpret_cast<const void*>(ApiNumberChecker<bool>::FastCallback),
+ &type_info);
CHECK_EQ(c_func.ArgumentCount(), 2);
CHECK_EQ(c_func.ArgumentInfo(0).GetType(), v8::CTypeInfo::Type::kV8Value);
CHECK_EQ(c_func.ArgumentInfo(1).GetType(), v8::CTypeInfo::Type::kBool);
@@ -27979,7 +28063,6 @@ void CheckDynamicTypeInfo() {
TEST(FastApiStackSlot) {
#ifndef V8_LITE_MODE
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
@@ -28031,7 +28114,6 @@ TEST(FastApiStackSlot) {
TEST(FastApiCalls) {
#ifndef V8_LITE_MODE
if (i::FLAG_jitless) return;
- if (i::FLAG_turboprop) return;
FLAG_SCOPE_EXTERNAL(opt);
FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index a1d8cdfb7d..2784bfe16b 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -6832,63 +6832,6 @@ TEST(ldr_literal_range_max_dist_no_emission_2) {
#endif
-static const PrefetchOperation kPrfmOperations[] = {
- PLDL1KEEP, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
-
- PLIL1KEEP, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM,
-
- PSTL1KEEP, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM};
-
-TEST(prfm_regoffset_assem) {
- INIT_V8();
- SETUP();
-
- START();
- // The address used in prfm doesn't have to be valid.
- __ Mov(x0, 0x0123456789abcdef);
-
- CPURegList inputs(CPURegister::kRegister, kXRegSizeInBits, 10, 18);
- __ Mov(x10, 0);
- __ Mov(x11, 1);
- __ Mov(x12, 8);
- __ Mov(x13, 255);
- __ Mov(x14, -0);
- __ Mov(x15, -1);
- __ Mov(x16, -8);
- __ Mov(x17, -255);
- __ Mov(x18, 0xfedcba9876543210);
-
- for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
- // Unallocated prefetch operations are ignored, so test all of them.
- // We have to use the Assembler directly for this.
- CPURegList loop = inputs;
- while (!loop.IsEmpty()) {
- __ prfm(op, MemOperand(x0, Register::Create(loop.PopLowestIndex().code(),
- kXRegSizeInBits)));
- }
- }
-
- for (PrefetchOperation op : kPrfmOperations) {
- // Also test named operations.
- CPURegList loop = inputs;
- while (!loop.IsEmpty()) {
- Register input =
- Register::Create(loop.PopLowestIndex().code(), kXRegSizeInBits);
- __ prfm(op, MemOperand(x0, input, UXTW));
- __ prfm(op, MemOperand(x0, input, UXTW, 3));
- __ prfm(op, MemOperand(x0, input, LSL));
- __ prfm(op, MemOperand(x0, input, LSL, 3));
- __ prfm(op, MemOperand(x0, input, SXTW));
- __ prfm(op, MemOperand(x0, input, SXTW, 3));
- __ prfm(op, MemOperand(x0, input, SXTX));
- __ prfm(op, MemOperand(x0, input, SXTX, 3));
- }
- }
-
- END();
- RUN();
-}
-
TEST(add_sub_imm) {
INIT_V8();
SETUP();
diff --git a/deps/v8/test/cctest/test-code-pages.cc b/deps/v8/test/cctest/test-code-pages.cc
index d0ed8334a5..7d335f2174 100644
--- a/deps/v8/test/cctest/test-code-pages.cc
+++ b/deps/v8/test/cctest/test-code-pages.cc
@@ -70,18 +70,23 @@ bool PagesHasExactPage(std::vector<MemoryRange>* pages, Address search_page,
return it != pages->end();
}
-bool PagesContainsAddress(std::vector<MemoryRange>* pages,
- Address search_address) {
+bool PagesContainsRange(std::vector<MemoryRange>* pages, Address search_address,
+ size_t size) {
byte* addr = reinterpret_cast<byte*>(search_address);
auto it =
- std::find_if(pages->begin(), pages->end(), [addr](const MemoryRange& r) {
+ std::find_if(pages->begin(), pages->end(), [=](const MemoryRange& r) {
const byte* page_start = reinterpret_cast<const byte*>(r.start);
const byte* page_end = page_start + r.length_in_bytes;
- return addr >= page_start && addr < page_end;
+ return addr >= page_start && (addr + size) <= page_end;
});
return it != pages->end();
}
+bool PagesContainsAddress(std::vector<MemoryRange>* pages,
+ Address search_address) {
+ return PagesContainsRange(pages, search_address, 0);
+}
+
} // namespace
TEST(CodeRangeCorrectContents) {
@@ -99,8 +104,18 @@ TEST(CodeRangeCorrectContents) {
CHECK_EQ(2, pages->size());
CHECK(PagesHasExactPage(pages, code_range.begin(), code_range.size()));
CHECK(PagesHasExactPage(
- pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
- i_isolate->embedded_blob_code_size()));
+ pages, reinterpret_cast<Address>(i_isolate->CurrentEmbeddedBlobCode()),
+ i_isolate->CurrentEmbeddedBlobCodeSize()));
+ if (i_isolate->is_short_builtin_calls_enabled()) {
+ // In this case embedded blob code must be included via code_range.
+ CHECK(PagesContainsRange(
+ pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
+ i_isolate->embedded_blob_code_size()));
+ } else {
+ CHECK(PagesHasExactPage(
+ pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
+ i_isolate->embedded_blob_code_size()));
+ }
}
TEST(CodePagesCorrectContents) {
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 1ba26a81b8..67aafa8709 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -1890,13 +1890,21 @@ TEST(AllocateJSObjectFromMap) {
"object")));
JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES, 0,
"Normalize");
+ Handle<HeapObject> properties =
+ V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
+ ? Handle<HeapObject>(object->property_dictionary_swiss(), isolate)
+ : handle(object->property_dictionary(), isolate);
Handle<JSObject> result = Handle<JSObject>::cast(
- ft.Call(handle(object->map(), isolate),
- handle(object->property_dictionary(), isolate),
+ ft.Call(handle(object->map(), isolate), properties,
handle(object->elements(), isolate))
.ToHandleChecked());
CHECK_EQ(result->map(), object->map());
- CHECK_EQ(result->property_dictionary(), object->property_dictionary());
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CHECK_EQ(result->property_dictionary_swiss(),
+ object->property_dictionary_swiss());
+ } else {
+ CHECK_EQ(result->property_dictionary(), object->property_dictionary());
+ }
CHECK(!result->HasFastProperties());
#ifdef VERIFY_HEAP
isolate->heap()->Verify();
@@ -2445,6 +2453,90 @@ TEST(IsDebugActive) {
*debug_is_active = false;
}
+// Ensure that the kShortBuiltinCallsOldSpaceSizeThreshold constant can be used
+// for detecting whether the machine has >= 4GB of physical memory by checking
+// the max old space size.
+TEST(ShortBuiltinCallsThreshold) {
+ if (!V8_SHORT_BUILTIN_CALLS_BOOL) return;
+
+ const uint64_t kPhysicalMemoryThreshold = size_t{4} * GB;
+
+ size_t heap_size, old, young;
+
+ // If the physical memory is < kPhysicalMemoryThreshold then the old space
+ // size must be below the kShortBuiltinCallsOldSpaceThreshold.
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold - MB);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_LT(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+
+ // If the physical memory is >= kPhysicalMemoryThreshold then the old space
+ // size must be below the kShortBuiltinCallsOldSpaceThreshold.
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_GE(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+
+ heap_size = Heap::HeapSizeFromPhysicalMemory(kPhysicalMemoryThreshold + MB);
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young, &old);
+ CHECK_GE(old, kShortBuiltinCallsOldSpaceSizeThreshold);
+}
+
+TEST(CallBuiltin) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate,
+ kNumParams + 1); // Include receiver.
+ PromiseBuiltinsAssembler m(asm_tester.state());
+
+ {
+ auto receiver = m.Parameter<Object>(1);
+ auto name = m.Parameter<Name>(2);
+ auto context = m.Parameter<Context>(kNumParams + 3);
+
+ auto value = m.CallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ m.Return(value);
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Name> name = factory->InternalizeUtf8String("a");
+ Handle<Object> value(Smi::FromInt(153), isolate);
+ Handle<JSObject> object = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, object, name, value, NONE);
+
+ Handle<Object> result = ft.Call(object, name).ToHandleChecked();
+ CHECK_EQ(*value, *result);
+}
+
+TEST(TailCallBuiltin) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 2;
+ CodeAssemblerTester asm_tester(isolate,
+ kNumParams + 1); // Include receiver.
+ PromiseBuiltinsAssembler m(asm_tester.state());
+
+ {
+ auto receiver = m.Parameter<Object>(1);
+ auto name = m.Parameter<Name>(2);
+ auto context = m.Parameter<Context>(kNumParams + 3);
+
+ m.TailCallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Name> name = factory->InternalizeUtf8String("a");
+ Handle<Object> value(Smi::FromInt(153), isolate);
+ Handle<JSObject> object = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, object, name, value, NONE);
+
+ Handle<Object> result = ft.Call(object, name).ToHandleChecked();
+ CHECK_EQ(*value, *result);
+}
+
class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
public:
AppendJSArrayCodeStubAssembler(compiler::CodeAssemblerState* state,
@@ -2595,8 +2687,7 @@ TEST(IsPromiseHookEnabled) {
CodeStubAssembler m(asm_tester.state());
m.Return(
- m.SelectBooleanConstant(
- m.IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate()));
+ m.SelectBooleanConstant(m.IsPromiseHookEnabledOrHasAsyncEventDelegate()));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result =
@@ -3136,7 +3227,7 @@ TEST(DirectMemoryTest16BitWord32) {
for (size_t i = 0; i < element_count; ++i) {
for (size_t j = 0; j < element_count; ++j) {
- Node* loaded = m.LoadBufferData<Uint16T>(
+ TNode<Uint16T> loaded = m.LoadBufferData<Uint16T>(
buffer_node1, static_cast<int>(i * sizeof(int16_t)));
TNode<Word32T> masked = m.Word32And(loaded, constants[j]);
if ((buffer[j] & buffer[i]) != 0) {
@@ -3881,6 +3972,7 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
FLAG_turbo_instruction_scheduling = old_turbo_instruction_scheduling;
}
+#if V8_ENABLE_WEBASSEMBLY
TEST(WasmInt32ToHeapNumber) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -4107,6 +4199,7 @@ TEST(WasmTaggedToFloat64) {
}
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(SmiUntagLeftShiftOptimization) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -4155,6 +4248,85 @@ TEST(SmiUntagComparisonOptimization) {
FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
}
+TEST(PopCount) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeAssemblerTester asm_tester(isolate);
+ CodeStubAssembler m(asm_tester.state());
+
+ const std::vector<std::pair<uint32_t, int>> test_cases = {
+ {0, 0},
+ {1, 1},
+ {(1 << 31), 1},
+ {0b01010101010101010101010101010101, 16},
+ {0b10101010101010101010101010101010, 16},
+ {0b11100011100000011100011111000111, 17} // arbitrarily chosen
+ };
+
+ for (std::pair<uint32_t, int> test_case : test_cases) {
+ uint32_t value32 = test_case.first;
+ uint64_t value64 = (static_cast<uint64_t>(value32) << 32) | value32;
+ int expected_pop32 = test_case.second;
+ int expected_pop64 = 2 * expected_pop32;
+
+ TNode<Int32T> pop32 = m.PopulationCount32(m.Uint32Constant(value32));
+ CSA_CHECK(&m, m.Word32Equal(pop32, m.Int32Constant(expected_pop32)));
+
+ if (m.Is64()) {
+ // TODO(emrich): enable once 64-bit operations are supported on 32-bit
+ // architectures.
+
+ TNode<Int64T> pop64 = m.PopulationCount64(m.Uint64Constant(value64));
+ CSA_CHECK(&m, m.Word64Equal(pop64, m.Int64Constant(expected_pop64)));
+ }
+ }
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode());
+ ft.Call();
+}
+
+TEST(CountTrailingZeros) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeAssemblerTester asm_tester(isolate);
+ CodeStubAssembler m(asm_tester.state());
+
+ const std::vector<std::pair<uint32_t, int>> test_cases = {
+ {1, 0},
+ {2, 1},
+ {(0b0101010'0000'0000), 9},
+ {(1 << 31), 31},
+ {std::numeric_limits<uint32_t>::max(), 0},
+ };
+
+ for (std::pair<uint32_t, int> test_case : test_cases) {
+ uint32_t value32 = test_case.first;
+ uint64_t value64 = static_cast<uint64_t>(value32) << 32;
+ int expected_ctz32 = test_case.second;
+ int expected_ctz64 = expected_ctz32 + 32;
+
+ TNode<Int32T> pop32 = m.CountTrailingZeros32(m.Uint32Constant(value32));
+ CSA_CHECK(&m, m.Word32Equal(pop32, m.Int32Constant(expected_ctz32)));
+
+ if (m.Is64()) {
+ // TODO(emrich): enable once 64-bit operations are supported on 32-bit
+ // architectures.
+
+ TNode<Int64T> pop64_ext =
+ m.CountTrailingZeros64(m.Uint64Constant(value32));
+ TNode<Int64T> pop64 = m.CountTrailingZeros64(m.Uint64Constant(value64));
+
+ CSA_CHECK(&m, m.Word64Equal(pop64_ext, m.Int64Constant(expected_ctz32)));
+ CSA_CHECK(&m, m.Word64Equal(pop64, m.Int64Constant(expected_ctz64)));
+ }
+ }
+ m.Return(m.UndefinedConstant());
+
+ FunctionTester ft(asm_tester.GenerateCode());
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index a74f3e6bd7..3b895d7420 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -947,9 +947,10 @@ static int AllocationSitesCount(Heap* heap) {
TEST(DecideToPretenureDuringCompilation) {
// The test makes use of optimization and relies on deterministic
// compilation.
- if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_minor_mc ||
- i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size ||
- i::FLAG_turbo_nci || i::FLAG_stress_concurrent_allocation) {
+ if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_always_sparkplug ||
+ i::FLAG_minor_mc || i::FLAG_stress_incremental_marking ||
+ i::FLAG_optimize_for_size || i::FLAG_turbo_nci ||
+ i::FLAG_stress_concurrent_allocation) {
return;
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 61ceae728f..cd1fa753ba 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -35,6 +35,7 @@
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
+#include "src/codegen/compilation-cache.h"
#include "src/codegen/source-position-table.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/spaces.h"
@@ -178,7 +179,8 @@ TEST(CodeEvents) {
v8::base::TimeDelta::FromMicroseconds(100), true);
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer.strings());
+ *code_observer.strings(),
+ *code_observer.weak_code_registry());
isolate->logger()->AddCodeEventListener(&profiler_listener);
// Enqueue code creation events.
@@ -243,7 +245,8 @@ TEST(TickEvents) {
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
isolate->logger()->AddCodeEventListener(&profiler_listener);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
@@ -404,7 +407,8 @@ TEST(Issue1398) {
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
@@ -558,7 +562,7 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
ProfilingMode mode, unsigned max_samples) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
- profiler_->SetSamplingInterval(100);
+ profiler_->SetSamplingInterval(50);
profiler_->StartProfiling(profile_name, {mode, max_samples, 0});
v8::internal::CpuProfiler* iprofiler =
@@ -1272,7 +1276,8 @@ static void TickLines(bool optimize) {
isolate->logger()->LogCompiledFunctions();
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor,
- *code_observer->strings());
+ *code_observer->strings(),
+ *code_observer->weak_code_registry());
// Enqueue code creation events.
i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
@@ -2212,16 +2217,22 @@ TEST(FunctionDetails) {
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
reinterpret_cast<const i::CpuProfile*>(profile)->Print();
// The tree should look like this:
- // 0 (root) 0 #1
- // 0 "" 19 #2 no reason script_b:1
- // 0 baz 19 #3 TryCatchStatement script_b:3
- // 0 foo 18 #4 TryCatchStatement script_a:2
- // 1 bar 18 #5 no reason script_a:3
+ // 0 (root):0 3 0 #1
+ // 0 :0 0 5 #2 script_b:0
+ // 0 baz:3 0 5 #3 script_b:3
+ // bailed out due to 'Optimization is always disabled'
+ // 0 foo:4 0 4 #4 script_a:4
+ // bailed out due to 'Optimization is always disabled'
+ // 0 bar:5 0 4 #5 script_a:5
+ // bailed out due to 'Optimization is always disabled'
+ // 0 startProfiling:0 2 0 #6
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
CHECK_EQ(root->GetParent(), nullptr);
const v8::CpuProfileNode* script = GetChild(env, root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_b", true,
- script_b->GetUnboundScript()->GetId(), 1, 1, root);
+ script_b->GetUnboundScript()->GetId(),
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, root);
const v8::CpuProfileNode* baz = GetChild(env, script, "baz");
CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b", true,
script_b->GetUnboundScript()->GetId(), 3, 16, script);
@@ -2290,7 +2301,7 @@ TEST(FunctionDetailsInlining) {
// The tree should look like this:
// 0 (root) 0 #1
// 5 (program) 0 #6
- // 2 14 #2 script_a:1
+ // 2 14 #2 script_a:0
// ;;; deopted at script_id: 14 position: 299 with reason 'Insufficient
// type feedback for call'.
// 1 alpha 14 #4 script_a:1
@@ -2301,7 +2312,9 @@ TEST(FunctionDetailsInlining) {
CHECK_EQ(root->GetParent(), nullptr);
const v8::CpuProfileNode* script = GetChild(env, root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_a", false,
- script_a->GetUnboundScript()->GetId(), 1, 1, root);
+ script_a->GetUnboundScript()->GetId(),
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ v8::CpuProfileNode::kNoColumnNumberInfo, root);
const v8::CpuProfileNode* alpha = FindChild(env, script, "alpha");
// Return early if profiling didn't sample alpha.
if (!alpha) return;
@@ -4082,6 +4095,9 @@ TEST(BytecodeFlushEventsEagerLogging) {
FLAG_always_opt = false;
i::FLAG_optimize_for_size = false;
#endif // V8_LITE_MODE
+#if ENABLE_SPARKPLUG
+ FLAG_always_sparkplug = false;
+#endif // ENABLE_SPARKPLUG
i::FLAG_flush_bytecode = true;
i::FLAG_allow_natives_syntax = true;
@@ -4147,6 +4163,40 @@ TEST(BytecodeFlushEventsEagerLogging) {
}
}
+// Ensure that unused code entries are removed after GC with eager logging.
+TEST(ClearUnusedWithEagerLogging) {
+ ManualGCScope manual_gc;
+ TestSetup test_setup;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);
+
+ CodeMap* code_map = profiler.code_map_for_test();
+ size_t initial_size = code_map->size();
+
+ {
+ // Create and run a new script and function, generating 2 code objects.
+ // Do this in a new context, so that some_func isn't retained by the
+ // context's global object past this scope.
+ i::HandleScope inner_scope(isolate);
+ LocalContext env;
+ CompileRun(
+ "function some_func() {}"
+ "some_func();");
+ CHECK_GT(code_map->size(), initial_size);
+ }
+
+ // Clear the compilation cache so that there are no more references to the
+ // given two functions.
+ isolate->compilation_cache()->Clear();
+
+ CcTest::CollectAllGarbage();
+
+ // Verify that the CodeMap's size is unchanged post-GC.
+ CHECK_EQ(code_map->size(), initial_size);
+}
+
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index f1c2d40027..6e3033d993 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/api/api-inl.h"
+#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/flags/flags.h"
#include "src/heap/read-only-spaces.h"
@@ -421,9 +422,29 @@ static void FrameIterationCheck(
d::StackFrameResultPtr props = d::GetStackFrame(frame->fp(), &ReadMemory);
if (frame->is_java_script()) {
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- CHECK_EQ(props->num_properties, 1);
+ CHECK_EQ(props->num_properties, 5);
+ auto js_function = js_frame->function();
CheckProp(*props->properties[0], "v8::internal::JSFunction",
- "currently_executing_jsfunction", js_frame->function().ptr());
+ "currently_executing_jsfunction", js_function.ptr());
+ auto shared_function_info = js_function.shared();
+ auto script = i::Script::cast(shared_function_info.script());
+ CheckProp(*props->properties[1], "v8::internal::Object", "script_name",
+ static_cast<i::Tagged_t>(script.name().ptr()));
+ CheckProp(*props->properties[2], "v8::internal::Object", "script_source",
+ static_cast<i::Tagged_t>(script.source().ptr()));
+
+ auto scope_info = shared_function_info.scope_info();
+ CheckProp(*props->properties[3], "v8::internal::Object", "function_name",
+ static_cast<i::Tagged_t>(scope_info.FunctionName().ptr()));
+
+ CheckProp(*props->properties[4], "", "function_character_offset");
+ const d::ObjectProperty& function_character_offset =
+ *props->properties[4];
+ CHECK_EQ(function_character_offset.num_struct_fields, 2);
+ CheckStructProp(*function_character_offset.struct_fields[0],
+ "v8::internal::Object", "start", 0);
+ CheckStructProp(*function_character_offset.struct_fields[1],
+ "v8::internal::Object", "end", 4);
} else {
CHECK_EQ(props->num_properties, 0);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 9ffc69b682..2723f5fa14 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -1354,6 +1354,49 @@ TEST(BreakPointApiAccessor) {
CheckDebuggerUnloaded();
}
+TEST(Regress1163547) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ DebugEventCounter delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+ i::Handle<i::BreakPoint> bp;
+
+ auto constructor_tmpl = v8::FunctionTemplate::New(env->GetIsolate());
+ auto prototype_tmpl = constructor_tmpl->PrototypeTemplate();
+ auto accessor_tmpl =
+ v8::FunctionTemplate::New(env->GetIsolate(), NoOpFunctionCallback);
+ prototype_tmpl->SetAccessorProperty(v8_str("f"), accessor_tmpl);
+
+ auto constructor =
+ constructor_tmpl->GetFunction(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("C"), constructor).ToChecked();
+
+ CompileRun("o = new C();");
+ v8::Local<v8::Function> function =
+ CompileRun("Object.getOwnPropertyDescriptor(C.prototype, 'f').get")
+ .As<v8::Function>();
+
+ // === Test API accessor ===
+ break_point_hit_count = 0;
+
+ // At this point, the C.prototype - which holds the "f" accessor - is in
+ // dictionary mode.
+ auto constructor_fun =
+ Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*constructor));
+ CHECK(!i::JSObject::cast(constructor_fun->prototype()).HasFastProperties());
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(function, 0);
+
+ CompileRun("o.f");
+ CHECK_EQ(1, break_point_hit_count);
+
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
TEST(BreakPointInlineApiFunction) {
i::FLAG_allow_natives_syntax = true;
LocalContext env;
@@ -4545,7 +4588,9 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
CHECK_EQ(count_by_type[i::Script::TYPE_NATIVE], 0);
CHECK_EQ(count_by_type[i::Script::TYPE_EXTENSION], 1);
CHECK_EQ(count_by_type[i::Script::TYPE_NORMAL], 1);
+#if V8_ENABLE_WEBASSEMBLY
CHECK_EQ(count_by_type[i::Script::TYPE_WASM], 0);
+#endif // V8_ENABLE_WEBASSEMBLY
CHECK_EQ(count_by_type[i::Script::TYPE_INSPECTOR], 0);
i::Handle<i::Script> gc_script =
@@ -5498,39 +5543,44 @@ TEST(TerminateOnResumeFromMicrotask) {
class FutexInterruptionThread : public v8::base::Thread {
public:
- FutexInterruptionThread(v8::Isolate* isolate, v8::base::Semaphore* sem)
+ FutexInterruptionThread(v8::Isolate* isolate, v8::base::Semaphore* enter,
+ v8::base::Semaphore* exit)
: Thread(Options("FutexInterruptionThread")),
isolate_(isolate),
- sem_(sem) {}
+ enter_(enter),
+ exit_(exit) {}
void Run() override {
- // Wait a bit before terminating.
- v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
- sem_->Wait();
+ enter_->Wait();
v8::debug::SetTerminateOnResume(isolate_);
+ exit_->Signal();
}
private:
v8::Isolate* isolate_;
- v8::base::Semaphore* sem_;
+ v8::base::Semaphore* enter_;
+ v8::base::Semaphore* exit_;
};
namespace {
class SemaphoreTriggerOnBreak : public v8::debug::DebugDelegate {
public:
- SemaphoreTriggerOnBreak() : sem_(0) {}
+ SemaphoreTriggerOnBreak() : enter_(0), exit_(0) {}
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
const std::vector<v8::debug::BreakpointId>&
inspector_break_points_hit) override {
break_count_++;
- sem_.Signal();
+ enter_.Signal();
+ exit_.Wait();
}
- v8::base::Semaphore* semaphore() { return &sem_; }
+ v8::base::Semaphore* enter() { return &enter_; }
+ v8::base::Semaphore* exit() { return &exit_; }
int break_count() const { return break_count_; }
private:
- v8::base::Semaphore sem_;
+ v8::base::Semaphore enter_;
+ v8::base::Semaphore exit_;
int break_count_ = 0;
};
} // anonymous namespace
@@ -5543,8 +5593,8 @@ TEST(TerminateOnResumeFromOtherThread) {
SemaphoreTriggerOnBreak delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
- FutexInterruptionThread timeout_thread(env->GetIsolate(),
- delegate.semaphore());
+ FutexInterruptionThread timeout_thread(env->GetIsolate(), delegate.enter(),
+ delegate.exit());
CHECK(timeout_thread.Start());
v8::Local<v8::Context> context = env.local();
@@ -5575,7 +5625,7 @@ namespace {
class InterruptionBreakRightNow : public v8::base::Thread {
public:
explicit InterruptionBreakRightNow(v8::Isolate* isolate)
- : Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
+ : Thread(Options("InterruptionBreakRightNow")), isolate_(isolate) {}
void Run() override {
// Wait a bit before terminating.
diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc
index a8b9e95952..1d933d2190 100644
--- a/deps/v8/test/cctest/test-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-descriptor-array.cc
@@ -56,7 +56,7 @@ void CheckDescriptorArrayLookups(Isolate* isolate, Handle<Map> map,
// Test C++ implementation.
{
DisallowGarbageCollection no_gc;
- DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map->instance_descriptors(isolate);
DCHECK(descriptors.IsSortedNoDuplicates());
int nof_descriptors = descriptors.number_of_descriptors();
@@ -91,8 +91,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
for (size_t i = 0; i < maps.size(); ++i) {
Map expected_map = *maps[i];
- Name name = expected_map.instance_descriptors(kRelaxedLoad)
- .GetKey(expected_map.LastAdded());
+ Name name = expected_map.instance_descriptors(isolate).GetKey(
+ expected_map.LastAdded());
Map map = transitions->SearchAndGetTargetForTesting(PropertyKind::kData,
name, NONE);
@@ -105,8 +105,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
if (!FLAG_jitless) {
for (size_t i = 0; i < maps.size(); ++i) {
Handle<Map> expected_map = maps[i];
- Handle<Name> name(expected_map->instance_descriptors(kRelaxedLoad)
- .GetKey(expected_map->LastAdded()),
+ Handle<Name> name(expected_map->instance_descriptors(isolate).GetKey(
+ expected_map->LastAdded()),
isolate);
Handle<Object> transition_map =
@@ -260,7 +260,7 @@ TEST(DescriptorArrayHashCollisionMassive) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors(kRelaxedLoad).Sort();
+ map->instance_descriptors(isolate).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
@@ -309,7 +309,7 @@ TEST(DescriptorArrayHashCollision) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors(kRelaxedLoad).Sort();
+ map->instance_descriptors(isolate).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 441ae53f32..551488ab21 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1518,24 +1518,6 @@ TEST_(load_literal) {
CLEANUP();
}
-TEST(prfm_regoffset) {
- SET_UP_ASM();
-
- COMPARE(prfm(PLIL1KEEP, MemOperand(x1, x2)), "prfm plil1keep, [x1, x2]");
- COMPARE(prfm(PLIL1STRM, MemOperand(x3, w4, SXTW)),
- "prfm plil1strm, [x3, w4, sxtw]");
- COMPARE(prfm(PLIL2KEEP, MemOperand(x5, x6, LSL, 3)),
- "prfm plil2keep, [x5, x6, lsl #3]");
-
- COMPARE(prfm(PLIL2STRM, MemOperand(sp, xzr)), "prfm plil2strm, [sp, xzr]");
- COMPARE(prfm(PLIL3KEEP, MemOperand(sp, wzr, SXTW)),
- "prfm plil3keep, [sp, wzr, sxtw]");
- COMPARE(prfm(PLIL3STRM, MemOperand(sp, xzr, LSL, 3)),
- "prfm plil3strm, [sp, xzr, lsl #3]");
-
- CLEANUP();
-}
-
TEST_(cond_select) {
SET_UP_ASM();
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index b9a6d1a7c5..f387fd2953 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -14,7 +14,10 @@
#include "src/init/v8.h"
#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/internal-index.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
#include "src/objects/property.h"
#include "src/objects/struct-inl.h"
#include "src/objects/transitions.h"
@@ -275,7 +278,7 @@ class Expectations {
CHECK_EQ(expected_nof, map.NumberOfOwnDescriptors());
CHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray descriptors = map.instance_descriptors();
CHECK(expected_nof <= number_of_properties_);
for (InternalIndex i : InternalIndex::Range(expected_nof)) {
if (!Check(descriptors, i)) {
@@ -444,8 +447,9 @@ class Expectations {
Handle<Object> getter(pair->getter(), isolate);
Handle<Object> setter(pair->setter(), isolate);
- InternalIndex descriptor = map->instance_descriptors(kRelaxedLoad)
- .SearchWithCache(isolate, *name, *map);
+ InternalIndex descriptor =
+ map->instance_descriptors(isolate).SearchWithCache(isolate, *name,
+ *map);
map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
getter, setter, attributes);
CHECK(!map->is_deprecated());
@@ -460,6 +464,23 @@ class Expectations {
// branch.
//
+namespace {
+
+Handle<Map> ReconfigureProperty(Isolate* isolate, Handle<Map> map,
+ InternalIndex modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ DCHECK_EQ(kData, new_kind); // Only kData case is supported.
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(modify_index, new_attributes,
+ PropertyConstness::kConst,
+ new_representation, new_field_type);
+}
+
+} // namespace
+
TEST(ReconfigureAccessorToNonExistingDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -481,8 +502,8 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(expectations.Check(*map));
InternalIndex first(0);
- Handle<Map> new_map = Map::ReconfigureProperty(
- isolate, map, first, kData, NONE, Representation::None(), none_type);
+ Handle<Map> new_map = ReconfigureProperty(isolate, map, first, kData, NONE,
+ Representation::None(), none_type);
// |map| did not change except marked unstable.
CHECK(!map->is_deprecated());
CHECK(!map->is_stable());
@@ -496,8 +517,8 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CHECK(new_map->is_stable());
CHECK(expectations.Check(*new_map));
- Handle<Map> new_map2 = Map::ReconfigureProperty(
- isolate, map, first, kData, NONE, Representation::None(), none_type);
+ Handle<Map> new_map2 = ReconfigureProperty(isolate, map, first, kData, NONE,
+ Representation::None(), none_type);
CHECK_EQ(*new_map, *new_map2);
Handle<Object> value(Smi::zero(), isolate);
@@ -553,7 +574,7 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
InternalIndex first(0);
CHECK(obj->map()
- .instance_descriptors(kRelaxedLoad)
+ .instance_descriptors(isolate)
.GetStrongValue(first)
.IsAccessorPair());
@@ -669,7 +690,7 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
CHECK(expectations.Check(*map));
if (is_detached_map) {
- detach_point_map = Map::ReconfigureProperty(
+ detach_point_map = ReconfigureProperty(
isolate, detach_point_map, InternalIndex(detach_property_at_index),
kData, NONE, Representation::Double(), any_type);
expectations.SetDataField(detach_property_at_index,
@@ -702,8 +723,8 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
// Create new maps by generalizing representation of propX field.
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(property_index),
- kData, NONE, to.representation, to.type);
+ ReconfigureProperty(isolate, map, InternalIndex(property_index), kData,
+ NONE, to.representation, to.type);
expectations.SetDataField(property_index, expected.constness,
expected.representation, expected.type);
@@ -977,8 +998,8 @@ TEST(GeneralizeFieldWithAccessorProperties) {
continue;
}
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
- Representation::Double(), any_type);
+ ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ Representation::Double(), any_type);
maps[i] = new_map;
expectations.SetDataField(i, PropertyConstness::kMutable,
@@ -1103,16 +1124,7 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
CHECK(!map2->is_stable());
CHECK(!map2->is_deprecated());
CHECK_NE(*map2, *new_map);
- // If the "source" property was const then update constness expectations for
- // "source" map and ensure the deoptimization dependency was triggered.
- if (to.constness == PropertyConstness::kConst) {
- expectations2.SetDataField(kSplitProp, READ_ONLY,
- PropertyConstness::kMutable, to.representation,
- to.type);
- CHECK(code_src_field_const->marked_for_deoptimization());
- } else {
- CHECK(!code_src_field_const->marked_for_deoptimization());
- }
+ CHECK(!code_src_field_const->marked_for_deoptimization());
CHECK(expectations2.Check(*map2));
for (int i = kSplitProp; i < kPropCount; i++) {
@@ -1797,7 +1809,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldInPlace(
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
Handle<Map> new_map =
- Map::ReconfigureElementsKind(isolate, map2, PACKED_ELEMENTS);
+ MapUpdater{isolate, map2}.ReconfigureElementsKind(PACKED_ELEMENTS);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -2063,9 +2075,8 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
map2 = handle(target, isolate);
}
- map2 = Map::ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp),
- kData, NONE, Representation::Double(),
- any_type);
+ map2 = ReconfigureProperty(isolate, map2, InternalIndex(kSplitProp), kData,
+ NONE, Representation::Double(), any_type);
expectations.SetDataField(kSplitProp, PropertyConstness::kMutable,
Representation::Double(), any_type);
@@ -2162,8 +2173,8 @@ static void TestGeneralizeFieldWithSpecialTransition(
Handle<Map> maps[kPropCount];
for (int i = 0; i < kPropCount; i++) {
Handle<Map> new_map =
- Map::ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
- to.representation, to.type);
+ ReconfigureProperty(isolate, map, InternalIndex(i), kData, NONE,
+ to.representation, to.type);
maps[i] = new_map;
expectations.SetDataField(i, expected.constness, expected.representation,
@@ -2829,13 +2840,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
InternalIndex first(0);
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- PropertyConstness::kConst,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
// Store value2 to obj2 and check that it got same map and property details
// did not change.
@@ -2847,13 +2857,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- PropertyConstness::kConst,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
// Store value2 to obj1 and check that property became mutable.
Call(isolate, store_func, obj1, value2).Check();
@@ -2863,13 +2872,12 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors(kRelaxedLoad)
+ CHECK(map->instance_descriptors(isolate)
.GetDetails(first)
.representation()
.Equals(expected_rep));
- CHECK_EQ(
- expected_constness,
- map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
+ CHECK_EQ(expected_constness,
+ map->instance_descriptors(isolate).GetDetails(first).constness());
}
void TestStoreToConstantField_PlusMinusZero(const char* store_func_source,
@@ -3030,6 +3038,122 @@ TEST(RepresentationPredicatesAreInSync) {
}
}
+TEST(DeletePropertyGeneralizesConstness) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+
+ // Create a map with some properties.
+ Handle<Map> initial_map = Map::Create(isolate, kPropCount + 3);
+ Handle<Map> map = initial_map;
+ for (int i = 0; i < kPropCount; i++) {
+ Handle<String> name = CcTest::MakeName("prop", i);
+ map = Map::CopyWithField(isolate, map, name, any_type, NONE,
+ PropertyConstness::kConst, Representation::Smi(),
+ INSERT_TRANSITION)
+ .ToHandleChecked();
+ }
+ Handle<Map> parent_map = map;
+ CHECK(!map->is_deprecated());
+
+ Handle<String> name_x = CcTest::MakeString("x");
+ Handle<String> name_y = CcTest::MakeString("y");
+
+ map = Map::CopyWithField(isolate, parent_map, name_x, any_type, NONE,
+ PropertyConstness::kConst, Representation::Smi(),
+ INSERT_TRANSITION)
+ .ToHandleChecked();
+
+ // Create an object, initialize its properties and add a couple of clones.
+ Handle<JSObject> object1 = isolate->factory()->NewJSObjectFromMap(map);
+ for (int i = 0; i < kPropCount; i++) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, InternalIndex(i));
+ object1->FastPropertyAtPut(index, Smi::FromInt(i));
+ }
+ Handle<JSObject> object2 = isolate->factory()->CopyJSObject(object1);
+
+ CHECK(!map->is_deprecated());
+ CHECK(!parent_map->is_deprecated());
+
+ // Transition to Double must deprecate m1.
+ CHECK(!Representation::Smi().CanBeInPlaceChangedTo(Representation::Double()));
+
+ // Reconfigure one of the first properties to make the whole transition tree
+ // deprecated (including |parent_map| and |map|).
+ Handle<Map> new_map =
+ ReconfigureProperty(isolate, map, InternalIndex(0), PropertyKind::kData,
+ NONE, Representation::Double(), any_type);
+ CHECK(map->is_deprecated());
+ CHECK(parent_map->is_deprecated());
+ CHECK(!new_map->is_deprecated());
+ // The "x" property is still kConst.
+ CHECK_EQ(new_map->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kConst);
+
+ Handle<Map> new_parent_map = Map::Update(isolate, parent_map);
+ CHECK(!new_parent_map->is_deprecated());
+
+ // |new_parent_map| must have exactly one outgoing transition to |new_map|.
+ {
+ TransitionsAccessor ta(isolate, new_parent_map);
+ CHECK_EQ(ta.NumberOfTransitions(), 1);
+ CHECK_EQ(ta.GetTarget(0), *new_map);
+ }
+
+ // Deletion of the property from |object1| must migrate it to |new_parent_map|
+ // which is an up-to-date version of the |parent_map|. The |new_map|'s "x"
+ // property should be marked as mutable.
+ CHECK_EQ(object1->map(isolate), *map);
+ CHECK(Runtime::DeleteObjectProperty(isolate, object1, name_x,
+ LanguageMode::kSloppy)
+ .ToChecked());
+ CHECK_EQ(object1->map(isolate), *new_parent_map);
+ CHECK_EQ(new_map->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kMutable);
+
+ // Now add transitions to "x" and "y" properties from |new_parent_map|.
+ std::vector<Handle<Map>> transitions;
+ Handle<Object> value = handle(Smi::FromInt(0), isolate);
+ for (int i = 0; i < kPropertyAttributesCombinationsCount; i++) {
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
+
+ Handle<Map> tmp;
+ // Add some transitions to "x" and "y".
+ tmp = Map::TransitionToDataProperty(isolate, new_parent_map, name_x, value,
+ attributes, PropertyConstness::kConst,
+ StoreOrigin::kNamed);
+ CHECK(!tmp->map(isolate).is_dictionary_map());
+ transitions.push_back(tmp);
+
+ tmp = Map::TransitionToDataProperty(isolate, new_parent_map, name_y, value,
+ attributes, PropertyConstness::kConst,
+ StoreOrigin::kNamed);
+ CHECK(!tmp->map(isolate).is_dictionary_map());
+ transitions.push_back(tmp);
+ }
+
+ // Deletion of the property from |object2| must migrate it to |new_parent_map|
+ // which is an up-to-date version of the |parent_map|.
+ // All outgoing transitions from |new_map| that add "x" must be marked as
+ // mutable, transitions to other properties must remain const.
+ CHECK_EQ(object2->map(isolate), *map);
+ CHECK(Runtime::DeleteObjectProperty(isolate, object2, name_x,
+ LanguageMode::kSloppy)
+ .ToChecked());
+ CHECK_EQ(object2->map(isolate), *new_parent_map);
+ for (Handle<Map> m : transitions) {
+ if (m->GetLastDescriptorName(isolate) == *name_x) {
+ CHECK_EQ(m->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kMutable);
+
+ } else {
+ CHECK_EQ(m->GetLastDescriptorDetails(isolate).constness(),
+ PropertyConstness::kConst);
+ }
+ }
+}
+
} // namespace test_field_type_tracking
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 9112dc7a57..fe04cc1085 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -202,9 +202,11 @@ TEST(FlagsJitlessImplications) {
// fairly primitive and can break easily depending on the implication
// definition order in flag-definitions.h.
CHECK(!FLAG_opt);
+#if V8_ENABLE_WEBASSEMBLY
CHECK(!FLAG_validate_asm);
CHECK(!FLAG_asm_wasm_lazy_compilation);
CHECK(!FLAG_wasm_lazy_compilation);
+#endif // V8_ENABLE_WEBASSEMBLY
}
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 7440ea8823..2331c61bc6 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -80,7 +80,8 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
// Obtain SharedFunctionInfo for the function.
Handle<SharedFunctionInfo> shared_func_info =
Handle<SharedFunctionInfo>::cast(
- isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos));
+ isolate->debug()->FindInnermostContainingFunctionInfo(i_script,
+ func_pos));
// Verify inferred function name.
std::unique_ptr<char[]> inferred_name =
diff --git a/deps/v8/test/cctest/test-hashcode.cc b/deps/v8/test/cctest/test-hashcode.cc
index cf9d477ff9..c138ba04a2 100644
--- a/deps/v8/test/cctest/test-hashcode.cc
+++ b/deps/v8/test/cctest/test-hashcode.cc
@@ -29,24 +29,24 @@ int AddToSetAndGetHash(Isolate* isolate, Handle<JSObject> obj,
}
int GetPropertyDictionaryHash(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return obj->property_dictionary_ordered().Hash();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return obj->property_dictionary_swiss().Hash();
} else {
return obj->property_dictionary().Hash();
}
}
int GetPropertyDictionaryLength(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- return obj->property_dictionary_ordered().length();
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ return obj->property_dictionary_swiss().Capacity();
} else {
return obj->property_dictionary().length();
}
}
void CheckIsDictionaryModeObject(Handle<JSObject> obj) {
- if (V8_DICT_MODE_PROTOTYPES_BOOL) {
- CHECK(obj->raw_properties_or_hash().IsOrderedNameDictionary());
+ if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
+ CHECK(obj->raw_properties_or_hash().IsSwissNameDictionary());
} else {
CHECK(obj->raw_properties_or_hash().IsNameDictionary());
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index ed02cd1e37..d84e817051 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -459,12 +459,13 @@ TEST(HeapSnapshotCodeObjects) {
// Verify that non-compiled function doesn't contain references to "x"
// literal, while compiled function does. The scope info is stored in
- // FixedArray objects attached to the SharedFunctionInfo.
+ // ScopeInfo objects attached to the SharedFunctionInfo.
bool compiled_references_x = false, lazy_references_x = false;
for (int i = 0, count = compiled_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = compiled_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kArray) {
+ if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ !strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
compiled_references_x = true;
break;
@@ -474,7 +475,8 @@ TEST(HeapSnapshotCodeObjects) {
for (int i = 0, count = lazy_sfi->GetChildrenCount(); i < count; ++i) {
const v8::HeapGraphEdge* prop = lazy_sfi->GetChild(i);
const v8::HeapGraphNode* node = prop->GetToNode();
- if (node->GetType() == v8::HeapGraphNode::kArray) {
+ if (node->GetType() == v8::HeapGraphNode::kHidden &&
+ !strcmp("system / ScopeInfo", GetName(node))) {
if (HasString(env->GetIsolate(), node, "x")) {
lazy_references_x = true;
break;
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index 13c94f3afc..529701c227 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -6,10 +6,13 @@
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/handles/handles-inl.h"
-#include "src/wasm/code-space-access.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/code-space-access.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace test_icache {
@@ -170,6 +173,7 @@ CONDITIONAL_TEST(TestFlushICacheOfExecutable) {
#undef CONDITIONAL_TEST
+#if V8_ENABLE_WEBASSEMBLY
// Order of operation for this test case:
// perm(RWX) -> exec -> patch -> flush -> exec
TEST(TestFlushICacheOfWritableAndExecutable) {
@@ -197,6 +201,7 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
#undef __
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
index 4d61e944dc..d0f5122a32 100644
--- a/deps/v8/test/cctest/test-js-to-wasm.cc
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -68,6 +68,9 @@ struct ExportedFunction {
DECLARE_EXPORTED_FUNCTION(nop, sigs.v_v(), WASM_CODE({WASM_NOP}))
+DECLARE_EXPORTED_FUNCTION(unreachable, sigs.v_v(),
+ WASM_CODE({WASM_UNREACHABLE}))
+
DECLARE_EXPORTED_FUNCTION(i32_square, sigs.i_i(),
WASM_CODE({WASM_LOCAL_GET(0), WASM_LOCAL_GET(0),
kExprI32Mul}))
@@ -457,9 +460,9 @@ class FastJSWasmCallTester {
";"
"function test() {"
" try {"
- " return " +
+ " return %ObserveNode(" +
exported_function_name +
- "(arg);"
+ "(arg));"
" } catch (e) {"
" return 0;"
" }"
@@ -485,13 +488,19 @@ class FastJSWasmCallTester {
// Executes a test function with a try/catch calling a Wasm function returning
// void.
- void CallAndCheckWithTryCatch_void(const std::string& exported_function_name,
- const v8::Local<v8::Value> arg0,
- const v8::Local<v8::Value> arg1) {
+ void CallAndCheckWithTryCatch_void(
+ const std::string& exported_function_name,
+ const std::vector<v8::Local<v8::Value>>& args) {
LocalContext env;
- CHECK((*env)->Global()->Set(env.local(), v8_str("arg0"), arg0).FromJust());
- CHECK((*env)->Global()->Set(env.local(), v8_str("arg1"), arg1).FromJust());
+ for (size_t i = 0; i < args.size(); i++) {
+ CHECK((*env)
+ ->Global()
+ ->Set(env.local(), v8_str(("arg" + std::to_string(i)).c_str()),
+ args[i])
+ .FromJust());
+ }
+ std::string js_args = ArgsToString(args.size());
std::string js_code =
"const importObj = {"
" env: {"
@@ -509,9 +518,9 @@ class FastJSWasmCallTester {
";"
"function test() {"
" try {"
- " " +
- exported_function_name +
- "(arg0, arg1);"
+ " %ObserveNode(" +
+ exported_function_name + "(" + js_args +
+ "));"
" return 1;"
" } catch (e) {"
" return 0;"
@@ -928,6 +937,13 @@ TEST(TestFastJSWasmCall_EagerDeopt) {
// Exception handling tests
+TEST(TestFastJSWasmCall_Unreachable) {
+ v8::HandleScope scope(CcTest::isolate());
+ FastJSWasmCallTester tester;
+ tester.AddExportedFunction(k_unreachable);
+ tester.CallAndCheckWithTryCatch_void("unreachable", {});
+}
+
TEST(TestFastJSWasmCall_Trap_i32) {
v8::HandleScope scope(CcTest::isolate());
FastJSWasmCallTester tester;
@@ -960,8 +976,8 @@ TEST(TestFastJSWasmCall_Trap_void) {
v8::HandleScope scope(CcTest::isolate());
FastJSWasmCallTester tester;
tester.AddExportedFunction(k_store_i32);
- tester.CallAndCheckWithTryCatch_void("store_i32", v8_int(0x7fffffff),
- v8_int(42));
+ tester.CallAndCheckWithTryCatch_void("store_i32",
+ {v8_int(0x7fffffff), v8_int(42)});
}
// BigInt
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 7838e8ae25..1291283515 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -209,6 +209,7 @@ Handle<JSWeakRef> MakeWeakRefAndKeepDuringJob(Isolate* isolate) {
} // namespace
TEST(TestRegister) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -246,6 +247,7 @@ TEST(TestRegister) {
}
TEST(TestRegisterWithKey) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -298,6 +300,7 @@ TEST(TestRegisterWithKey) {
}
TEST(TestWeakCellNullify1) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -332,6 +335,7 @@ TEST(TestWeakCellNullify1) {
}
TEST(TestWeakCellNullify2) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -365,6 +369,7 @@ TEST(TestWeakCellNullify2) {
}
TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -420,6 +425,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings1) {
TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
// Test that when all WeakCells for a key are popped, the key is removed from
// the key map.
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -470,6 +476,7 @@ TEST(TestJSFinalizationRegistryPopClearedCellHoldings2) {
}
TEST(TestUnregisterActiveCells) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -522,6 +529,7 @@ TEST(TestUnregisterActiveCells) {
}
TEST(TestUnregisterActiveAndClearedCells) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -577,6 +585,7 @@ TEST(TestUnregisterActiveAndClearedCells) {
}
TEST(TestWeakCellUnregisterTwice) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -624,6 +633,7 @@ TEST(TestWeakCellUnregisterTwice) {
}
TEST(TestWeakCellUnregisterPopped) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -664,6 +674,7 @@ TEST(TestWeakCellUnregisterPopped) {
}
TEST(TestWeakCellUnregisterNonexistentKey) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -676,6 +687,7 @@ TEST(TestWeakCellUnregisterNonexistentKey) {
}
TEST(TestJSWeakRef) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
@@ -704,6 +716,7 @@ TEST(TestJSWeakRef) {
}
TEST(TestJSWeakRefIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) {
return;
}
@@ -739,6 +752,7 @@ TEST(TestJSWeakRefIncrementalMarking) {
}
TEST(TestJSWeakRefKeepDuringJob) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
@@ -776,6 +790,7 @@ TEST(TestJSWeakRefKeepDuringJob) {
}
TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) {
return;
}
@@ -804,6 +819,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
}
TEST(TestRemoveUnregisterToken) {
+ FLAG_harmony_weak_refs = true;
CcTest::InitializeVM();
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -867,6 +883,7 @@ TEST(TestRemoveUnregisterToken) {
}
TEST(JSWeakRefScavengedInWorklist) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking || FLAG_single_generation) {
return;
}
@@ -911,6 +928,7 @@ TEST(JSWeakRefScavengedInWorklist) {
}
TEST(JSWeakRefTenuredInWorklist) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking || FLAG_single_generation) {
return;
}
@@ -958,6 +976,7 @@ TEST(JSWeakRefTenuredInWorklist) {
}
TEST(UnregisterTokenHeapVerifier) {
+ FLAG_harmony_weak_refs = true;
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
#ifdef VERIFY_HEAP
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index fd1f91a8eb..d1f88877cd 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -457,8 +457,10 @@ UNINITIALIZED_TEST(Issue539892) {
void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
const char* name, int length) override {}
+#if V8_ENABLE_WEBASSEMBLY
void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
int length) override {}
+#endif // V8_ENABLE_WEBASSEMBLY
};
SETUP_FLAGS();
@@ -577,8 +579,9 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStack) {
logger.StopLogging();
- CHECK(logger.ContainsLine(
- {"InterpretedFunction", "testLogInterpretedFramesNativeStack"}));
+ CHECK(logger.ContainsLinesInOrder(
+ {{"LazyCompile", "testLogInterpretedFramesNativeStack"},
+ {"LazyCompile", "testLogInterpretedFramesNativeStack"}}));
}
isolate->Dispose();
}
@@ -629,7 +632,11 @@ UNINITIALIZED_TEST(LogInterpretedFramesNativeStackWithSerialization) {
.ToLocalChecked();
if (has_cache) {
logger.StopLogging();
- CHECK(logger.ContainsLine({"InterpretedFunction", "eyecatcher"}));
+ logger.PrintLog();
+ // Function is logged twice: once as interpreted, and once as the
+ // interpreter entry trampoline builtin.
+ CHECK(logger.ContainsLinesInOrder(
+ {{"Function", "eyecatcher"}, {"Function", "eyecatcher"}}));
}
v8::Local<v8::Value> arg = v8_num(3);
v8::Local<v8::Value> result =
@@ -667,13 +674,16 @@ UNINITIALIZED_TEST(ExternalCodeEventListener) {
"testCodeEventListenerBeforeStart('1', 1);";
CompileRun(source_text_before_start);
+ CHECK_EQ(code_event_handler.CountLines("Function",
+ "testCodeEventListenerBeforeStart"),
+ 0);
CHECK_EQ(code_event_handler.CountLines("LazyCompile",
"testCodeEventListenerBeforeStart"),
0);
code_event_handler.Enable();
- CHECK_GE(code_event_handler.CountLines("LazyCompile",
+ CHECK_GE(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
1);
@@ -715,10 +725,12 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source)
.ToLocalChecked();
- CHECK_EQ(code_event_handler.CountLines("Script", "f1"),
- i::FLAG_stress_background_compile ? 2 : 1);
- CHECK_EQ(code_event_handler.CountLines("Script", "f2"),
- i::FLAG_stress_background_compile ? 2 : 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f1"),
+ 1 + (i::FLAG_stress_background_compile ? 1 : 0) +
+ (i::FLAG_always_sparkplug ? 1 : 0));
+ CHECK_EQ(code_event_handler.CountLines("Function", "f2"),
+ 1 + (i::FLAG_stress_background_compile ? 1 : 0) +
+ (i::FLAG_always_sparkplug ? 1 : 0));
cache = v8::ScriptCompiler::CreateCodeCache(script);
}
isolate1->Dispose();
@@ -743,8 +755,8 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerInnerFunctions) {
isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache)
.ToLocalChecked();
}
- CHECK_EQ(code_event_handler.CountLines("Script", "f1"), 1);
- CHECK_EQ(code_event_handler.CountLines("Script", "f2"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f1"), 1);
+ CHECK_EQ(code_event_handler.CountLines("Function", "f2"), 1);
}
isolate2->Dispose();
}
@@ -772,24 +784,24 @@ UNINITIALIZED_TEST(ExternalCodeEventListenerWithInterpretedFramesNativeStack) {
"testCodeEventListenerBeforeStart('1', 1);";
CompileRun(source_text_before_start);
- CHECK_EQ(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_EQ(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
0);
code_event_handler.Enable();
- CHECK_GE(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_GE(code_event_handler.CountLines("Function",
"testCodeEventListenerBeforeStart"),
- 1);
+ 2);
const char* source_text_after_start =
"function testCodeEventListenerAfterStart(a,b) { return a + b };"
"testCodeEventListenerAfterStart('1', 1);";
CompileRun(source_text_after_start);
- CHECK_GE(code_event_handler.CountLines("InterpretedFunction",
+ CHECK_GE(code_event_handler.CountLines("LazyCompile",
"testCodeEventListenerAfterStart"),
- 1);
+ 2);
CHECK_EQ(
code_event_handler.CountLines("Builtin", "InterpreterEntryTrampoline"),
@@ -1192,101 +1204,3 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
}
isolate->Dispose();
}
-
-TEST(BytecodeFlushEvents) {
- SETUP_FLAGS();
-
-#ifndef V8_LITE_MODE
- i::FLAG_opt = false;
- i::FLAG_always_opt = false;
- i::FLAG_optimize_for_size = false;
-#endif // V8_LITE_MODE
- i::FLAG_flush_bytecode = true;
- i::FLAG_allow_natives_syntax = true;
-
- ManualGCScope manual_gc_scope;
-
- v8::Isolate* isolate = CcTest::isolate();
- i::Isolate* i_isolate = CcTest::i_isolate();
- i::Factory* factory = i_isolate->factory();
-
- struct FakeCodeEventLogger : public i::CodeEventLogger {
- explicit FakeCodeEventLogger(i::Isolate* isolate)
- : CodeEventLogger(isolate) {}
-
- void CodeMoveEvent(i::AbstractCode from, i::AbstractCode to) override {}
- void CodeDisableOptEvent(i::Handle<i::AbstractCode> code,
- i::Handle<i::SharedFunctionInfo> shared) override {
- }
-
- void BytecodeFlushEvent(Address compiled_data_start) override {
- // We only expect a single flush.
- CHECK_EQ(flushed_compiled_data_start, i::kNullAddress);
- flushed_compiled_data_start = compiled_data_start;
- }
-
- void LogRecordedBuffer(i::Handle<i::AbstractCode> code,
- i::MaybeHandle<i::SharedFunctionInfo> maybe_shared,
- const char* name, int length) override {}
- void LogRecordedBuffer(const i::wasm::WasmCode* code, const char* name,
- int length) override {}
-
- i::Address flushed_compiled_data_start = i::kNullAddress;
- };
-
- FakeCodeEventLogger code_event_logger(i_isolate);
-
- {
- ScopedLoggerInitializer logger(isolate);
- logger.logger()->AddCodeEventListener(&code_event_logger);
-
- const char* source =
- "function foo() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo()";
- i::Handle<i::String> foo_name = factory->InternalizeUtf8String("foo");
-
- // This compile will add the code to the compilation cache.
- {
- v8::HandleScope scope(isolate);
- CompileRun(source);
- }
-
- // Check function is compiled.
- i::Handle<i::Object> func_value =
- i::Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
- .ToHandleChecked();
- CHECK(func_value->IsJSFunction());
- i::Handle<i::JSFunction> function =
- i::Handle<i::JSFunction>::cast(func_value);
- CHECK(function->shared().is_compiled());
-
- // The code will survive at least two GCs.
- CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage();
- CHECK(function->shared().is_compiled());
- CHECK_EQ(code_event_logger.flushed_compiled_data_start, i::kNullAddress);
-
- // Get the start address of the compiled data before flushing.
- i::HeapObject compiled_data =
- function->shared().GetBytecodeArray(i_isolate);
- i::Address compiled_data_start = compiled_data.address();
-
- // Simulate several GCs that use full marking.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::CollectAllGarbage();
- }
-
- // foo should no longer be in the compilation cache
- CHECK(!function->shared().is_compiled());
- CHECK(!function->is_compiled());
-
- // Verify that foo() was in fact flushed.
- CHECK_EQ(code_event_logger.flushed_compiled_data_start,
- compiled_data_start);
- }
-}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 8f348c4584..b0df384376 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -60,10 +60,18 @@ using F0 = int();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
__ pushq(kRootRegister);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ __ pushq(kPointerCageBaseRegister);
+#endif
__ InitializeRootRegister();
}
-static void ExitCode(MacroAssembler* masm) { __ popq(kRootRegister); }
+static void ExitCode(MacroAssembler* masm) {
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ __ popq(kPointerCageBaseRegister);
+#endif
+ __ popq(kRootRegister);
+}
TEST(Smi) {
// Check that C++ Smi operations work as expected.
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 1b4d90628d..33acd05c15 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -77,6 +77,11 @@ TEST(NoSideEffectsToString) {
"Error: fisk hest");
CheckObject(isolate, factory->NewJSObject(isolate->object_function()),
"#<Object>");
+ CheckObject(
+ isolate,
+ factory->NewJSProxy(factory->NewJSObject(isolate->object_function()),
+ factory->NewJSObject(isolate->object_function())),
+ "#<Object>");
}
TEST(EnumCache) {
@@ -115,14 +120,10 @@ TEST(EnumCache) {
*env->Global()->Get(env.local(), v8_str("cc")).ToLocalChecked()));
// Check the transition tree.
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad),
- b->map().instance_descriptors(kRelaxedLoad));
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad),
- c->map().instance_descriptors(kRelaxedLoad));
- CHECK_NE(c->map().instance_descriptors(kRelaxedLoad),
- cc->map().instance_descriptors(kRelaxedLoad));
- CHECK_NE(b->map().instance_descriptors(kRelaxedLoad),
- cc->map().instance_descriptors(kRelaxedLoad));
+ CHECK_EQ(a->map().instance_descriptors(), b->map().instance_descriptors());
+ CHECK_EQ(b->map().instance_descriptors(), c->map().instance_descriptors());
+ CHECK_NE(c->map().instance_descriptors(), cc->map().instance_descriptors());
+ CHECK_NE(b->map().instance_descriptors(), cc->map().instance_descriptors());
// Check that the EnumLength is unset.
CHECK_EQ(a->map().EnumLength(), kInvalidEnumCacheSentinel);
@@ -131,13 +132,13 @@ TEST(EnumCache) {
CHECK_EQ(cc->map().EnumLength(), kInvalidEnumCacheSentinel);
// Check that the EnumCache is empty.
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
// The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
@@ -149,15 +150,14 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
CHECK_EQ(cc->map().EnumLength(), 3);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- EnumCache enum_cache =
- cc->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = cc->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
CHECK_EQ(enum_cache.keys().length(), 3);
CHECK_EQ(enum_cache.indices().length(), 3);
@@ -174,19 +174,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- EnumCache enum_cache =
- a->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = a->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
CHECK_EQ(enum_cache.keys().length(), 1);
CHECK_EQ(enum_cache.indices().length(), 1);
@@ -195,8 +190,7 @@ TEST(EnumCache) {
// Creating the EnumCache for {c} will create a new EnumCache on the shared
// DescriptorArray.
Handle<EnumCache> previous_enum_cache(
- a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- a->GetIsolate());
+ a->map().instance_descriptors().enum_cache(), a->GetIsolate());
Handle<FixedArray> previous_keys(previous_enum_cache->keys(),
a->GetIsolate());
Handle<FixedArray> previous_indices(previous_enum_cache->indices(),
@@ -208,8 +202,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache =
- c->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -222,25 +215,20 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
// {b} can reuse the existing EnumCache, hence we only need to set the correct
// EnumLength on the map without modifying the cache itself.
previous_enum_cache =
- handle(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- a->GetIsolate());
+ handle(a->map().instance_descriptors().enum_cache(), a->GetIsolate());
previous_keys = handle(previous_enum_cache->keys(), a->GetIsolate());
previous_indices = handle(previous_enum_cache->indices(), a->GetIsolate());
CompileRun("var s = 0; for (let key in b) { s += b[key] };");
@@ -250,8 +238,7 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache =
- c->map().instance_descriptors(kRelaxedLoad).enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are not updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -262,18 +249,14 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
- CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
- enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 19a6d3779f..1c55995efa 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1619,7 +1619,6 @@ const char* ReadString(unsigned* start) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyLogicalAssignment,
};
enum ParserSyncTestResult {
@@ -1630,15 +1629,11 @@ enum ParserSyncTestResult {
void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.contains(kAllowNatives);
- i::FLAG_harmony_logical_assignment =
- flags.contains(kAllowHarmonyLogicalAssignment);
}
void SetParserFlags(i::UnoptimizedCompileFlags* compile_flags,
base::EnumSet<ParserFlag> flags) {
compile_flags->set_allow_natives_syntax(flags.contains(kAllowNatives));
- compile_flags->set_allow_harmony_logical_assignment(
- flags.contains(kAllowHarmonyLogicalAssignment));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
@@ -4328,6 +4323,7 @@ TEST(MaybeAssignedTopLevel) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
namespace {
i::Scope* DeserializeFunctionScope(i::Isolate* isolate, i::Zone* zone,
@@ -4370,7 +4366,6 @@ TEST(AsmModuleFlag) {
CHECK(s->IsAsmModule() && s->AsDeclarationScope()->is_asm_module());
}
-
TEST(UseAsmUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
@@ -4383,7 +4378,7 @@ TEST(UseAsmUseCount) {
"function bar() { \"use asm\"; var baz = 1; }");
CHECK_LT(0, use_counts[v8::Isolate::kUseAsm]);
}
-
+#endif // V8_ENABLE_WEBASSEMBLY
TEST(StrictModeUseCount) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -12401,9 +12396,7 @@ TEST(LogicalAssignmentDestructuringErrors) {
};
// clang-format on
- static const ParserFlag flags[] = {kAllowHarmonyLogicalAssignment};
- RunParserSyncTest(context_data, error_data, kError, nullptr, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
} // namespace test_parsing
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
index 7256a5876a..48b72a004f 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -121,7 +121,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
"csdb", // spec. barrier
"ldur w<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
// branchful decompress
- "add x<<BSt>>, x26, x<<BSt>>", // Add root to ref
+ "add x<<BSt>>, x2[68], x<<BSt>>", // Add root to ref
"and x<<BSt>>, x<<BSt>>, " + kPReg, // apply the poison
"ldur w<<Prop:[0-9]+>>, \\[x<<BSt>>, #[0-9]+\\]", // load the property
"and x<<Prop>>, x<<Prop>>, " + kPReg, // apply the poison
@@ -194,7 +194,7 @@ TEST(DisasmPoisonMonomorphicLoadFloat64) {
"csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
"csdb", // spec. barrier
"ldur w<<F1:[0-9]+>>, \\[<<Obj>>, #11\\]", // load heap number
- "add x<<F1>>, x26, x<<F1>>", // Decompress ref
+ "add x<<F1>>, x2[68], x<<F1>>", // Decompress ref
"and x<<F1>>, x<<F1>>, " + kPReg, // apply the poison
"add <<Addr:x[0-9]+>>, x<<F1>>, #0x[0-9a-f]+", // addr. calculation
"and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7460e9df8f..ab56d6e7cc 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -352,10 +352,6 @@ TEST(CodeMapMoveAndDeleteCode) {
code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700)); // Deprecate bbb.
CHECK(!code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(entry1, code_map.FindEntry(ToAddress(0x1700)));
- CodeEntry* entry3 = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "ccc");
- code_map.AddCode(ToAddress(0x1750), entry3, 0x100);
- CHECK(!code_map.FindEntry(ToAddress(0x1700)));
- CHECK_EQ(entry3, code_map.FindEntry(ToAddress(0x1750)));
}
TEST(CodeMapClear) {
@@ -962,6 +958,63 @@ TEST(NodeSourceTypes) {
CHECK_EQ(unresolved_node->source_type(), v8::CpuProfileNode::kUnresolved);
}
+TEST(CodeMapRemoveCode) {
+ StringsStorage strings;
+ CodeMap code_map(strings);
+
+ CodeEntry* entry = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ code_map.AddCode(ToAddress(0x1000), entry, 0x100);
+ CHECK(code_map.RemoveCode(entry));
+ CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+
+ // Test that when two entries share the same address, we remove only the
+ // entry that we desired to.
+ CodeEntry* colliding_entry1 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry* colliding_entry2 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ code_map.AddCode(ToAddress(0x1000), colliding_entry1, 0x100);
+ code_map.AddCode(ToAddress(0x1000), colliding_entry2, 0x100);
+
+ CHECK(code_map.RemoveCode(colliding_entry1));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1000)), colliding_entry2);
+
+ CHECK(code_map.RemoveCode(colliding_entry2));
+ CHECK(!code_map.FindEntry(ToAddress(0x1000)));
+}
+
+TEST(CodeMapMoveOverlappingCode) {
+ StringsStorage strings;
+ CodeMap code_map(strings);
+ CodeEntry* colliding_entry1 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry* colliding_entry2 =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "bbb");
+ CodeEntry* after_entry =
+ new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "ccc");
+
+ code_map.AddCode(ToAddress(0x1400), colliding_entry1, 0x200);
+ code_map.AddCode(ToAddress(0x1400), colliding_entry2, 0x200);
+ code_map.AddCode(ToAddress(0x1800), after_entry, 0x200);
+
+ CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1400));
+ CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1400));
+ CHECK_EQ(after_entry->instruction_start(), ToAddress(0x1800));
+
+ CHECK(code_map.FindEntry(ToAddress(0x1400)));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+
+ code_map.MoveCode(ToAddress(0x1400), ToAddress(0x1600));
+
+ CHECK(!code_map.FindEntry(ToAddress(0x1400)));
+ CHECK(code_map.FindEntry(ToAddress(0x1600)));
+ CHECK_EQ(code_map.FindEntry(ToAddress(0x1800)), after_entry);
+
+ CHECK_EQ(colliding_entry1->instruction_start(), ToAddress(0x1600));
+ CHECK_EQ(colliding_entry2->instruction_start(), ToAddress(0x1600));
+ CHECK_EQ(after_entry->instruction_start(), ToAddress(0x1800));
+}
+
} // namespace test_profile_generator
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index cf66f54f4f..2884dfd136 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1730,8 +1730,7 @@ TEST(CodeSerializerPromotedToCompilationCache) {
MaybeHandle<SharedFunctionInfo> shared =
isolate->compilation_cache()->LookupScript(
- src, src, 0, 0, v8::ScriptOriginOptions(), isolate->native_context(),
- LanguageMode::kSloppy);
+ src, src, 0, 0, v8::ScriptOriginOptions(), LanguageMode::kSloppy);
CHECK(*shared.ToHandleChecked() == *copy);
@@ -3348,6 +3347,13 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
FreeCurrentEmbeddedBlob();
}
+MaybeLocal<v8::Module> ResolveCallback(Local<v8::Context> context,
+ Local<v8::String> specifier,
+ Local<v8::FixedArray> import_assertions,
+ Local<v8::Module> referrer) {
+ return {};
+}
+
UNINITIALIZED_TEST(SnapshotCreatorAddData) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
@@ -3397,11 +3403,23 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
v8::AccessorSignature::New(isolate,
v8::FunctionTemplate::New(isolate));
+ v8::ScriptOrigin origin(isolate, v8_str(""), {}, {}, {}, {}, {}, {}, {},
+ true);
+ v8::ScriptCompiler::Source source(
+ v8::String::NewFromUtf8Literal(
+ isolate, "export let a = 42; globalThis.a = {};"),
+ origin);
+ v8::Local<v8::Module> module =
+ v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ module->InstantiateModule(context, ResolveCallback).ToChecked();
+ module->Evaluate(context).ToLocalChecked();
+
CHECK_EQ(0u, creator.AddData(context, object));
CHECK_EQ(1u, creator.AddData(context, v8_str("context-dependent")));
CHECK_EQ(2u, creator.AddData(context, persistent_number_1.Get(isolate)));
CHECK_EQ(3u, creator.AddData(context, object_template));
CHECK_EQ(4u, creator.AddData(context, persistent_context.Get(isolate)));
+ CHECK_EQ(5u, creator.AddData(context, module));
creator.AddContext(context);
CHECK_EQ(0u, creator.AddData(v8_str("context-independent")));
@@ -3460,7 +3478,19 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
CHECK_EQ(*v8::Utils::OpenHandle(*serialized_context),
*v8::Utils::OpenHandle(*context));
- CHECK(context->GetDataFromSnapshotOnce<v8::Value>(5).IsEmpty());
+ v8::Local<v8::Module> serialized_module =
+ context->GetDataFromSnapshotOnce<v8::Module>(5).ToLocalChecked();
+ CHECK(context->GetDataFromSnapshotOnce<v8::Context>(5).IsEmpty());
+ {
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::Object> mod_ns =
+ serialized_module->GetModuleNamespace().As<v8::Object>();
+ CHECK(mod_ns->Get(context, v8_str("a"))
+ .ToLocalChecked()
+ ->StrictEquals(v8_num(42.0)));
+ }
+
+ CHECK(context->GetDataFromSnapshotOnce<v8::Value>(6).IsEmpty());
// Check serialized data on the isolate.
string = isolate->GetDataFromSnapshotOnce<v8::String>(0).ToLocalChecked();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 41aa707231..735040a4c5 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -2096,6 +2096,84 @@ TEST(InternalizeExternalStringUncachedWithCopyTwoByte) {
CHECK(internal->IsInternalizedString());
}
+// Show that we cache the data pointer for internal, external and uncached
+// strings with cacheable resources through MakeExternal. One byte version.
+TEST(CheckCachedDataInternalExternalUncachedString) {
+ CcTest::InitializeVM();
+ Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Due to different size restrictions the string needs to be small but not too
+ // small. One of these restrictions is whether pointer compression is enabled.
+#ifdef V8_COMPRESS_POINTERS
+ const char* raw_small = "small string";
+#elif V8_TARGET_ARCH_32_BIT
+ const char* raw_small = "smol";
+#else
+ const char* raw_small = "smalls";
+#endif // V8_COMPRESS_POINTERS
+
+ Handle<String> string =
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(raw_small));
+ OneByteResource* resource =
+ new OneByteResource(i::StrDup(raw_small), strlen(raw_small));
+
+ // Check it is external, internalized, and uncached with a cacheable resource.
+ string->MakeExternal(resource);
+ CHECK(string->IsOneByteRepresentation());
+ CHECK(string->IsExternalString());
+ CHECK(string->IsInternalizedString());
+
+ // Check that the external string is uncached, its resource is cacheable, and
+ // that we indeed cached it.
+ Handle<ExternalOneByteString> external_string =
+ Handle<ExternalOneByteString>::cast(string);
+ CHECK(external_string->is_uncached());
+ CHECK(external_string->resource()->IsCacheable());
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+}
+
+// Show that we cache the data pointer for internal, external and uncached
+// strings with cacheable resources through MakeExternal. Two byte version.
+TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) {
+ CcTest::InitializeVM();
+ Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Due to different size restrictions the string needs to be small but not too
+ // small. One of these restrictions is whether pointer compression is enabled.
+#ifdef V8_COMPRESS_POINTERS
+ const char* raw_small = "small string";
+#elif V8_TARGET_ARCH_32_BIT
+ const char* raw_small = "smol";
+#else
+ const char* raw_small = "smalls";
+#endif // V8_COMPRESS_POINTERS
+
+ Handle<String> string =
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(raw_small));
+ Resource* resource =
+ new Resource(AsciiToTwoByteString(raw_small), strlen(raw_small));
+
+ // Check it is external, internalized, and uncached with a cacheable resource.
+ string->MakeExternal(resource);
+ CHECK(string->IsTwoByteRepresentation());
+ CHECK(string->IsExternalString());
+ CHECK(string->IsInternalizedString());
+
+ // Check that the external string is uncached, its resource is cacheable, and
+ // that we indeed cached it.
+ Handle<ExternalTwoByteString> external_string =
+ Handle<ExternalTwoByteString>::cast(string);
+ CHECK(external_string->is_uncached());
+ CHECK(external_string->resource()->IsCacheable());
+ CHECK_NOT_NULL(external_string->resource()->cached_data());
+ CHECK_EQ(external_string->resource()->cached_data(),
+ external_string->resource()->data());
+}
+
} // namespace test_strings
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
new file mode 100644
index 0000000000..5c730883e8
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
@@ -0,0 +1,466 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/cpu-features.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+#include "test/cctest/test-swiss-name-dictionary-shared-tests.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+// The non-SIMD SwissNameDictionary implementation requires 64 bit integer
+// operations, which CSA/Torque don't offer on 32 bit platforms. Therefore, we
+// cannot run the CSA version of the tests on 32 bit platforms. The only
+// exception is IA32, where we can use SSE and don't need 64 bit integers.
+// TODO(v8:11330) The Torque SIMD implementation is not specific to SSE (like
+// the C++ one), but works on other platforms. It should be possible to create a
+// workaround where on 32 bit, non-IA32 platforms we use the "portable", non-SSE
+// implementation on the C++ side (which uses a group size of 8) and create a
+// special version of the SIMD Torque implementation that works for group size 8
+// instead of 16.
+#if V8_TARGET_ARCH_64_BIT || V8_TARGET_ARCH_IA32
+
+// Executes tests by executing CSA/Torque versions of dictionary operations.
+// See RuntimeTestRunner for description of public functions.
+class CSATestRunner {
+ public:
+ CSATestRunner(Isolate* isolate, int initial_capacity, KeyCache& keys);
+
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ static bool IsEnabled() {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ CpuFeatures::SupportedFeatures();
+ return CpuFeatures::IsSupported(CpuFeature::AVX) ||
+ CpuFeatures::IsSupported(CpuFeature::SSSE3);
+#else
+ // Other 64-bit architectures always support the required operations.
+ return true;
+#endif
+ }
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details);
+ InternalIndex FindEntry(Handle<Name> key);
+ void Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details);
+ void Delete(InternalIndex entry);
+ void RehashInplace();
+ void Shrink();
+
+ Handle<FixedArray> GetData(InternalIndex entry);
+ void CheckCounts(base::Optional<int> capacity, base::Optional<int> elements,
+ base::Optional<int> deleted);
+ void CheckEnumerationOrder(const std::vector<std::string>& expected_keys);
+ void CheckCopy();
+ void VerifyHeap();
+
+ void PrintTable();
+
+ Handle<SwissNameDictionary> table;
+
+ private:
+ using Label = compiler::CodeAssemblerLabel;
+ template <class T>
+ using TVariable = compiler::TypedCodeAssemblerVariable<T>;
+
+ void CheckAgainstReference();
+
+ void Allocate(Handle<Smi> capacity);
+
+ Isolate* isolate_;
+
+ // Used to mirror all operations using C++ versions of all operations,
+ // yielding a reference to compare against.
+ Handle<SwissNameDictionary> reference_;
+
+ // CSA functions execute the corresponding dictionary operation.
+ compiler::FunctionTester find_entry_ft_;
+ compiler::FunctionTester get_data_ft_;
+ compiler::FunctionTester put_ft_;
+ compiler::FunctionTester delete_ft_;
+ compiler::FunctionTester add_ft_;
+ compiler::FunctionTester allocate_ft_;
+ compiler::FunctionTester get_counts_ft_;
+ compiler::FunctionTester copy_ft_;
+
+ // Used to create the FunctionTesters above.
+ static Handle<Code> create_get_data(Isolate* isolate);
+ static Handle<Code> create_find_entry(Isolate* isolate);
+ static Handle<Code> create_put(Isolate* isolate);
+ static Handle<Code> create_delete(Isolate* isolate);
+ static Handle<Code> create_add(Isolate* isolate);
+ static Handle<Code> create_allocate(Isolate* isolate);
+ static Handle<Code> create_get_counts(Isolate* isolate);
+ static Handle<Code> create_copy(Isolate* isolate);
+
+ // Number of parameters of each of the tester functions above.
+ static constexpr int kFindEntryParams = 2; // (table, key)
+ static constexpr int kGetDataParams = 2; // (table, entry)
+ static constexpr int kPutParams = 4; // (table, entry, value, details)
+ static constexpr int kDeleteParams = 2; // (table, entry)
+ static constexpr int kAddParams = 4; // (table, key, value, details)
+ static constexpr int kAllocateParams = 1; // (capacity)
+ static constexpr int kGetCountsParams = 1; // (table)
+ static constexpr int kCopyParams = 1; // (table)
+};
+
+CSATestRunner::CSATestRunner(Isolate* isolate, int initial_capacity,
+ KeyCache& keys)
+ : isolate_{isolate},
+ reference_{isolate_->factory()->NewSwissNameDictionaryWithCapacity(
+ initial_capacity, AllocationType::kYoung)},
+ find_entry_ft_(create_find_entry(isolate), kFindEntryParams),
+ get_data_ft_(create_get_data(isolate), kGetDataParams),
+ put_ft_{create_put(isolate), kPutParams},
+ delete_ft_{create_delete(isolate), kDeleteParams},
+ add_ft_{create_add(isolate), kAddParams},
+ allocate_ft_{create_allocate(isolate), kAllocateParams},
+ get_counts_ft_{create_get_counts(isolate), kGetCountsParams},
+ copy_ft_{create_copy(isolate), kCopyParams} {
+ Allocate(handle(Smi::FromInt(initial_capacity), isolate));
+}
+
+void CSATestRunner::Add(Handle<Name> key, Handle<Object> value,
+ PropertyDetails details) {
+ ReadOnlyRoots roots(isolate_);
+ reference_ =
+ SwissNameDictionary::Add(isolate_, reference_, key, value, details);
+
+ Handle<Smi> details_smi = handle(details.AsSmi(), isolate_);
+ Handle<Oddball> success =
+ add_ft_.CallChecked<Oddball>(table, key, value, details_smi);
+
+ if (*success == roots.false_value()) {
+ // |add_ft_| does not resize and indicates the need to do so by returning
+ // false.
+ int capacity = table->Capacity();
+ int used_capacity = table->UsedCapacity();
+ CHECK_GT(used_capacity + 1,
+ SwissNameDictionary::MaxUsableCapacity(capacity));
+
+ table = SwissNameDictionary::Add(isolate_, table, key, value, details);
+ }
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::Allocate(Handle<Smi> capacity) {
+ // We must handle |capacity| == 0 specially, because
+ // AllocateSwissNameDictionary (just like AllocateNameDictionary) always
+ // returns a non-zero sized table.
+ if (capacity->value() == 0) {
+ table = ReadOnlyRoots(isolate_).empty_swiss_property_dictionary_handle();
+ } else {
+ table = allocate_ft_.CallChecked<SwissNameDictionary>(capacity);
+ }
+
+ CheckAgainstReference();
+}
+
+InternalIndex CSATestRunner::FindEntry(Handle<Name> key) {
+ Handle<Smi> index = find_entry_ft_.CallChecked<Smi>(table, key);
+ if (index->value() == SwissNameDictionary::kNotFoundSentinel) {
+ return InternalIndex::NotFound();
+ } else {
+ return InternalIndex(index->value());
+ }
+}
+
+Handle<FixedArray> CSATestRunner::GetData(InternalIndex entry) {
+ DCHECK(entry.is_found());
+
+ return get_data_ft_.CallChecked<FixedArray>(
+ table, handle(Smi::FromInt(entry.as_int()), isolate_));
+}
+
+void CSATestRunner::CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements,
+ base::Optional<int> deleted) {
+ Handle<FixedArray> counts = get_counts_ft_.CallChecked<FixedArray>(table);
+
+ if (capacity.has_value()) {
+ CHECK_EQ(Smi::FromInt(capacity.value()), counts->get(0));
+ }
+
+ if (elements.has_value()) {
+ CHECK_EQ(Smi::FromInt(elements.value()), counts->get(1));
+ }
+
+ if (deleted.has_value()) {
+ CHECK_EQ(Smi::FromInt(deleted.value()), counts->get(2));
+ }
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::CheckEnumerationOrder(
+ const std::vector<std::string>& expected_keys) {
+ // Not implemented in CSA. Making this a no-op (rather than forbidding
+ // executing CSA tests with this operation) because CheckEnumerationOrder is
+ // also used by some tests whose main goal is not to test the enumeration
+ // order.
+}
+
+void CSATestRunner::Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ DCHECK(entry.is_found());
+ reference_->ValueAtPut(entry, *new_value);
+ reference_->DetailsAtPut(entry, new_details);
+
+ Handle<Smi> entry_smi = handle(Smi::FromInt(entry.as_int()), isolate_);
+ Handle<Smi> details_smi = handle(new_details.AsSmi(), isolate_);
+
+ put_ft_.Call(table, entry_smi, new_value, details_smi);
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::Delete(InternalIndex entry) {
+ DCHECK(entry.is_found());
+ reference_ = SwissNameDictionary::DeleteEntry(isolate_, reference_, entry);
+
+ Handle<Smi> entry_smi = handle(Smi::FromInt(entry.as_int()), isolate_);
+ table = delete_ft_.CallChecked<SwissNameDictionary>(table, entry_smi);
+
+ CheckAgainstReference();
+}
+
+void CSATestRunner::RehashInplace() {
+ // There's no CSA version of this. Use IsRuntimeTest to ensure that we only
+ // run a test using this if it's a runtime test.
+ UNREACHABLE();
+}
+
+void CSATestRunner::Shrink() {
+ // There's no CSA version of this. Use IsRuntimeTest to ensure that we only
+ // run a test using this if it's a runtime test.
+ UNREACHABLE();
+}
+
+void CSATestRunner::CheckCopy() {
+ Handle<SwissNameDictionary> copy =
+ copy_ft_.CallChecked<SwissNameDictionary>(table);
+ CHECK(table->EqualsForTesting(*copy));
+}
+
+void CSATestRunner::VerifyHeap() {
+#if VERIFY_HEAP
+ table->SwissNameDictionaryVerify(isolate_, true);
+#endif
+}
+
+void CSATestRunner::PrintTable() {
+#ifdef OBJECT_PRINT
+ table->SwissNameDictionaryPrint(std::cout);
+#endif
+}
+
+Handle<Code> CSATestRunner::create_find_entry(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kFindEntryParams == 2); // (table, key)
+ compiler::CodeAssemblerTester asm_tester(isolate, kFindEntryParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Name> key = m.Parameter<Name>(2);
+
+ Label done(&m);
+ TVariable<IntPtrT> entry_var(
+ m.IntPtrConstant(SwissNameDictionary::kNotFoundSentinel), &m);
+
+ // |entry_var| defaults to |kNotFoundSentinel| meaning that one label
+ // suffices.
+ m.SwissNameDictionaryFindEntry(table, key, &done, &entry_var, &done);
+
+ m.Bind(&done);
+ m.Return(m.SmiFromIntPtr(entry_var.value()));
+ }
+
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_get_data(Isolate* isolate) {
+ STATIC_ASSERT(kGetDataParams == 2); // (table, entry)
+ compiler::CodeAssemblerTester asm_tester(isolate, kGetDataParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<IntPtrT> entry = m.SmiToIntPtr(m.Parameter<Smi>(2));
+
+ TNode<FixedArray> data = m.AllocateZeroedFixedArray(m.IntPtrConstant(3));
+
+ TNode<Object> key = m.LoadSwissNameDictionaryKey(table, entry);
+ TNode<Object> value = m.LoadValueByKeyIndex(table, entry);
+ TNode<Smi> details = m.SmiFromUint32(m.LoadDetailsByKeyIndex(table, entry));
+
+ m.StoreFixedArrayElement(data, 0, key);
+ m.StoreFixedArrayElement(data, 1, value);
+ m.StoreFixedArrayElement(data, 2, details);
+
+ m.Return(data);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_put(Isolate* isolate) {
+ STATIC_ASSERT(kPutParams == 4); // (table, entry, value, details)
+ compiler::CodeAssemblerTester asm_tester(isolate, kPutParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Smi> entry = m.Parameter<Smi>(2);
+ TNode<Object> value = m.Parameter<Object>(3);
+ TNode<Smi> details = m.Parameter<Smi>(4);
+
+ TNode<IntPtrT> entry_intptr = m.SmiToIntPtr(entry);
+
+ m.StoreValueByKeyIndex(table, entry_intptr, value,
+ WriteBarrierMode::UPDATE_WRITE_BARRIER);
+ m.StoreDetailsByKeyIndex(table, entry_intptr, details);
+
+ m.Return(m.UndefinedConstant());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_delete(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kDeleteParams == 2); // (table, entry)
+ compiler::CodeAssemblerTester asm_tester(isolate, kDeleteParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<IntPtrT> entry = m.SmiToIntPtr(m.Parameter<Smi>(2));
+
+ TVariable<SwissNameDictionary> shrunk_table_var(table, &m);
+ Label done(&m);
+
+ m.SwissNameDictionaryDelete(table, entry, &done, &shrunk_table_var);
+ m.Goto(&done);
+
+ m.Bind(&done);
+ m.Return(shrunk_table_var.value());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_add(Isolate* isolate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!IsEnabled()) {
+ return isolate->builtins()->builtin_handle(Builtins::kIllegal);
+ }
+ STATIC_ASSERT(kAddParams == 4); // (table, key, value, details)
+ compiler::CodeAssemblerTester asm_tester(isolate, kAddParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+ TNode<Name> key = m.Parameter<Name>(2);
+ TNode<Object> value = m.Parameter<Object>(3);
+ TNode<Smi> details = m.Parameter<Smi>(4);
+
+ Label needs_resize(&m);
+
+ TNode<Int32T> d32 = m.SmiToInt32(details);
+ TNode<Uint8T> d = m.UncheckedCast<Uint8T>(d32);
+
+ m.SwissNameDictionaryAdd(table, key, value, d, &needs_resize);
+ m.Return(m.TrueConstant());
+
+ m.Bind(&needs_resize);
+ m.Return(m.FalseConstant());
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_allocate(Isolate* isolate) {
+ STATIC_ASSERT(kAllocateParams == 1); // (capacity)
+ compiler::CodeAssemblerTester asm_tester(isolate, kAllocateParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<IntPtrT> capacity = m.SmiToIntPtr(m.Parameter<Smi>(1));
+
+ TNode<SwissNameDictionary> table =
+ m.AllocateSwissNameDictionaryWithCapacity(capacity);
+
+ m.Return(table);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_get_counts(Isolate* isolate) {
+ STATIC_ASSERT(kGetCountsParams == 1); // (table)
+ compiler::CodeAssemblerTester asm_tester(isolate, kGetCountsParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+
+ TNode<IntPtrT> capacity =
+ m.ChangeInt32ToIntPtr(m.LoadSwissNameDictionaryCapacity(table));
+ TNode<IntPtrT> elements =
+ m.LoadSwissNameDictionaryNumberOfElements(table, capacity);
+ TNode<IntPtrT> deleted =
+ m.LoadSwissNameDictionaryNumberOfDeletedElements(table, capacity);
+
+ TNode<FixedArray> results = m.AllocateZeroedFixedArray(m.IntPtrConstant(3));
+
+ auto check_and_add = [&](TNode<IntPtrT> value, int array_index) {
+ CSA_ASSERT(&m, m.UintPtrGreaterThanOrEqual(value, m.IntPtrConstant(0)));
+ CSA_ASSERT(&m, m.UintPtrLessThanOrEqual(
+ value, m.IntPtrConstant(Smi::kMaxValue)));
+ TNode<Smi> smi = m.SmiFromIntPtr(value);
+ m.StoreFixedArrayElement(results, array_index, smi);
+ };
+
+ check_and_add(capacity, 0);
+ check_and_add(elements, 1);
+ check_and_add(deleted, 2);
+
+ m.Return(results);
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+Handle<Code> CSATestRunner::create_copy(Isolate* isolate) {
+ STATIC_ASSERT(kCopyParams == 1); // (table)
+ compiler::CodeAssemblerTester asm_tester(isolate, kCopyParams + 1);
+ CodeStubAssembler m(asm_tester.state());
+ {
+ TNode<SwissNameDictionary> table = m.Parameter<SwissNameDictionary>(1);
+
+ m.Return(m.CopySwissNameDictionary(table));
+ }
+ return asm_tester.GenerateCodeCloseAndEscape();
+}
+
+void CSATestRunner::CheckAgainstReference() {
+ CHECK(table->EqualsForTesting(*reference_));
+}
+
+// Executes the tests defined in test-swiss-name-dictionary-shared-tests.h as if
+// they were defined in this file, using the CSATestRunner. See comments in
+// test-swiss-name-dictionary-shared-tests.h and in
+// swiss-name-dictionary-infra.h for details.
+const char kCSATestFileName[] = __FILE__;
+SharedSwissTableTests<CSATestRunner, kCSATestFileName> execute_shared_tests_csa;
+
+#endif
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
new file mode 100644
index 0000000000..539d71c823
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.cc
@@ -0,0 +1,139 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+namespace {
+std::vector<PropertyDetails> MakeDistinctDetails() {
+ std::vector<PropertyDetails> result(32, PropertyDetails::Empty());
+
+ int i = 0;
+ for (PropertyKind kind : {PropertyKind::kAccessor, PropertyKind::kAccessor}) {
+ for (PropertyConstness constness :
+ {PropertyConstness::kConst, PropertyConstness::kMutable}) {
+ for (bool writeable : {true, false}) {
+ for (bool enumerable : {true, false}) {
+ for (bool configurable : {true, false}) {
+ uint8_t attrs = static_cast<uint8_t>(PropertyAttributes::NONE);
+ if (!writeable) attrs |= PropertyAttributes::READ_ONLY;
+ if (!enumerable) {
+ attrs |= PropertyAttributes::DONT_ENUM;
+ }
+ if (!configurable) {
+ attrs |= PropertyAttributes::DONT_DELETE;
+ }
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(attrs);
+ PropertyDetails details(kind, attributes,
+ PropertyCellType::kNoCell);
+ details = details.CopyWithConstness(constness);
+ result[i++] = details;
+ }
+ }
+ }
+ }
+ }
+ return result;
+}
+
+} // namespace
+
+// To enable more specific testing, we allow overriding the H1 and H2 hashes for
+// a key before adding it to the SwissNameDictionary. The necessary overriding
+// of the stored hash happens here. Symbols are compared by identity; we cache
+// the Symbol associated with each std::string key. This means that using
+// "my_key" twice in the same TestSequence will return the same Symbol
+// associated with "my_key" both times. This also means that within a given
+// TestSequence, we cannot use the same (std::string) key with different faked
+// hashes.
+Handle<Name> CreateKeyWithHash(Isolate* isolate, KeyCache& keys,
+ const Key& key) {
+ Handle<Symbol> key_symbol;
+ auto iter = keys.find(key.str);
+
+ if (iter == keys.end()) {
+    // We haven't seen the given string as a key in the current
+ // TestSequence. Create it, fake its hash if requested and cache it.
+
+ key_symbol = isolate->factory()->NewSymbol();
+
+ // We use the description field to store the original string key for
+ // debugging.
+ Handle<String> description =
+ isolate->factory()->NewStringFromAsciiChecked(key.str.c_str());
+ key_symbol->set_description(*description);
+
+ CachedKey new_info = {key_symbol, key.h1_override, key.h2_override};
+ keys[key.str] = new_info;
+
+ if (key.h1_override || key.h2_override) {
+ uint32_t actual_hash = key_symbol->hash();
+ int fake_hash = actual_hash;
+ if (key.h1_override) {
+ uint32_t override_with = key.h1_override.value().value;
+
+ // We cannot override h1 with 0 unless we also override h2 with a
+ // non-zero value. Otherwise, the overall hash may become 0 (which is
+        // forbidden) based on the (nondeterministic) choice of h2.
+ CHECK_IMPLIES(override_with == 0,
+ key.h2_override && key.h2_override.value().value != 0);
+
+ fake_hash = (override_with << swiss_table::kH2Bits) |
+ swiss_table::H2(actual_hash);
+ }
+ if (key.h2_override) {
+ // Unset 7 bits belonging to H2:
+ fake_hash &= ~((1 << swiss_table::kH2Bits) - 1);
+
+ uint8_t override_with = key.h2_override.value().value;
+
+ // Same as above, but for h2: Prevent accidentally creating 0 fake hash.
+ CHECK_IMPLIES(override_with == 0,
+ key.h1_override && key.h1_override.value().value != 0);
+
+ CHECK_LT(key.h2_override.value().value, 1 << swiss_table::kH2Bits);
+ fake_hash |= swiss_table::H2(override_with);
+ }
+
+ // Ensure that just doing a shift below is correct.
+ static_assert(Name::kNofHashBitFields == 2, "This test needs updating");
+ static_assert(Name::kHashNotComputedMask == 1,
+ "This test needs updating");
+ static_assert(Name::kIsNotIntegerIndexMask == 2,
+ "This test needs updating");
+
+ // Prepare what to put into the hash field.
+ uint32_t hash_field = fake_hash << Name::kHashShift;
+ CHECK_NE(hash_field, 0);
+
+ key_symbol->set_raw_hash_field(hash_field);
+ CHECK_EQ(fake_hash, key_symbol->hash());
+ }
+
+ return key_symbol;
+ } else {
+ // We've seen this key before. Return the cached version.
+ CachedKey& cached_info = iter->second;
+
+ // Internal consistency check: Make sure that we didn't request something
+ // else w.r.t. hash faking when using this key before. If so, the test case
+ // would make inconsistent assumptions about how the hashes should be faked
+ // and be broken.
+ CHECK_EQ(cached_info.h1_override, key.h1_override);
+ CHECK_EQ(cached_info.h2_override, key.h2_override);
+
+ return cached_info.key_symbol;
+ }
+}
+
+const std::vector<PropertyDetails> distinct_property_details =
+ MakeDistinctDetails();
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h
new file mode 100644
index 0000000000..60ac78477a
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-infra.h
@@ -0,0 +1,321 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
+#define V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
+
+#include <memory>
+#include <utility>
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/swiss-name-dictionary-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+using Value = std::string;
+using ValueOpt = base::Optional<Value>;
+using PropertyDetailsOpt = base::Optional<PropertyDetails>;
+using IndexOpt = base::Optional<InternalIndex>;
+
+static const ValueOpt kNoValue;
+static const PropertyDetailsOpt kNoDetails;
+static const base::Optional<int> kNoInt;
+static const IndexOpt kIndexUnknown;
+
+static const std::vector<int> interesting_initial_capacities = {
+ 4,
+ 8,
+ 16,
+ 128,
+ 1 << (sizeof(uint16_t) * 8),
+ 1 << (sizeof(uint16_t) * 8 + 1)};
+
+// Capacities for tests that may timeout on larger capacities when
+// sanitizers/CFI are enabled.
+// TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run by
+// the test suite, which will speed things up.
+#if defined(THREAD_SANITIZER) || defined(V8_ENABLE_CONTROL_FLOW_INTEGRITY)
+static const std::vector<int> capacities_for_slow_sanitizer_tests = {4, 8, 16,
+ 128, 1024};
+#else
+static const std::vector<int> capacities_for_slow_sanitizer_tests =
+ interesting_initial_capacities;
+#endif
+
+// Capacities for tests that are generally slow, so that they don't use the
+// maximum capacities in debug mode.
+// TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run by
+// the test suite, which will speed things up.
+#if DEBUG
+static const std::vector<int> capacities_for_slow_debug_tests = {4, 8, 16, 128,
+ 1024};
+#else
+static const std::vector<int> capacities_for_slow_debug_tests =
+ interesting_initial_capacities;
+#endif
+
+extern const std::vector<PropertyDetails> distinct_property_details;
+
+// Wrapping this in a struct makes the tests a bit more readable.
+struct FakeH1 {
+ uint32_t value;
+
+ explicit FakeH1(int value) : value{static_cast<uint32_t>(value)} {}
+
+ bool operator==(const FakeH1& other) const { return value == other.value; }
+};
+
+// Wrapping this in a struct makes the tests a bit more readable.
+struct FakeH2 {
+ uint8_t value;
+
+ bool operator==(const FakeH2& other) const { return value == other.value; }
+};
+
+using FakeH1Opt = base::Optional<FakeH1>;
+using FakeH2Opt = base::Optional<FakeH2>;
+
+// Representation of keys used when writing test cases.
+struct Key {
+ std::string str;
+
+ // If present, contains the value we faked the key's H1 hash with.
+ FakeH1Opt h1_override = FakeH1Opt();
+
+ // If present, contains the value we faked the key's H2 hash with.
+ FakeH2Opt h2_override = FakeH2Opt();
+};
+
+// Internal representation of keys. See |create_key_with_hash| for details.
+struct CachedKey {
+ Handle<Symbol> key_symbol;
+
+ // If present, contains the value we faked the key's H1 hash with.
+ FakeH1Opt h1_override;
+
+ // If present, contains the value we faked the key's H2 hash with.
+ FakeH2Opt h2_override;
+};
+
+using KeyCache = std::unordered_map<std::string, CachedKey>;
+
+Handle<Name> CreateKeyWithHash(Isolate* isolate, KeyCache& keys,
+ const Key& key);
+
+class RuntimeTestRunner;
+class CSATestRunner;
+
+// Abstraction over executing a sequence of operations on a single hash table.
+// Actually performing those operations is done by the TestRunner.
+template <typename TestRunner>
+class TestSequence {
+ public:
+ explicit TestSequence(Isolate* isolate, int initial_capacity)
+ : isolate{isolate},
+ initial_capacity{initial_capacity},
+ keys_{},
+ runner_{isolate, initial_capacity, keys_} {}
+
+ // Determines whether or not to run VerifyHeap after each operation. Can make
+ // debugging easier.
+ static constexpr bool kVerifyAfterEachStep = false;
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+ runner_.Add(key, value, details);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void Add(const Key& key, ValueOpt value = kNoValue,
+ PropertyDetailsOpt details = kNoDetails) {
+ if (!value) {
+ value = "dummy_value";
+ }
+
+ if (!details) {
+ details = PropertyDetails::Empty();
+ }
+
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, key);
+ Handle<Object> value_handle = isolate->factory()->NewStringFromAsciiChecked(
+ value.value().c_str(), AllocationType::kYoung);
+
+ Add(key_handle, value_handle, details.value());
+ }
+
+ void UpdateByKey(Handle<Name> key, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ InternalIndex entry = runner_.FindEntry(key);
+ CHECK(entry.is_found());
+ runner_.Put(entry, new_value, new_details);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void UpdateByKey(const Key& existing_key, Value new_value,
+ PropertyDetails new_details) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, existing_key);
+ Handle<Object> value_handle = isolate->factory()->NewStringFromAsciiChecked(
+ new_value.c_str(), AllocationType::kYoung);
+
+ UpdateByKey(key_handle, value_handle, new_details);
+ }
+
+ void DeleteByKey(Handle<Name> key) {
+ InternalIndex entry = runner_.FindEntry(key);
+ CHECK(entry.is_found());
+ runner_.Delete(entry);
+
+ if (kVerifyAfterEachStep) {
+ runner_.VerifyHeap();
+ }
+ }
+
+ void DeleteByKey(const Key& existing_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, existing_key);
+
+ DeleteByKey(key_handle);
+ }
+
+ void CheckDataAtKey(Handle<Name> key, IndexOpt expected_index_opt,
+ base::Optional<Handle<Object>> expected_value_opt,
+ PropertyDetailsOpt expected_details_opt) {
+ InternalIndex actual_index = runner_.FindEntry(key);
+
+ if (expected_index_opt) {
+ CHECK_EQ(expected_index_opt.value(), actual_index);
+ }
+
+ if (actual_index.is_found()) {
+ Handle<FixedArray> data = runner_.GetData(actual_index);
+ CHECK_EQ(*key, data->get(0));
+
+ if (expected_value_opt) {
+ CHECK(expected_value_opt.value()->StrictEquals(data->get(1)));
+ }
+
+ if (expected_details_opt) {
+ CHECK_EQ(expected_details_opt.value().AsSmi(), data->get(2));
+ }
+ }
+ }
+
+ void CheckDataAtKey(const Key& expected_key, IndexOpt expected_index,
+ ValueOpt expected_value = kNoValue,
+ PropertyDetailsOpt expected_details = kNoDetails) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+ base::Optional<Handle<Object>> value_handle_opt;
+ if (expected_value) {
+ value_handle_opt = isolate->factory()->NewStringFromAsciiChecked(
+ expected_value.value().c_str(), AllocationType::kYoung);
+ }
+
+ CheckDataAtKey(key_handle, expected_index, value_handle_opt,
+ expected_details);
+ }
+
+ void CheckKeyAbsent(Handle<Name> key) {
+ CHECK(runner_.FindEntry(key).is_not_found());
+ }
+
+ void CheckKeyAbsent(const Key& expected_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+ CheckKeyAbsent(key_handle);
+ }
+
+ void CheckHasKey(const Key& expected_key) {
+ Handle<Name> key_handle = CreateKeyWithHash(isolate, keys_, expected_key);
+
+ CHECK(runner_.FindEntry(key_handle).is_found());
+ }
+
+ void CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements = base::Optional<int>(),
+ base::Optional<int> deleted = base::Optional<int>()) {
+ runner_.CheckCounts(capacity, elements, deleted);
+ }
+
+ void CheckEnumerationOrder(const std::vector<std::string>& keys) {
+ runner_.CheckEnumerationOrder(keys);
+ }
+
+ void RehashInplace() { runner_.RehashInplace(); }
+
+ void Shrink() { runner_.Shrink(); }
+
+ void CheckCopy() { runner_.CheckCopy(); }
+
+ static constexpr bool IsRuntimeTest() {
+ return std::is_same<TestRunner, RuntimeTestRunner>::value;
+ }
+
+ void VerifyHeap() { runner_.VerifyHeap(); }
+
+ // Just for debugging
+ void Print() { runner_.PrintTable(); }
+
+ static std::vector<int> boundary_indices(int capacity) {
+ if (capacity == 4 && SwissNameDictionary::MaxUsableCapacity(4) < 4) {
+ // If we cannot put 4 entries in a capacity 4 table without resizing, just
+ // work with 3 boundary indices.
+ return {0, capacity - 2, capacity - 1};
+ }
+ return {0, 1, capacity - 2, capacity - 1};
+ }
+
+ // Contains all possible PropertyDetails suitable for storing in a
+ // SwissNameDictionary (i.e., PropertyDetails for dictionary mode objects
+ // without storing an enumeration index). Used to ensure that we can correctly
+  // store and retrieve all possible such PropertyDetails.
+ static const std::vector<PropertyDetails> distinct_property_details;
+
+ static void WithAllInterestingInitialCapacities(
+ std::function<void(TestSequence&)> manipulate_sequence) {
+ WithInitialCapacities(interesting_initial_capacities, manipulate_sequence);
+ }
+
+ static void WithInitialCapacity(
+ int capacity, std::function<void(TestSequence&)> manipulate_sequence) {
+ WithInitialCapacities({capacity}, manipulate_sequence);
+ }
+
+ // For each capacity in |capacities|, create a TestSequence and run the given
+ // function on it.
+ static void WithInitialCapacities(
+ const std::vector<int>& capacities,
+ std::function<void(TestSequence&)> manipulate_sequence) {
+ for (int capacity : capacities) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope{isolate};
+ TestSequence<TestRunner> s(isolate, capacity);
+ manipulate_sequence(s);
+ }
+ }
+
+ Isolate* const isolate;
+ const int initial_capacity;
+
+ private:
+ // Caches keys used in this TestSequence. See |create_key_with_hash| for
+ // details.
+ KeyCache keys_;
+ TestRunner runner_;
+};
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_CCTEST_TEST_SWISS_NAME_DICTIONARY_INFRA_H_
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h b/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h
new file mode 100644
index 0000000000..96ad222b62
--- /dev/null
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-shared-tests.h
@@ -0,0 +1,942 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
+#define V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
+
+#include <algorithm>
+#include <string>
+
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+
+namespace v8 {
+namespace internal {
+namespace test_swiss_hash_table {
+
+// The name of the test-*.cc file that executes the tests below with the
+// RuntimeTestRunner.
+extern const char kRuntimeTestFileName[];
+
+// The name of the test-*.cc file that executes the tests below with the
+// CSATestRunner.
+extern const char kCSATestFileName[];
+
+// This class contains test cases for SwissNameDictionary that can be executed
+// by different "test runners", which are supplied as a template parameter. The
+// TestRunner determines how the operations on dictionaries are actually
+// executed. Currently there are two TestRunners: RuntimeTestRunner calls C++
+// functions, whereas CSATestRunner executes dictionary operations by executing
+// CSA-generated code.
+// To execute the tests, just create an instance of the class below with an
+// appropriate TestRunner.
+// Whenever creating an instance of this class in a file bar.cc, the template
+// parameter |kTestFileName| should be set to the name of the file that
+// *instantiates the class* (i.e., "bar.cc"). This ensures that the tests
+// defined below are then registered within the overall cctest machinery as if
+// they were directly written within bar.cc.
+template <typename TestRunner, char const* kTestFileName>
+struct SharedSwissTableTests {
+ STATIC_ASSERT((std::is_same<TestRunner, RuntimeTestRunner>::value) ||
+ (std::is_same<TestRunner, CSATestRunner>::value));
+
+ SharedSwissTableTests() {
+ CHECK(kTestFileName == kRuntimeTestFileName ||
+ kTestFileName == kCSATestFileName);
+ }
+
+ using TS = TestSequence<TestRunner>;
+
+ //
+ // Helpers
+ //
+
+ // We add this value when we want to create fake H1 values to prevent us from
+  // accidentally creating an overall hash of 0, which is forbidden. Because
+  // all H1 values are used modulo the capacity of the table, this has no
+ // effects. Note that using just this value itself as an H1 value means that a
+ // key will (try to) occupy bucket 0.
+ static const int kBigModulus = (1 << 22);
+ STATIC_ASSERT(SwissNameDictionary::IsValidCapacity(kBigModulus));
+
+  // Returns elements from TS::distinct_property_details in a deterministic
+ // order. Subsequent calls with increasing |index| (and the same |offset|)
+ // will return pairwise different values until |index| has risen by more than
+ // {TS::distinct_property_details.size()}.
+ static PropertyDetails distinct_details(int index, int offset = 0) {
+ int size = static_cast<int>(distinct_property_details.size());
+ return distinct_property_details[(index + offset) % size];
+ }
+
+ // Adds elements at the boundaries of the table, e.g. to buckets 0, 1,
+ // Capacity() - 2, and Capacity() - 1. (But only three of those if the table
+ // can't hold 4 elements without resizing).
+ static void AddAtBoundaries(TS& s) {
+ int capacity = s.initial_capacity;
+ std::vector<int> interesting_indices = s.boundary_indices(capacity);
+
+ s.CheckCounts(capacity, 0, 0);
+
+ int count = 0;
+ for (int index : interesting_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++);
+ s.Add(Key{key, FakeH1{index + kBigModulus}}, value, details);
+ }
+
+ // We didn't want to cause a resize:
+ s.CheckCounts(capacity);
+ }
+
+ // Adds |count| entries to the table, using their unmodified hashes, of the
+  // form key_i -> (value_i, details_i), where key_i and value_i are built from
+ // appending the actual index (e.g., 0, ...., counts - 1) to |key_prefix| and
+ // |value_prefix|, respectively. The property details are taken from
+ // |distinct_property_details|.
+ static void AddMultiple(TS& s, int count, std::string key_prefix = "key",
+ std::string value_prefix = "value",
+ int details_offset = 0) {
+ for (int i = 0; i < count; ++i) {
+ std::string key = key_prefix + std::to_string(i);
+ std::string value = value_prefix + std::to_string(i);
+ PropertyDetails d = distinct_details(i);
+ s.Add(Key{key}, value, d);
+ }
+ }
+
+ // Checks that |count| entries exist, as they would have been added by a call
+ // to AddMultiple with the same arguments.
+ static void CheckMultiple(TS& s, int count, std::string key_prefix = "key",
+ std::string value_prefix = "value",
+ int details_offset = 0) {
+ DCHECK_LE(count,
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity));
+
+ std::vector<std::string> expected_keys;
+ for (int i = 0; i < count; ++i) {
+ std::string key = key_prefix + std::to_string(i);
+ expected_keys.push_back(key);
+ std::string value = value_prefix + std::to_string(i);
+ int details_index =
+ (details_offset + i) % distinct_property_details.size();
+ PropertyDetails d = distinct_property_details[details_index];
+ s.CheckDataAtKey(Key{key}, kIndexUnknown, value, d);
+ }
+ s.CheckEnumerationOrder(expected_keys);
+ }
+
+ //
+ // Start of actual tests.
+ //
+
+ MEMBER_TEST(Allocation) {
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ // The test runner does the allocation automatically.
+ s.CheckCounts(s.initial_capacity, 0, 0);
+ s.VerifyHeap();
+ });
+ }
+
+ // Simple test for adding entries. Also uses non-Symbol keys and non-String
+ // values, which is not supported by the higher-level testing infrastructure.
+ MEMBER_TEST(SimpleAdd) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.CheckCounts(4, 0, 0);
+ s.CheckKeyAbsent(key1);
+
+ s.Add(key1, value1, details1);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1, details1);
+ s.CheckCounts(4, 1, 0);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.CheckKeyAbsent(key2);
+ s.Add(key2, value2, details2);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+ s.CheckCounts(4, 2, 0);
+ });
+ }
+
+ // Simple test for updating existing entries. Also uses non-Symbol keys and
+ // non-String values, which is not supported by the higher-level testing
+ // infrastructure.
+ MEMBER_TEST(SimpleUpdate) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key1, value1, details1);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key2, value2, details2);
+
+ // Until here same operations as in Test "Add".
+
+ Handle<Smi> value1_updated = handle(Smi::FromInt(456), s.isolate);
+ Handle<String> value2_updated =
+ s.isolate->factory()->InternalizeUtf8String("updated");
+ PropertyDetails details1_updated = details2;
+ PropertyDetails details2_updated = details1;
+
+ s.UpdateByKey(key1, value1_updated, details1_updated);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1_updated, details1_updated);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+
+ s.UpdateByKey(key2, value2_updated, details2_updated);
+ s.CheckDataAtKey(key1, kIndexUnknown, value1_updated, details1_updated);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2_updated, details2_updated);
+ s.CheckCounts(4, 2, 0);
+ });
+ }
+
+ // Simple test for deleting existing entries. Also uses non-Symbol keys and
+ // non-String values, which is not supported by the higher-level testing
+ // infrastructure.
+ MEMBER_TEST(SimpleDelete) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ Handle<String> key1 = s.isolate->factory()->InternalizeUtf8String("foo");
+ Handle<String> value1 =
+ s.isolate->factory()->InternalizeUtf8String("bar");
+ PropertyDetails details1 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key1, value1, details1);
+
+ Handle<Symbol> key2 = s.isolate->factory()->NewSymbol();
+ Handle<Smi> value2 = handle(Smi::FromInt(123), s.isolate);
+ PropertyDetails details2 =
+ PropertyDetails(PropertyKind::kData, PropertyAttributes::DONT_DELETE,
+ PropertyCellType::kNoCell);
+
+ s.Add(key2, value2, details2);
+
+ // Until here same operations as in Test "Add".
+
+ s.DeleteByKey(key1);
+ s.CheckKeyAbsent(key1);
+ s.CheckDataAtKey(key2, kIndexUnknown, value2, details2);
+ s.CheckCounts(4, 1, 1);
+
+ s.DeleteByKey(key2);
+ s.CheckKeyAbsent(key1);
+ s.CheckKeyAbsent(key2);
+ s.CheckCounts(4, 0, 0);
+ });
+ }
+
+  // Adds entries that occupy the boundaries (first and last
+ // buckets) of the hash table.
+ MEMBER_TEST(AddAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+ int size = static_cast<int>(boundary_indices.size());
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++);
+
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ s.CheckCounts(capacity, size, 0);
+ });
+ }
+
+  // Adds entries that occupy the boundaries of the hash table, then updates
+ // their values and property details.
+ MEMBER_TEST(UpdateAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+ int size = static_cast<int>(boundary_indices.size());
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "newv" + std::to_string(index);
+ // setting offset means getting other PropertyDetails than before
+ PropertyDetails details = distinct_details(count++, size);
+
+ s.UpdateByKey(Key{key, FakeH1{index + kBigModulus}}, value, details);
+ }
+
+ count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "newv" + std::to_string(index);
+ PropertyDetails details = distinct_details(count++, size);
+
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ });
+ }
+
+  // Adds entries that occupy the boundaries of the hash table, then deletes
+ // their values and property details.
+ MEMBER_TEST(DeleteAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ // The maximum value of {TS::boundary_indices(capacity).size()} for any
+ // |capacity|.
+ int count = 4;
+
+ // Due to shrink-on-delete, we create a new dictionary prior to each
+ // deletion, so that we don't re-hash (which would defeat the purpose of
+ // this test).
+ for (int i = 0; i < count; ++i) {
+ // In this iteration, we delete the i-th element of |boundary_indices|.
+
+ TS::WithAllInterestingInitialCapacities([&](TS& s) {
+ std::vector<int> boundary_indices =
+ TS::boundary_indices(s.initial_capacity);
+ int number_of_entries = static_cast<int>(boundary_indices.size());
+ DCHECK_GE(count, number_of_entries);
+
+ if (i >= static_cast<int>(boundary_indices.size())) {
+ // Nothing to do.
+ return;
+ }
+
+ AddAtBoundaries(s);
+
+ int entry_to_delete = boundary_indices[i];
+ int h1 = entry_to_delete + kBigModulus;
+
+ // We know that the key in question was added at bucket
+ // |entry_to_delete| by AddAtBoundaries.
+ Key key = Key{"k" + std::to_string(entry_to_delete), FakeH1{h1}};
+ s.DeleteByKey(key);
+ s.CheckKeyAbsent(key);
+
+ // Account for the fact that a shrink-on-delete may have happened.
+ int expected_capacity = number_of_entries - 1 < s.initial_capacity / 4
+ ? s.initial_capacity / 2
+ : s.initial_capacity;
+ s.CheckCounts(expected_capacity, number_of_entries - 1);
+ });
+ }
+ }
+
+  // Adds entries that occupy the boundaries of the hash table, then adds
+ // further entries targeting the same buckets.
+ MEMBER_TEST(OverwritePresentAtBoundaries) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithAllInterestingInitialCapacities([](TS& s) {
+ AddAtBoundaries(s);
+
+ int capacity = s.initial_capacity;
+
+ std::vector<int> boundary_indices = s.boundary_indices(capacity);
+
+ std::vector<std::string> keys, values;
+ std::vector<PropertyDetails> details;
+
+ int count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "additional_k" + std::to_string(index);
+ std::string value = "additional_v" + std::to_string(index);
+
+ PropertyDetails d = distinct_details(count++);
+ keys.push_back(key);
+ values.push_back(value);
+ details.push_back(d);
+ s.Add(Key{key, FakeH1{index + kBigModulus}}, value, d);
+ }
+
+ count = 0;
+ for (int entry : boundary_indices) {
+ std::string key = keys[count];
+ std::string value = values[count];
+ PropertyDetails d = details[count];
+
+ // We don't know the indices where the new entries will land.
+ s.CheckDataAtKey(Key{key, FakeH1{entry + kBigModulus}},
+ base::Optional<InternalIndex>(), value, d);
+ count++;
+ }
+
+ // The entries added by AddAtBoundaries must also still be there, at their
+ // original indices.
+ count = 0;
+ for (int index : boundary_indices) {
+ std::string key = "k" + std::to_string(index);
+ std::string value = "v" + std::to_string(index);
+ PropertyDetails details = distinct_property_details.at(count++);
+ s.CheckDataAtKey(Key{key, FakeH1{index + kBigModulus}},
+ InternalIndex(index), value, details);
+ }
+ });
+ }
+
+ MEMBER_TEST(Empty) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacities({0}, [](TS& s) {
+ // FindEntry on empty table succeeds.
+ s.CheckKeyAbsent(Key{"some non-existing key"});
+ });
+
+ TS::WithInitialCapacities({0}, [](TS& s) {
+ PropertyDetails d = PropertyDetails::Empty();
+
+ // Adding to empty table causes resize.
+ s.Add(Key{"some key"}, "some value", d);
+ s.CheckDataAtKey(Key{"some key"}, kIndexUnknown, "some value", d);
+
+ s.CheckCounts(SwissNameDictionary::kInitialCapacity, 1, 0);
+ });
+
+ TS::WithInitialCapacity(0, [](TS& s) { s.CheckEnumerationOrder({}); });
+
+ // Inplace rehashing and shrinking don't have CSA versions.
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(0, [](TS& s) {
+ s.RehashInplace();
+ s.CheckCounts(0, 0, 0);
+ s.VerifyHeap();
+ });
+
+ TS::WithInitialCapacity(0, [](TS& s) {
+ s.Shrink();
+ s.CheckCounts(0, 0, 0);
+ s.VerifyHeap();
+ });
+ }
+ }
+
+ // We test that hash tables get resized/rehashed correctly by repeatedly
+  // adding and deleting elements.
+ MEMBER_TEST(Resize1) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(0, [](TS& s) {
+ // Should be at least 8 so that we capture the transition from 8 bit to 16
+ // bit meta table entries:
+ const int max_exponent = 9;
+
+ // For all |exponent| between 0 and |max_exponent|, we add 2^|exponent|
+ // entries, and then delete every second one of those. Note that we do
+ // this all on a single table, meaning that the entries from the previous
+ // value of |exponent| are still present.
+ int added = 0;
+ int deleted = 0;
+ int offset = 0;
+ for (int exponent = 0; exponent <= max_exponent; ++exponent) {
+ int count = 1 << exponent;
+ for (int i = 0; i < count; ++i) {
+ std::string key = "key" + std::to_string(offset + i);
+ std::string value = "value" + std::to_string(offset + i);
+
+ s.Add(Key{key}, value, distinct_details(i, offset));
+ ++added;
+ }
+ for (int i = 0; i < count; i += 2) {
+ if (offset + i == 0) {
+ continue;
+ }
+ std::string key = "key" + std::to_string(offset + i);
+ s.DeleteByKey(Key{key});
+ ++deleted;
+ }
+
+ s.CheckCounts(kNoInt, added - deleted, kNoInt);
+ offset += count;
+ }
+
+ // Some internal consistency checks on the test itself:
+ DCHECK_EQ((1 << (max_exponent + 1)) - 1, offset);
+ DCHECK_EQ(offset, added);
+ DCHECK_EQ(offset / 2, deleted);
+
+ // Check that those entries that we expect are indeed present.
+ for (int i = 0; i < offset; i += 2) {
+ std::string key = "key" + std::to_string(i);
+ std::string value = "value" + std::to_string(i);
+
+ s.CheckDataAtKey(Key{key}, kIndexUnknown, value, distinct_details(i));
+ }
+ s.VerifyHeap();
+ });
+ }
+
+ // Check that we resize exactly when expected.
+ MEMBER_TEST(Resize2) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacities({4, 8, 16, 128}, [](TS& s) {
+ int count = SwissNameDictionary::MaxUsableCapacity(s.initial_capacity);
+
+ AddMultiple(s, count, "resize2");
+
+ // No resize:
+ s.CheckCounts(s.initial_capacity, count, 0);
+
+ s.Add(Key{"key causing resize"});
+ s.CheckCounts(2 * s.initial_capacity, count + 1, 0);
+ });
+ }
+
+  // There are certain capacities where we can fill every single bucket of the
+  // table before resizing (i.e., the max load factor is 100% for those
+  // particular configurations). Test that this works as intended.
+ MEMBER_TEST(AtFullCapacity) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ // Determine those capacities, allowing 100% max load factor. We trust
+ // MaxUsableCapacity to tell us which capacities that are (e.g., 4 and 8),
+ // because we tested that function separately elsewhere.
+ std::vector<int> capacities_allowing_full_utilization;
+ for (int c = SwissNameDictionary::kInitialCapacity;
+ c <= static_cast<int>(SwissNameDictionary::kGroupWidth); c *= 2) {
+ if (SwissNameDictionary::MaxUsableCapacity(c) == c) {
+ capacities_allowing_full_utilization.push_back(c);
+ }
+ }
+
+ DCHECK_IMPLIES(SwissNameDictionary::kGroupWidth == 16,
+ capacities_allowing_full_utilization.size() > 0);
+
+ TS::WithInitialCapacities(capacities_allowing_full_utilization, [](TS& s) {
+ AddMultiple(s, s.initial_capacity, "k_full_capacity", "v_full_capacity");
+
+ // No resize must have happened.
+ s.CheckCounts(s.initial_capacity, s.initial_capacity, 0);
+
+ CheckMultiple(s, s.initial_capacity, "k_full_capacity",
+ "v_full_capacity");
+
+      // Must make sure that the first |SwissNameDictionary::kGroupWidth|
+      // entries of the ctrl table contain a kEmpty, so that an unsuccessful
+      // search stops, instead of going into an infinite loop. Therefore, search
+      // for a fake key whose H1 is 0, making us start from ctrl table bucket 0.
+ s.CheckKeyAbsent(Key{"non_existing_key", FakeH1{0}, FakeH2{1}});
+ });
+ }
+
+ MEMBER_TEST(EnumerationOrder) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) return;
+
+ // This test times out on sanitizer builds in CSA mode when testing the
+ // larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test =
+ TS::IsRuntimeTest() ? interesting_initial_capacities
+ : capacities_for_slow_sanitizer_tests;
+
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ std::vector<std::string> expected_keys;
+ int count = std::min(
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity), 1000);
+
+ for (int i = 0; i < count; ++i) {
+ std::string key = "enumkey" + std::to_string(i);
+ expected_keys.push_back(key);
+ s.Add(Key{key});
+ }
+ s.CheckEnumerationOrder(expected_keys);
+
+ // Delete some entries.
+
+ std::string last_key = "enumkey" + std::to_string(count - 1);
+ s.DeleteByKey(Key{"enumkey0"});
+ s.DeleteByKey(Key{"enumkey1"});
+ s.DeleteByKey(Key{last_key});
+
+ auto should_be_deleted = [&](const std::string& k) -> bool {
+ return k == "enumkey0" || k == "enumkey1" || k == last_key;
+ };
+ expected_keys.erase(
+ std::remove_if(expected_keys.begin(), expected_keys.end(),
+ should_be_deleted),
+ expected_keys.end());
+ DCHECK_EQ(expected_keys.size(), count - 3);
+
+ s.CheckEnumerationOrder(expected_keys);
+
+ if (s.initial_capacity <= 1024) {
+ // Now cause a resize. Doing + 4 on top of the maximum usable capacity
+ // rather than just + 1 because in the case where the initial capacity
+ // is 4 and the group size is 8, the three deletes above caused a
+ // shrink, which in this case was just a rehash. So we need to add 4
+ // elements to cause a resize.
+ int resize_at =
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) + 4;
+
+ for (int i = count; i < resize_at; ++i) {
+ std::string key = "enumkey" + std::to_string(i);
+ expected_keys.push_back(key);
+ s.Add(Key{key});
+ }
+ s.CheckCounts(2 * s.initial_capacity);
+ s.CheckEnumerationOrder(expected_keys);
+ }
+ });
+ }
+
+ // Make sure that keys with colliding H1 and same H2 don't get mixed up.
+ MEMBER_TEST(SameH2) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ int i = 0;
+ TS::WithAllInterestingInitialCapacities([&](TS& s) {
+      // Let's try a few different values for h1, starting at big_modulus.
+ int first_h1 = i * 13 + kBigModulus;
+ int second_h1 = first_h1 + s.initial_capacity;
+
+ int first_entry = first_h1 % s.initial_capacity;
+ int second_entry = (first_h1 + 1) % s.initial_capacity;
+
+ // Add two keys with same H1 modulo capacity and same H2.
+ Key k1{"first_key", FakeH1{first_h1}, FakeH2{42}};
+ Key k2{"second_key", FakeH1{second_h1}, FakeH2{42}};
+
+ s.Add(k1, "v1");
+ s.Add(k2, "v2");
+
+ s.CheckDataAtKey(k1, InternalIndex(first_entry), "v1");
+ s.CheckDataAtKey(k2, InternalIndex(second_entry), "v2");
+
+ // Deletion works, too.
+ s.DeleteByKey(k2);
+ s.CheckHasKey(k1);
+ s.CheckKeyAbsent(k2);
+
+ ++i;
+ });
+ }
+
+ // Check that we can delete a key and add it again.
+ MEMBER_TEST(ReAddSameKey) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(4, [](TS& s) {
+ s.Add(Key{"some_key"}, "some_value", distinct_details(0));
+ s.DeleteByKey(Key{"some_key"});
+ s.Add(Key{"some_key"}, "new_value", distinct_details(1));
+ s.CheckDataAtKey(Key{"some_key"}, kIndexUnknown, "new_value",
+ distinct_details(1));
+ s.CheckEnumerationOrder({"some_key"});
+ });
+ }
+
+ // Make sure that we continue probing if there is no match in the first
+ // group and that the quadratic probing for choosing subsequent groups to
+ // probe works as intended.
+ MEMBER_TEST(BeyondInitialGroup) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(128, [](TS& s) {
+ int h1 = 33; // Arbitrarily chosen.
+ int count = 37; // Will lead to more than 2 groups being filled.
+
+ for (int i = 0; i < count; ++i) {
+ std::string key = "key" + std::to_string(i);
+ std::string value = "value" + std::to_string(i);
+
+ s.Add(Key{key, FakeH1{h1}}, value);
+ }
+
+ s.CheckDataAtKey(Key{"key36", FakeH1{h1}}, kIndexUnknown, "value36");
+
+ // Deleting something shouldn't disturb further additions.
+ s.DeleteByKey(Key{"key14", FakeH1{h1}});
+ s.DeleteByKey(Key{"key15", FakeH1{h1}});
+ s.DeleteByKey(Key{"key16", FakeH1{h1}});
+ s.DeleteByKey(Key{"key17", FakeH1{h1}});
+
+ s.Add(Key{"key37", FakeH1{h1}}, "value37");
+ s.CheckDataAtKey(Key{"key37", FakeH1{h1}}, kIndexUnknown, "value37");
+ });
+ }
+
+  // Check that we correctly "wrap around" when probing the control table. This
+  // means that when we probe a group starting at a bucket such that there are
+  // fewer than kGroupWidth buckets before the end of the control table, we
+  // (logically) continue at bucket 0. Note that actually, we use the copy of
+  // the first group at the end of the control table.
+ MEMBER_TEST(WrapAround) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) {
+ return;
+ }
+
+ // This test times out in CSA mode when testing the larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test = TS::IsRuntimeTest()
+ ? interesting_initial_capacities
+ : capacities_for_slow_debug_tests;
+
+ int width = SwissNameDictionary::kGroupWidth;
+ for (int offset_from_end = 0; offset_from_end < width; ++offset_from_end) {
+ TS::WithInitialCapacities(capacities_to_test, [&](TS& s) {
+ int capacity = s.initial_capacity;
+ int first_bucket = capacity - offset_from_end;
+
+ // How many entries to add (carefully chosen not to cause a resize).
+ int filler_entries =
+ std::min(width, SwissNameDictionary::MaxUsableCapacity(capacity)) -
+ 1;
+
+ if (first_bucket < 0 ||
+ // No wraparound in this case:
+ first_bucket + filler_entries < capacity) {
+ return;
+ }
+
+        // Starting at bucket |first_bucket|, add a sequence of |kGroupWidth|
+        // - 1 (if table can take that many, see calculation of |filler_entries|
+        // above) entries in a single collision chain.
+ for (int f = 0; f < filler_entries; ++f) {
+ std::string key = "filler" + std::to_string(f);
+ s.Add(Key{key, FakeH1{first_bucket}});
+ }
+
+ // ... then add a final key which (unless table too small) will end up
+ // in the last bucket belonging to the group started at |first_bucket|.
+ // Check that we can indeed find it.
+ s.Add(Key{"final_key", FakeH1{first_bucket}});
+ s.CheckDataAtKey(Key{"final_key", FakeH1{first_bucket}},
+ InternalIndex(filler_entries - offset_from_end));
+
+ // + 1 due to the final key.
+ s.CheckCounts(s.initial_capacity, filler_entries + 1, 0);
+
+ // Now delete the entries in between and make sure that this
+ // doesn't break anything.
+ for (int f = 0; f < filler_entries; ++f) {
+ std::string key = "filler" + std::to_string(f);
+ s.DeleteByKey(Key{key, FakeH1{first_bucket}});
+ }
+
+ s.CheckHasKey(Key{"final_key", FakeH1{first_bucket}});
+ });
+ }
+ }
+
+ MEMBER_TEST(RehashInplace) {
+ // This test may fully fill the table and hardly depends on the underlying
+ // shape (e.g., meta table structure). Thus not testing overly large
+ // capacities.
+ std::vector<int> capacities_to_test = {4, 8, 16, 128, 1024};
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ if (s.initial_capacity <= 8) {
+        // Add 3 elements, which will not cause a resize. Then delete the
+        // first key before rehashing.
+
+ AddMultiple(s, 3);
+ s.DeleteByKey(Key{"key0"});
+
+ // We shouldn't have done a resize on deletion or addition:
+ s.CheckCounts(s.initial_capacity, 2, 1);
+
+ s.RehashInplace();
+
+ s.CheckDataAtKey(Key{"key1"}, kIndexUnknown, "value1");
+ s.CheckDataAtKey(Key{"key2"}, kIndexUnknown, "value2");
+ s.CheckEnumerationOrder({"key1", "key2"});
+ } else {
+ int count =
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) - 5;
+ AddMultiple(s, count);
+
+ s.DeleteByKey(Key{"key1"});
+ s.DeleteByKey(Key{"key2"});
+ s.DeleteByKey(Key{"key" + std::to_string(count - 1)});
+
+ // We shouldn't have done a resize on deletion or addition:
+ s.CheckCounts(s.initial_capacity, count - 3, 3);
+
+ s.RehashInplace();
+
+ std::vector<std::string> expected_enum_order;
+ for (int i = 0; i < count; ++i) {
+ if (i == 1 || i == 2 || i == count - 1) {
+ // These are the keys we deleted.
+ continue;
+ }
+
+ std::string key = "key" + std::to_string(i);
+ PropertyDetails d =
+ distinct_property_details[i % distinct_property_details.size()];
+ s.CheckDataAtKey(Key{key}, kIndexUnknown,
+ "value" + std::to_string(i), d);
+
+ expected_enum_order.push_back(key);
+ }
+
+ s.CheckEnumerationOrder(expected_enum_order);
+ }
+ });
+ }
+ }
+
+ MEMBER_TEST(Shrink) {
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(32, [&](TS& s) {
+      // Filling less than a fourth of the table:
+ int count = 4;
+
+ AddMultiple(s, count);
+
+ s.Shrink();
+
+ CheckMultiple(s, count, "key", "value", 0);
+
+ // Shrink doesn't shrink to fit, but only halves the capacity.
+ int expected_capacity = s.initial_capacity / 2;
+ s.CheckCounts(expected_capacity, 4, 0);
+
+ s.CheckEnumerationOrder({"key0", "key1", "key2", "key3"});
+ s.VerifyHeap();
+ });
+ }
+ }
+
+ MEMBER_TEST(ShrinkToInitial) {
+ // When shrinking, we never go below SwissNameDictionary::kInitialCapacity.
+ if (TS::IsRuntimeTest()) {
+ TS::WithInitialCapacity(8, [&](TS& s) {
+ s.Shrink();
+
+ s.CheckCounts(SwissNameDictionary::kInitialCapacity, 0, 0);
+ });
+ }
+ }
+
+ MEMBER_TEST(ShrinkOnDelete) {
+ // TODO(v8:11330): Remove once CSA implementation has a fallback for
+ // non-SSSE3/AVX configurations.
+ if (!TestRunner::IsEnabled()) return;
+ TS::WithInitialCapacity(32, [](TS& s) {
+ // Adds key0 ... key9:
+ AddMultiple(s, 10);
+
+      // We remove some entries. Each time less than a fourth of the table is
+      // used by present entries, it's shrunk to half.
+
+ s.DeleteByKey(Key{"key9"});
+ s.DeleteByKey(Key{"key8"});
+
+ s.CheckCounts(32, 8, 2);
+
+ s.DeleteByKey(Key{"key7"});
+
+ // Deleted count is 0 after rehash.
+ s.CheckCounts(16, 7, 0);
+ });
+ }
+
+ MEMBER_TEST(Copy) {
+ // TODO(v8:11330) Disabling this for now until the real CSA testing has
+ // landed.
+ if (true) return;
+
+ // This test times out on sanitizer builds in CSA mode when testing the
+ // larger capacities.
+ // TODO(v8:11330) Revisit this once the actual CSA/Torque versions are run
+ // by the test suite, which will speed things up.
+ std::vector<int> capacities_to_test =
+ TS::IsRuntimeTest() ? interesting_initial_capacities
+ : capacities_for_slow_sanitizer_tests;
+ TS::WithInitialCapacities(capacities_to_test, [](TS& s) {
+ int fill = std::min(
+ 1000,
+ // -2 due to the two manually added keys below.
+ SwissNameDictionary::MaxUsableCapacity(s.initial_capacity) - 2);
+ AddMultiple(s, fill);
+
+      // Occupy first and last bucket (another key may occupy these already,
+      // but let's not bother with that):
+ s.Add(Key{"first_bucket_key", FakeH1{kBigModulus}});
+ s.Add(Key{"last_bucket_key", FakeH1{s.initial_capacity - 1}});
+
+ // We shouldn't have caused a resize.
+ s.CheckCounts(s.initial_capacity);
+
+ // Creates a copy and compares it against the original. In order to check
+ // copying of large dictionary, need to check before deletion due to
+ // shrink-on-delete kicking in.
+ s.CheckCopy();
+
+ // Let's delete a few entries, most notably the first and last two in enum
+ // order and the keys (potentially) occupying the first and last bucket.
+ s.DeleteByKey(Key{"key0"});
+ if (fill > 1) {
+ s.DeleteByKey(Key{"key1"});
+ }
+ s.DeleteByKey(Key{"first_bucket_key", FakeH1{kBigModulus}});
+ s.DeleteByKey(Key{"last_bucket_key", FakeH1{s.initial_capacity - 1}});
+
+ s.CheckCopy();
+ });
+ }
+};
+
+} // namespace test_swiss_hash_table
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_CCTEST_TEST_SWISS_HASH_TABLE_SHARED_TESTS_H_
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary.cc b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
index e274eed358..0aabd5981d 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary.cc
@@ -4,11 +4,153 @@
#include "src/objects/swiss-name-dictionary-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/test-swiss-name-dictionary-infra.h"
+#include "test/cctest/test-swiss-name-dictionary-shared-tests.h"
namespace v8 {
namespace internal {
namespace test_swiss_hash_table {
+// Executes tests by executing C++ versions of dictionary operations.
+class RuntimeTestRunner {
+ public:
+ RuntimeTestRunner(Isolate* isolate, int initial_capacity, KeyCache& keys)
+ : isolate_{isolate}, keys_{keys} {
+ table = isolate->factory()->NewSwissNameDictionaryWithCapacity(
+ initial_capacity, AllocationType::kYoung);
+ }
+
+  // The runtime implementation does not depend on the CPU features and
+  // therefore always works.
+ static bool IsEnabled() { return true; }
+
+ void Add(Handle<Name> key, Handle<Object> value, PropertyDetails details);
+ InternalIndex FindEntry(Handle<Name> key);
+ // Updates the value and property details of the given entry.
+ void Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details);
+ void Delete(InternalIndex entry);
+ void RehashInplace();
+ void Shrink();
+
+ // Retrieves data associated with |entry|, which must be an index pointing to
+ // an existing entry. The returned array contains key, value, property details
+ // in that order.
+ Handle<FixedArray> GetData(InternalIndex entry);
+
+ // Tests that the current table has the given capacity, and number of
+ // (deleted) elements, based on which optional values are present.
+ void CheckCounts(base::Optional<int> capacity, base::Optional<int> elements,
+ base::Optional<int> deleted);
+ // Checks that |expected_keys| contains exactly the keys in the current table,
+ // in the given order.
+ void CheckEnumerationOrder(const std::vector<std::string>& expected_keys);
+ void CheckCopy();
+ void VerifyHeap();
+
+ // Just for debugging.
+ void PrintTable();
+
+ Handle<SwissNameDictionary> table;
+
+ private:
+ Isolate* isolate_;
+ KeyCache& keys_;
+};
+
+void RuntimeTestRunner::Add(Handle<Name> key, Handle<Object> value,
+ PropertyDetails details) {
+ Handle<SwissNameDictionary> updated_table =
+ SwissNameDictionary::Add(isolate_, this->table, key, value, details);
+ this->table = updated_table;
+}
+
+InternalIndex RuntimeTestRunner::FindEntry(Handle<Name> key) {
+ return table->FindEntry(isolate_, key);
+}
+
+Handle<FixedArray> RuntimeTestRunner::GetData(InternalIndex entry) {
+ if (entry.is_found()) {
+ Handle<FixedArray> data = isolate_->factory()->NewFixedArray(3);
+ data->set(0, table->KeyAt(entry));
+ data->set(1, table->ValueAt(entry));
+ data->set(2, table->DetailsAt(entry).AsSmi());
+ return data;
+ } else {
+ return handle(ReadOnlyRoots(isolate_).empty_fixed_array(), isolate_);
+ }
+}
+
+void RuntimeTestRunner::Put(InternalIndex entry, Handle<Object> new_value,
+ PropertyDetails new_details) {
+ CHECK(entry.is_found());
+
+ table->ValueAtPut(entry, *new_value);
+ table->DetailsAtPut(entry, new_details);
+}
+
+void RuntimeTestRunner::Delete(InternalIndex entry) {
+ CHECK(entry.is_found());
+ table = table->DeleteEntry(isolate_, table, entry);
+}
+
+void RuntimeTestRunner::CheckCounts(base::Optional<int> capacity,
+ base::Optional<int> elements,
+ base::Optional<int> deleted) {
+ if (capacity.has_value()) {
+ CHECK_EQ(capacity.value(), table->Capacity());
+ }
+ if (elements.has_value()) {
+ CHECK_EQ(elements.value(), table->NumberOfElements());
+ }
+ if (deleted.has_value()) {
+ CHECK_EQ(deleted.value(), table->NumberOfDeletedElements());
+ }
+}
+
+void RuntimeTestRunner::CheckEnumerationOrder(
+ const std::vector<std::string>& expected_keys) {
+ ReadOnlyRoots roots(isolate_);
+ int i = 0;
+ for (InternalIndex index : table->IterateEntriesOrdered()) {
+ Object key;
+ if (table->ToKey(roots, index, &key)) {
+ CHECK_LT(i, expected_keys.size());
+ Handle<Name> expected_key =
+ CreateKeyWithHash(isolate_, this->keys_, Key{expected_keys[i]});
+
+ CHECK_EQ(key, *expected_key);
+ ++i;
+ }
+ }
+ CHECK_EQ(i, expected_keys.size());
+}
+
+void RuntimeTestRunner::RehashInplace() { table->Rehash(isolate_); }
+
+void RuntimeTestRunner::Shrink() {
+ table = SwissNameDictionary::Shrink(isolate_, table);
+}
+
+void RuntimeTestRunner::CheckCopy() {
+ Handle<SwissNameDictionary> copy =
+ SwissNameDictionary::ShallowCopy(isolate_, table);
+
+ CHECK(table->EqualsForTesting(*copy));
+}
+
+void RuntimeTestRunner::VerifyHeap() {
+#if VERIFY_HEAP
+ table->SwissNameDictionaryVerify(isolate_, true);
+#endif
+}
+
+void RuntimeTestRunner::PrintTable() {
+#ifdef OBJECT_PRINT
+ table->SwissNameDictionaryPrint(std::cout);
+#endif
+}
+
TEST(CapacityFor) {
for (int elements = 0; elements <= 32; elements++) {
int capacity = SwissNameDictionary::CapacityFor(elements);
@@ -76,6 +218,14 @@ TEST(SizeFor) {
CHECK_EQ(SwissNameDictionary::SizeFor(8), size_8);
}
+// Executes the tests defined in test-swiss-name-dictionary-shared-tests.h as if
+// they were defined in this file, using the RuntimeTestRunner. See comments in
+// test-swiss-name-dictionary-shared-tests.h and in
+// swiss-name-dictionary-infra.h for details.
+const char kRuntimeTestFileName[] = __FILE__;
+SharedSwissTableTests<RuntimeTestRunner, kRuntimeTestFileName>
+ execute_shared_tests_runtime;
+
} // namespace test_swiss_hash_table
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index 867d0f90b9..0134befedd 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -71,30 +71,6 @@ TEST(CopyContentsView) {
TestArrayBufferViewContents(&env, true);
}
-
-TEST(AllocateNotExternal) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- void* memory = reinterpret_cast<Isolate*>(env->GetIsolate())
- ->array_buffer_allocator()
- ->Allocate(1024);
-
-// Keep the test until the functions are removed.
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- v8::Local<v8::ArrayBuffer> buffer =
- v8::ArrayBuffer::New(env->GetIsolate(), memory, 1024,
- v8::ArrayBufferCreationMode::kInternalized);
- CHECK(!buffer->IsExternal());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-
- CHECK_EQ(memory, buffer->GetBackingStore()->Data());
-}
-
void TestSpeciesProtector(char* code,
bool invalidates_species_protector = true) {
v8::Isolate::CreateParams create_params;
diff --git a/deps/v8/test/cctest/test-verifiers.cc b/deps/v8/test/cctest/test-verifiers.cc
index 80e2517cd2..8e393ae163 100644
--- a/deps/v8/test/cctest/test-verifiers.cc
+++ b/deps/v8/test/cctest/test-verifiers.cc
@@ -70,7 +70,7 @@ TEST_PAIR(TestWrongStrongTypeInIndexedStructField) {
v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
Handle<Object> o = v8::Utils::OpenHandle(*v);
Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(i_isolate),
i_isolate);
int offset = DescriptorArray::OffsetOfDescriptorAt(1) +
DescriptorArray::kEntryKeyOffset;
@@ -102,7 +102,7 @@ TEST_PAIR(TestWrongWeakTypeInIndexedStructField) {
v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
Handle<Object> o = v8::Utils::OpenHandle(*v);
Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(i_isolate),
i_isolate);
int offset = DescriptorArray::OffsetOfDescriptorAt(0) +
DescriptorArray::kEntryValueOffset;
diff --git a/deps/v8/test/cctest/test-web-snapshots.cc b/deps/v8/test/cctest/test-web-snapshots.cc
new file mode 100644
index 0000000000..b7f314318a
--- /dev/null
+++ b/deps/v8/test/cctest/test-web-snapshots.cc
@@ -0,0 +1,131 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/web-snapshot/web-snapshot.h"
+#include "test/cctest/cctest-utils.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void TestWebSnapshot(const char* snapshot_source, const char* test_source,
+ const char* expected_result, uint32_t string_count,
+ uint32_t map_count, uint32_t context_count,
+ uint32_t function_count, uint32_t object_count) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ CompileRun(snapshot_source);
+ WebSnapshotData snapshot_data;
+ {
+ std::vector<std::string> exports;
+ exports.push_back("foo");
+ WebSnapshotSerializer serializer(isolate);
+ CHECK(serializer.TakeSnapshot(context, exports, snapshot_data));
+ CHECK(!serializer.has_error());
+ CHECK_NOT_NULL(snapshot_data.buffer);
+ CHECK_EQ(string_count, serializer.string_count());
+ CHECK_EQ(map_count, serializer.map_count());
+ CHECK_EQ(context_count, serializer.context_count());
+ CHECK_EQ(function_count, serializer.function_count());
+ CHECK_EQ(object_count, serializer.object_count());
+ }
+
+ {
+ v8::Local<v8::Context> new_context = CcTest::NewContext();
+ v8::Context::Scope context_scope(new_context);
+ WebSnapshotDeserializer deserializer(isolate);
+ CHECK(deserializer.UseWebSnapshot(snapshot_data.buffer,
+ snapshot_data.buffer_size));
+ CHECK(!deserializer.has_error());
+ v8::Local<v8::String> result = CompileRun(test_source).As<v8::String>();
+ CHECK(result->Equals(new_context, v8_str(expected_result)).FromJust());
+ CHECK_EQ(string_count, deserializer.string_count());
+ CHECK_EQ(map_count, deserializer.map_count());
+ CHECK_EQ(context_count, deserializer.context_count());
+ CHECK_EQ(function_count, deserializer.function_count());
+ CHECK_EQ(object_count, deserializer.object_count());
+ }
+}
+
+} // namespace
+
+TEST(Minimal) {
+ const char* snapshot_source = "var foo = {'key': 'lol'};";
+ const char* test_source = "foo.key";
+ const char* expected_result = "lol";
+ uint32_t kStringCount = 3; // 'foo', 'key', 'lol'
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 0;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(Function) {
+ const char* snapshot_source =
+ "var foo = {'key': function() { return '11525'; }};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ uint32_t kStringCount = 3; // 'foo', 'key', function source code
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 0;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(InnerFunctionWithContext) {
+ const char* snapshot_source =
+ "var foo = {'key': (function() {\n"
+ " let result = '11525';\n"
+ " function inner() { return result; }\n"
+ " return inner;\n"
+ " })()};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ // Strings: 'foo', 'key', function source code (inner), 'result', '11525'
+ uint32_t kStringCount = 5;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 1;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+TEST(InnerFunctionWithContextAndParentContext) {
+ const char* snapshot_source =
+ "var foo = {'key': (function() {\n"
+ " let part1 = '11';\n"
+ " function inner() {\n"
+ " let part2 = '525';\n"
+ " function innerinner() {\n"
+ " return part1 + part2;\n"
+ " }\n"
+ " return innerinner;\n"
+ " }\n"
+ " return inner();\n"
+ " })()};";
+ const char* test_source = "foo.key()";
+ const char* expected_result = "11525";
+ // Strings: 'foo', 'key', function source code (innerinner), 'part1', 'part2',
+ // '11', '525'
+ uint32_t kStringCount = 7;
+ uint32_t kMapCount = 1;
+ uint32_t kContextCount = 2;
+ uint32_t kFunctionCount = 1;
+ uint32_t kObjectCount = 1;
+ TestWebSnapshot(snapshot_source, test_source, expected_result, kStringCount,
+ kMapCount, kContextCount, kFunctionCount, kObjectCount);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-backing-store.cc b/deps/v8/test/cctest/wasm/test-backing-store.cc
index f8010d3031..2dc2fc89a7 100644
--- a/deps/v8/test/cctest/test-backing-store.cc
+++ b/deps/v8/test/cctest/wasm/test-backing-store.cc
@@ -5,12 +5,12 @@
#include "src/api/api-inl.h"
#include "src/objects/backing-store.h"
#include "src/wasm/wasm-objects.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/manually-externalized-buffer.h"
namespace v8 {
namespace internal {
+namespace wasm {
using testing::ManuallyExternalizedBuffer;
@@ -22,7 +22,7 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
HandleScope scope(isolate);
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
- wasm::kWasmPageSize, InitializedFlag::kZeroInitialized);
+ kWasmPageSize, InitializedFlag::kZeroInitialized);
Handle<JSArrayBuffer> buffer = result.ToHandleChecked();
// Embedder requests contents.
@@ -81,5 +81,6 @@ TEST(BackingStore_Reclaim) {
}
#endif
+} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index dd1dfea0f1..07084b2265 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -609,6 +609,7 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WasmGCTester tester(execution_tier);
const byte type_index = tester.DefineArray(wasm::kWasmI32, true);
+ const byte fp_type_index = tester.DefineArray(wasm::kWasmF64, true);
ValueType kRefTypes[] = {ref(type_index)};
FunctionSig sig_q_v(1, 0, kRefTypes);
ValueType kOptRefType = optref(type_index);
@@ -655,6 +656,20 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WASM_RTT_CANON(type_index)),
kExprEnd});
+ // Tests that fp arrays work properly.
+ // f: a = [10.0, 10.0, 10.0]; a[1] = 42.42; return static_cast<int64>(a[1]);
+ double result_value = 42.42;
+ const byte kTestFpArray = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(fp_type_index)},
+ {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_WITH_RTT(
+ fp_type_index, WASM_F64(10.0), WASM_I32V(3),
+ WASM_RTT_CANON(fp_type_index))),
+ WASM_ARRAY_SET(fp_type_index, WASM_LOCAL_GET(0), WASM_I32V(1),
+ WASM_F64(result_value)),
+ WASM_I32_SCONVERT_F64(
+ WASM_ARRAY_GET(fp_type_index, WASM_LOCAL_GET(0), WASM_I32V(1))),
+ kExprEnd});
+
tester.CompileModule();
tester.CheckResult(kGetElem, 12, 0);
@@ -663,6 +678,7 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
tester.CheckHasThrown(kGetElem, 3);
tester.CheckHasThrown(kGetElem, -1);
tester.CheckResult(kGetLength, 42);
+ tester.CheckResult(kTestFpArray, static_cast<int32_t>(result_value));
MaybeHandle<Object> h_result = tester.GetResultObject(kAllocate);
CHECK(h_result.ToHandleChecked()->IsWasmArray());
@@ -863,6 +879,105 @@ WASM_COMPILED_EXEC_TEST(BasicRtt) {
tester.CheckResult(kRefCast, 43);
}
+WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
+ WasmGCTester tester(execution_tier);
+ byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
+ byte subtype_index =
+ tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)});
+ ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
+ FunctionSig sig(1, 2, sig_types);
+ byte sig_index = tester.DefineSignature(&sig);
+
+ const byte kRefTestNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(type_index), WASM_RTT_CANON(subtype_index)),
+ kExprEnd});
+ const byte kRefTestUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ const byte kRefTestUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(subtype_index), WASM_RTT_CANON(type_index)),
+ kExprEnd});
+ const byte kRefTestUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+ const byte kRefTestUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(WASM_REF_NULL(subtype_index), WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+ const byte kRefTestUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST(
+ WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index)),
+ kExprEnd});
+
+ const byte kRefCastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(type_index),
+ WASM_RTT_CANON(subtype_index))),
+ kExprEnd});
+ const byte kRefCastUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
+ const byte kRefCastUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(subtype_index),
+ WASM_RTT_CANON(type_index))),
+ kExprEnd});
+ const byte kRefCastUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(
+ subtype_index,
+ WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+ const byte kRefCastUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(subtype_index),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+ const byte kRefCastUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckResult(kRefTestNull, 0);
+ tester.CheckResult(kRefTestUpcast, 1);
+ tester.CheckResult(kRefTestUpcastNull, 0);
+ tester.CheckResult(kRefTestUnrelated, 0);
+ tester.CheckResult(kRefTestUnrelatedNull, 0);
+ tester.CheckResult(kRefTestUnrelatedNonNullable, 0);
+
+ tester.CheckResult(kRefCastNull, 1);
+ tester.CheckResult(kRefCastUpcast, 0);
+ tester.CheckResult(kRefCastUpcastNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelated);
+ tester.CheckResult(kRefCastUnrelatedNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelatedNonNullable);
+}
+
WASM_EXEC_TEST(NoDepthRtt) {
WasmGCTester tester(execution_tier);
@@ -871,14 +986,19 @@ WASM_EXEC_TEST(NoDepthRtt) {
tester.DefineStruct({F(wasm::kWasmI32, true), F(wasm::kWasmI32, true)});
const byte empty_struct_index = tester.DefineStruct({});
+ ValueType kRttTypeNoDepth = ValueType::Rtt(type_index);
+ FunctionSig sig_t1_v_nd(1, 0, &kRttTypeNoDepth);
ValueType kRttSubtypeNoDepth = ValueType::Rtt(subtype_index);
FunctionSig sig_t2_v_nd(1, 0, &kRttSubtypeNoDepth);
+ const byte kRttTypeCanon = tester.DefineFunction(
+ &sig_t1_v_nd, {}, {WASM_RTT_CANON(type_index), kExprEnd});
const byte kRttSubtypeCanon = tester.DefineFunction(
&sig_t2_v_nd, {}, {WASM_RTT_CANON(subtype_index), kExprEnd});
const byte kRttSubtypeSub = tester.DefineFunction(
&sig_t2_v_nd, {},
- {WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index)), kExprEnd});
+ {WASM_RTT_SUB(subtype_index, WASM_CALL_FUNCTION0(kRttTypeCanon)),
+ kExprEnd});
const byte kTestCanon = tester.DefineFunction(
tester.sigs.i_v(), {optref(type_index)},
@@ -1059,23 +1179,46 @@ WASM_COMPILED_EXEC_TEST(CallRef) {
tester.CheckResult(caller, 47, 5);
}
-WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
+WASM_COMPILED_EXEC_TEST(CallReftypeParameters) {
WasmGCTester tester(execution_tier);
byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
-
- const byte kRefTestNull = tester.DefineFunction(
- tester.sigs.i_v(), {},
- {WASM_REF_TEST(WASM_REF_NULL(type_index), WASM_RTT_CANON(type_index)),
+ ValueType kRefType{optref(type_index)};
+ ValueType sig_types[] = {kWasmI32, kRefType, kRefType, kRefType, kRefType,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(1, 8, sig_types);
+ byte adder = tester.DefineFunction(
+ &sig, {},
+ {WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(0)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(1)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(2)),
+ WASM_I32_ADD(
+ WASM_STRUCT_GET(type_index, 0, WASM_LOCAL_GET(3)),
+ WASM_I32_ADD(
+ WASM_LOCAL_GET(4),
+ WASM_I32_ADD(WASM_LOCAL_GET(5),
+ WASM_I32_ADD(WASM_LOCAL_GET(6),
+ WASM_LOCAL_GET(7)))))))),
kExprEnd});
-
- const byte kRefCastNull = tester.DefineFunction(
+ byte caller = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(type_index),
- WASM_RTT_CANON(type_index))),
+ {WASM_CALL_FUNCTION(adder,
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(2),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(4),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(8),
+ WASM_RTT_CANON(type_index)),
+ WASM_STRUCT_NEW_WITH_RTT(type_index, WASM_I32V(16),
+ WASM_RTT_CANON(type_index)),
+ WASM_I32V(32), WASM_I32V(64), WASM_I32V(128),
+ WASM_I32V(256)),
kExprEnd});
+
tester.CompileModule();
- tester.CheckResult(kRefTestNull, 0);
- tester.CheckResult(kRefCastNull, 1);
+ tester.CheckResult(caller, 510);
}
WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
diff --git a/deps/v8/test/cctest/wasm/test-grow-memory.cc b/deps/v8/test/cctest/wasm/test-grow-memory.cc
index 662c037a58..d3ad66aa4b 100644
--- a/deps/v8/test/cctest/wasm/test-grow-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-grow-memory.cc
@@ -83,7 +83,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(6)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
index f5847d1fb1..551d8f214b 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-inspection.cc
@@ -6,6 +6,7 @@
#include "src/wasm/wasm-debug.h"
#include "test/cctest/cctest.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
@@ -89,6 +90,8 @@ class LiftoffCompileEnvironment {
return debug_side_table_via_compilation;
}
+ TestingModuleBuilder* builder() { return &wasm_runner_.builder(); }
+
private:
static void CheckTableEquals(const DebugSideTable& a,
const DebugSideTable& b) {
@@ -177,7 +180,7 @@ struct DebugSideTableEntry {
// Check for equality, but ignore exact register and stack offset.
static bool CheckValueEquals(const DebugSideTable::Entry::Value& a,
const DebugSideTable::Entry::Value& b) {
- return a.index == b.index && a.kind == b.kind && a.kind == b.kind &&
+ return a.index == b.index && a.type == b.type && a.storage == b.storage &&
(a.storage != DebugSideTable::Entry::kConstant ||
a.i32_const == b.i32_const);
}
@@ -189,7 +192,7 @@ std::ostream& operator<<(std::ostream& out, const DebugSideTableEntry& entry) {
out << "stack height " << entry.stack_height << ", changed: {";
const char* comma = "";
for (auto& v : entry.changed_values) {
- out << comma << v.index << ":" << name(v.kind) << " ";
+ out << comma << v.index << ":" << v.type.name() << " ";
switch (v.storage) {
case DebugSideTable::Entry::kConstant:
out << "const:" << v.i32_const;
@@ -213,26 +216,26 @@ std::ostream& operator<<(std::ostream& out,
#endif // DEBUG
// Named constructors to make the tests more readable.
-DebugSideTable::Entry::Value Constant(int index, ValueKind kind,
+DebugSideTable::Entry::Value Constant(int index, ValueType type,
int32_t constant) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kConstant;
value.i32_const = constant;
return value;
}
-DebugSideTable::Entry::Value Register(int index, ValueKind kind) {
+DebugSideTable::Entry::Value Register(int index, ValueType type) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kRegister;
return value;
}
-DebugSideTable::Entry::Value Stack(int index, ValueKind kind) {
+DebugSideTable::Entry::Value Stack(int index, ValueType type) {
DebugSideTable::Entry::Value value;
value.index = index;
- value.kind = kind;
+ value.type = type;
value.storage = DebugSideTable::Entry::kStack;
return value;
}
@@ -296,9 +299,9 @@ TEST(Liftoff_debug_side_table_simple) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// OOL stack check, locals spilled, stack still empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
},
debug_side_table.get());
}
@@ -312,9 +315,9 @@ TEST(Liftoff_debug_side_table_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// call, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL stack check, local spilled as before, stack empty.
{1, {}},
},
@@ -332,11 +335,11 @@ TEST(Liftoff_debug_side_table_call_const) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// call, local is kConst.
- {1, {Constant(0, kI32, kConst)}},
+ {1, {Constant(0, kWasmI32, kConst)}},
// OOL stack check, local spilled.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
},
debug_side_table.get());
}
@@ -351,13 +354,13 @@ TEST(Liftoff_debug_side_table_indirect_call) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// indirect call, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL stack check, local still spilled.
{1, {}},
// OOL trap (invalid index), local still spilled, stack has {kConst}.
- {2, {Constant(1, kI32, kConst)}},
+ {2, {Constant(1, kWasmI32, kConst)}},
// OOL trap (sig mismatch), stack unmodified.
{2, {}},
},
@@ -373,11 +376,11 @@ TEST(Liftoff_debug_side_table_loop) {
CheckDebugSideTable(
{
// function entry, local in register.
- {1, {Register(0, kI32)}},
+ {1, {Register(0, kWasmI32)}},
// OOL stack check, local spilled, stack empty.
- {1, {Stack(0, kI32)}},
+ {1, {Stack(0, kWasmI32)}},
// OOL loop stack check, local still spilled, stack has {kConst}.
- {2, {Constant(1, kI32, kConst)}},
+ {2, {Constant(1, kWasmI32, kConst)}},
},
debug_side_table.get());
}
@@ -390,9 +393,9 @@ TEST(Liftoff_debug_side_table_trap) {
CheckDebugSideTable(
{
// function entry, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// OOL stack check, local spilled, stack empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
// OOL trap (div by zero), stack as before.
{2, {}},
// OOL trap (unrepresentable), stack as before.
@@ -414,11 +417,38 @@ TEST(Liftoff_breakpoint_simple) {
CheckDebugSideTable(
{
// First break point, locals in registers.
- {2, {Register(0, kI32), Register(1, kI32)}},
+ {2, {Register(0, kWasmI32), Register(1, kWasmI32)}},
// Second break point, locals unchanged, two register stack values.
- {4, {Register(2, kI32), Register(3, kI32)}},
+ {4, {Register(2, kWasmI32), Register(3, kWasmI32)}},
// OOL stack check, locals spilled, stack empty.
- {2, {Stack(0, kI32), Stack(1, kI32)}},
+ {2, {Stack(0, kWasmI32), Stack(1, kWasmI32)}},
+ },
+ debug_side_table.get());
+}
+
+TEST(Liftoff_debug_side_table_catch_all) {
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ LiftoffCompileEnvironment env;
+ TestSignatures sigs;
+ int ex = env.builder()->AddException(sigs.v_v());
+ ValueType exception_type = ValueType::Ref(HeapType::kExtern, kNonNullable);
+ auto debug_side_table = env.GenerateDebugSideTable(
+ {}, {kWasmI32},
+ {WASM_TRY_CATCH_ALL_T(kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(ex)),
+ WASM_I32V(1)),
+ WASM_DROP},
+ {
+ 18 // Break at the end of the try block.
+ });
+ CheckDebugSideTable(
+ {
+ // function entry.
+ {1, {Register(0, kWasmI32)}},
+ // breakpoint.
+ {3,
+ {Stack(0, kWasmI32), Register(1, exception_type),
+ Constant(2, kWasmI32, 1)}},
+ {1, {}},
},
debug_side_table.get());
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 37163c0a8c..760a7cc7ea 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -1491,7 +1491,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
WasmRunner<void> r(tier);
- FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
+ FunctionSig* sig = sigs.many(r.zone(), kWasmVoid, param, num_params);
r.builder().AddSignature(sig);
r.builder().AddSignature(sig);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index e55547911b..bb61f93ac3 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -99,6 +99,30 @@ WASM_EXEC_TEST(TryCatchAllThrow) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build the main test function.
+ BUILD(r, kExprTry, static_cast<byte>((kWasmI32).value_type_code()),
+ WASM_STMTS(WASM_I32V(kResult1), WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_THROW(except))),
+ kExprCatchAll, WASM_I32V(kResult0), kExprEnd);
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchCatchAllThrow) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
uint32_t except1 = r.builder().AddException(sigs.v_v());
uint32_t except2 = r.builder().AddException(sigs.v_v());
constexpr uint32_t kResult0 = 23;
@@ -112,8 +136,8 @@ WASM_EXEC_TEST(TryCatchAllThrow) {
WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)), WASM_THROW(except1)),
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_I32V(1)),
WASM_THROW(except2))),
- kExprCatch, except1, WASM_STMTS(WASM_I32V(kResult0)), kExprCatchAll,
- WASM_STMTS(WASM_I32V(kResult1)), kExprEnd);
+ kExprCatch, except1, WASM_I32V(kResult0), kExprCatchAll,
+ WASM_I32V(kResult1), kExprEnd);
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -318,6 +342,39 @@ WASM_EXEC_TEST(TryCatchCallDirect) {
}
}
+WASM_EXEC_TEST(TryCatchAllCallDirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(
+ throw_func.function_index(),
+ WASM_I32V(7), WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_STMTS(WASM_I32V(kResult0))));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
WASM_EXEC_TEST(TryCatchCallIndirect) {
TestSignatures sigs;
EXPERIMENTAL_FLAG_SCOPE(eh);
@@ -348,7 +405,49 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
sig_index, WASM_I32V(7),
WASM_I32V(9), WASM_LOCAL_GET(0)),
WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0)), except));
+ WASM_I32V(kResult0), except));
+
+ if (execution_tier != TestExecutionTier::kInterpreter) {
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+ } else {
+ CHECK_EQ(kResult0, r.CallInterpreter(0));
+ CHECK_EQ(kResult1, r.CallInterpreter(1));
+ }
+}
+
+WASM_EXEC_TEST(TryCatchAllCallIndirect) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
+ uint32_t except = r.builder().AddException(sigs.v_v());
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+
+ // Build a throwing helper function.
+ WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
+ BUILD(throw_func, WASM_THROW(except));
+ byte sig_index = r.builder().AddSignature(sigs.i_ii());
+ throw_func.SetSigIndex(0);
+
+ // Add an indirect function table.
+ uint16_t indirect_function_table[] = {
+ static_cast<uint16_t>(throw_func.function_index())};
+ r.builder().AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+
+ // Build the main test function.
+ BUILD(r,
+ WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_INDIRECT(
+ sig_index, WASM_I32V(7),
+ WASM_I32V(9), WASM_LOCAL_GET(0)),
+ WASM_DROP))),
+ WASM_I32V(kResult0)));
if (execution_tier != TestExecutionTier::kInterpreter) {
// Need to call through JS to allow for creation of stack traces.
@@ -383,7 +482,37 @@ WASM_COMPILED_EXEC_TEST(TryCatchCallExternal) {
WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
WASM_I32V(9)),
WASM_DROP))),
- WASM_STMTS(WASM_I32V(kResult0))));
+ WASM_I32V(kResult0)));
+
+ // Need to call through JS to allow for creation of stack traces.
+ r.CheckCallViaJS(kResult0, 0);
+ r.CheckCallViaJS(kResult1, 1);
+}
+
+WASM_COMPILED_EXEC_TEST(TryCatchAllCallExternal) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ HandleScope scope(CcTest::InitIsolateOnce());
+ const char* source = "(function() { throw 'ball'; })";
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ ManuallyImportedJSFunction import = {sigs.i_ii(), js_function};
+ WasmRunner<uint32_t, uint32_t> r(execution_tier, &import);
+ constexpr uint32_t kResult0 = 23;
+ constexpr uint32_t kResult1 = 42;
+ constexpr uint32_t kJSFunc = 0;
+
+ // Build the main test function.
+ BUILD(r, WASM_TRY_CATCH_ALL_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(kResult1),
+ WASM_IF(WASM_I32_EQZ(WASM_LOCAL_GET(0)),
+ WASM_STMTS(WASM_CALL_FUNCTION(kJSFunc, WASM_I32V(7),
+ WASM_I32V(9)),
+ WASM_DROP))),
+ WASM_I32V(kResult0)));
// Need to call through JS to allow for creation of stack traces.
r.CheckCallViaJS(kResult0, 0);
@@ -460,6 +589,46 @@ TEST(Regress1180457) {
CHECK_EQ(kResult0, r.CallInterpreter());
}
+TEST(Regress1187896) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ byte try_sig = r.builder().AddSignature(sigs.v_i());
+ constexpr uint32_t kResult = 23;
+ BUILD(r, kExprI32Const, 0, kExprTry, try_sig, kExprDrop, kExprCatchAll,
+ kExprNop, kExprEnd, kExprI32Const, kResult);
+ CHECK_EQ(kResult, r.CallInterpreter());
+}
+
+TEST(Regress1190291) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ byte try_sig = r.builder().AddSignature(sigs.v_i());
+ BUILD(r, kExprUnreachable, kExprTry, try_sig, kExprCatchAll, kExprEnd,
+ kExprI32Const, 0);
+ r.CallInterpreter();
+}
+
+TEST(Regress1186795) {
+ TestSignatures sigs;
+ EXPERIMENTAL_FLAG_SCOPE(eh);
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ uint32_t except = r.builder().AddException(sigs.v_i());
+ BUILD(r, WASM_TRY_CATCH_T(
+ kWasmI32,
+ WASM_STMTS(
+ WASM_I32V(0), WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
+ WASM_I32V(0), WASM_I32V(0), WASM_I32V(0),
+ WASM_TRY_UNWIND_T(
+ kWasmI32, WASM_STMTS(WASM_I32V(0), WASM_THROW(except)),
+ WASM_I32V(0)),
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP),
+ WASM_NOP, except));
+ CHECK_EQ(0, r.CallInterpreter());
+}
+
} // namespace test_run_wasm_exceptions
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 7199d34e7d..0d039843e6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -312,14 +312,14 @@ TEST(MemoryGrow) {
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(1, r.Call(1));
}
{
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(-1, r.Call(11));
}
}
@@ -332,7 +332,7 @@ TEST(MemoryGrowPreservesData) {
BUILD(
r,
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
- WASM_GROW_MEMORY(WASM_LOCAL_GET(0)), WASM_DROP,
+ WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index)));
CHECK_EQ(value, r.Call(1));
}
@@ -341,7 +341,7 @@ TEST(MemoryGrowInvalidSize) {
// Grow memory by an invalid amount without initial memory.
WasmRunner<int32_t, uint32_t> r(TestExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
- BUILD(r, WASM_GROW_MEMORY(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
CHECK_EQ(-1, r.Call(1048575));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
index 71bb77f6ad..2679000dd6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-memory64.cc
@@ -98,6 +98,26 @@ WASM_EXEC_TEST(MemorySize) {
CHECK_EQ(kNumPages, r.Call());
}
+WASM_EXEC_TEST(MemoryGrow) {
+ // TODO(clemensb): Implement memory64 in the interpreter.
+ if (execution_tier == TestExecutionTier::kInterpreter) return;
+
+ Memory64Runner<int64_t, int64_t> r(execution_tier);
+ r.builder().SetMaxMemPages(13);
+ r.builder().AddMemory(kWasmPageSize);
+
+ BUILD(r, WASM_MEMORY_GROW(WASM_LOCAL_GET(0)));
+ CHECK_EQ(1, r.Call(6));
+ CHECK_EQ(7, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 31));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 32));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 33));
+ CHECK_EQ(-1, r.Call(int64_t{1} << 63));
+ CHECK_EQ(-1, r.Call(6)); // Above the maximum of 13.
+ CHECK_EQ(8, r.Call(5)); // Just at the maximum of 13.
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index a9f5dd6b26..14d88bc562 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -471,7 +471,7 @@ TEST(Run_WasmModule_MemSize_GrowMem) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(10)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(10)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, kExpectedValue);
@@ -490,7 +490,7 @@ TEST(MemoryGrowZero) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V(0))};
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V(0))};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, kExpectedValue);
}
@@ -597,7 +597,7 @@ TEST(Run_WasmModule_MemoryGrowInIf) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_GROW_MEMORY(WASM_I32V(1)),
+ byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_MEMORY_GROW(WASM_I32V(1)),
WASM_I32V(12))};
EMIT_CODE_WITH_END(f, code);
TestModule(&zone, builder, 12);
@@ -618,7 +618,7 @@ TEST(Run_WasmModule_GrowMemOobOffset) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(1)),
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(1)),
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value))};
EMIT_CODE_WITH_END(f, code);
@@ -640,7 +640,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_LOCAL_GET(0)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_LOCAL_GET(0)), WASM_DROP,
WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value)),
WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))};
@@ -687,7 +687,7 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(1)), WASM_DROP,
+ byte code[] = {WASM_MEMORY_GROW(WASM_I32V_1(1)), WASM_DROP,
WASM_STORE_MEM(MachineType::Int32(), WASM_LOCAL_GET(0),
WASM_I32V(value)),
WASM_LOAD_MEM(MachineType::Int32(), WASM_LOCAL_GET(0))};
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
new file mode 100644
index 0000000000..50f5bb44b7
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relaxed-simd.cc
@@ -0,0 +1,239 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/overflowing-math.h"
+#include "src/wasm/compilation-environment.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/cctest/wasm/wasm-simd-utils.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_relaxed_simd {
+
+// Use this for experimental relaxed-simd opcodes.
+#define WASM_RELAXED_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ TestExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ if (!CpuFeatures::SupportsWasmSimd128()) return; \
+ EXPERIMENTAL_FLAG_SCOPE(relaxed_simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
+ } \
+ TEST(RunWasm_##name##_interpreter) { \
+ EXPERIMENTAL_FLAG_SCOPE(relaxed_simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ TestExecutionTier execution_tier)
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC64
+// Only used for qfma and qfms tests below.
+
+// FMOperation holds the params (a, b, c) for a Multiply-Add or
+// Multiply-Subtract operation, and the expected result if the operation was
+// fused, rounded only once for the entire operation, or unfused, rounded after
+// multiply and again after add/subtract.
+template <typename T>
+struct FMOperation {
+ const T a;
+ const T b;
+ const T c;
+ const T fused_result;
+ const T unfused_result;
+};
+
+// large_n is large number that overflows T when multiplied by itself, this is a
+// useful constant to test fused/unfused behavior.
+template <typename T>
+constexpr T large_n = T(0);
+
+template <>
+constexpr double large_n<double> = 1e200;
+
+template <>
+constexpr float large_n<float> = 1e20;
+
+// Fused Multiply-Add performs a + b * c.
+template <typename T>
+static constexpr FMOperation<T> qfma_array[] = {
+ {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
+ // fused: a + b * c = -inf + (positive overflow) = -inf
+ // unfused: a + b * c = -inf + inf = NaN
+ {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a + b * c = inf + (negative overflow) = inf
+ // unfused: a + b * c = inf + -inf = NaN
+ {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfma_vector() {
+ return ArrayVector(qfma_array<T>);
+}
+
+// Fused Multiply-Subtract performs a - b * c.
+template <typename T>
+static constexpr FMOperation<T> qfms_array[]{
+ {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
+ // fused: a - b * c = inf - (positive overflow) = inf
+ // unfused: a - b * c = inf - inf = NaN
+ {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
+ std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // fused: a - b * c = -inf - (negative overflow) = -inf
+ // unfused: a - b * c = -inf - -inf = NaN
+ {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
+ -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
+ // NaN
+ {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
+ // -NaN
+ {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
+ std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
+
+template <typename T>
+static constexpr Vector<const FMOperation<T>> qfms_vector() {
+ return ArrayVector(qfms_array<T>);
+}
+
+// Fused results only when fma3 feature is enabled, and running on TurboFan or
+// Liftoff (which can fall back to TurboFan if FMA is not implemented).
+bool ExpectFused(TestExecutionTier tier) {
+#ifdef V8_TARGET_ARCH_X64
+ return CpuFeatures::IsSupported(FMA3) &&
+ (tier == TestExecutionTier::kTurbofan ||
+ tier == TestExecutionTier::kLiftoff);
+#else
+ return (tier == TestExecutionTier::kTurbofan ||
+ tier == TestExecutionTier::kLiftoff);
+#endif
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
+ // V8_TARGET_ARCH_PPC64
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC64
+WASM_RELAXED_SIMD_TEST(F32x4Qfma) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfma_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F32x4Qfms) {
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ float* g = r.builder().AddGlobal<float>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<float> x : qfms_vector<float>()) {
+ r.Call(x.a, x.b, x.c);
+ float expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 4; i++) {
+ float actual = ReadLittleEndianValue<float>(&g[i]);
+ CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F64x2Qfma) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfma_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+
+WASM_RELAXED_SIMD_TEST(F64x2Qfms) {
+ WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1, value3 = 2;
+ BUILD(r,
+ WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
+ WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
+ WASM_ONE);
+
+ for (FMOperation<double> x : qfms_vector<double>()) {
+ r.Call(x.a, x.b, x.c);
+ double expected =
+ ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
+ // V8_TARGET_ARCH_PPC64
+
+WASM_RELAXED_SIMD_TEST(F32x4RecipApprox) {
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
+ base::Recip, false /* !exact */);
+}
+
+WASM_RELAXED_SIMD_TEST(F32x4RecipSqrtApprox) {
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
+ base::RecipSqrt, false /* !exact */);
+}
+
+#undef WASM_RELAXED_SIMD_TEST
+} // namespace test_run_wasm_relaxed_simd
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
index 4c5309aae5..c0cc3c7dac 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
@@ -193,7 +193,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV8x16AllTrue));
+ WASM_SIMD_OP(kExprI8x16AllTrue));
CHECK_EQ(0, r.Call(0x00FF00FF));
}
@@ -202,7 +202,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV16x8AllTrue));
+ WASM_SIMD_OP(kExprI16x8AllTrue));
CHECK_EQ(0, r.Call(0x000000FF));
}
@@ -212,7 +212,7 @@ WASM_SIMD_TEST(AllTrue_DifferentShapes) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(0)),
- WASM_SIMD_OP(kExprV16x8AllTrue));
+ WASM_SIMD_OP(kExprI16x8AllTrue));
CHECK_EQ(1, r.Call(0x000F000F));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index fa9299f27b..4dd925a20a 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -8,8 +8,10 @@
#include <cstdint>
#include <cstring>
#include <limits>
+#include <map>
#include <tuple>
#include <type_traits>
+#include <utility>
#include <vector>
#include "src/base/bits.h"
@@ -19,7 +21,6 @@
#include "src/base/overflowing-math.h"
#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
@@ -33,6 +34,7 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/cctest/wasm/wasm-simd-utils.h"
#include "test/common/flag-utils.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
@@ -44,27 +46,7 @@ namespace test_run_wasm_simd {
namespace {
-using DoubleUnOp = double (*)(double);
-using DoubleBinOp = double (*)(double, double);
-using DoubleCompareOp = int64_t (*)(double, double);
-using FloatUnOp = float (*)(float);
-using FloatBinOp = float (*)(float, float);
-using FloatCompareOp = int (*)(float, float);
-using Int64UnOp = int64_t (*)(int64_t);
-using Int64BinOp = int64_t (*)(int64_t, int64_t);
-using Int64ShiftOp = int64_t (*)(int64_t, int);
-using Int32UnOp = int32_t (*)(int32_t);
-using Int32BinOp = int32_t (*)(int32_t, int32_t);
-using Int32CompareOp = int (*)(int32_t, int32_t);
-using Int32ShiftOp = int32_t (*)(int32_t, int);
-using Int16UnOp = int16_t (*)(int16_t);
-using Int16BinOp = int16_t (*)(int16_t, int16_t);
-using Int16CompareOp = int (*)(int16_t, int16_t);
-using Int16ShiftOp = int16_t (*)(int16_t, int);
-using Int8UnOp = int8_t (*)(int8_t);
-using Int8BinOp = int8_t (*)(int8_t, int8_t);
-using Int8CompareOp = int (*)(int8_t, int8_t);
-using Int8ShiftOp = int8_t (*)(int8_t, int);
+using Shuffle = std::array<int8_t, kSimd128Size>;
#define WASM_SIMD_TEST(name) \
void RunWasm_##name##_Impl(LowerSimd lower_simd, \
@@ -81,20 +63,9 @@ using Int8ShiftOp = int8_t (*)(int8_t, int);
EXPERIMENTAL_FLAG_SCOPE(simd); \
RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
} \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, TestExecutionTier::kTurbofan); \
- } \
void RunWasm_##name##_Impl(LowerSimd lower_simd, \
TestExecutionTier execution_tier)
-// Generic expected value functions.
-template <typename T, typename = typename std::enable_if<
- std::is_floating_point<T>::value>::type>
-T Negate(T a) {
- return -a;
-}
-
// For signed integral types, use base::AddWithWraparound.
template <typename T, typename = typename std::enable_if<
std::is_floating_point<T>::value>::type>
@@ -138,45 +109,93 @@ T UnsignedMaximum(T a, T b) {
return static_cast<UnsignedT>(a) >= static_cast<UnsignedT>(b) ? a : b;
}
-int Equal(float a, float b) { return a == b ? -1 : 0; }
+template <typename T, typename U = T>
+U Equal(T a, T b) {
+ return a == b ? -1 : 0;
+}
-template <typename T>
-T Equal(T a, T b) {
+template <>
+int32_t Equal(float a, float b) {
return a == b ? -1 : 0;
}
-int NotEqual(float a, float b) { return a != b ? -1 : 0; }
+template <>
+int64_t Equal(double a, double b) {
+ return a == b ? -1 : 0;
+}
-template <typename T>
-T NotEqual(T a, T b) {
+template <typename T, typename U = T>
+U NotEqual(T a, T b) {
+ return a != b ? -1 : 0;
+}
+
+template <>
+int32_t NotEqual(float a, float b) {
return a != b ? -1 : 0;
}
-int Less(float a, float b) { return a < b ? -1 : 0; }
+template <>
+int64_t NotEqual(double a, double b) {
+ return a != b ? -1 : 0;
+}
-template <typename T>
-T Less(T a, T b) {
+template <typename T, typename U = T>
+U Less(T a, T b) {
return a < b ? -1 : 0;
}
-int LessEqual(float a, float b) { return a <= b ? -1 : 0; }
+template <>
+int32_t Less(float a, float b) {
+ return a < b ? -1 : 0;
+}
-template <typename T>
-T LessEqual(T a, T b) {
+template <>
+int64_t Less(double a, double b) {
+ return a < b ? -1 : 0;
+}
+
+template <typename T, typename U = T>
+U LessEqual(T a, T b) {
+ return a <= b ? -1 : 0;
+}
+
+template <>
+int32_t LessEqual(float a, float b) {
return a <= b ? -1 : 0;
}
-int Greater(float a, float b) { return a > b ? -1 : 0; }
+template <>
+int64_t LessEqual(double a, double b) {
+ return a <= b ? -1 : 0;
+}
-template <typename T>
-T Greater(T a, T b) {
+template <typename T, typename U = T>
+U Greater(T a, T b) {
return a > b ? -1 : 0;
}
-int GreaterEqual(float a, float b) { return a >= b ? -1 : 0; }
+template <>
+int32_t Greater(float a, float b) {
+ return a > b ? -1 : 0;
+}
-template <typename T>
-T GreaterEqual(T a, T b) {
+template <>
+int64_t Greater(double a, double b) {
+ return a > b ? -1 : 0;
+}
+
+template <typename T, typename U = T>
+U GreaterEqual(T a, T b) {
+ return a >= b ? -1 : 0;
+}
+
+template <>
+int32_t GreaterEqual(float a, float b) {
+ return a >= b ? -1 : 0;
+}
+
+template <>
+int64_t GreaterEqual(double a, double b) {
return a >= b ? -1 : 0;
}
@@ -227,109 +246,6 @@ template <typename T>
T Abs(T a) {
return std::abs(a);
}
-
-// only used for F64x2 tests below
-int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
-
-int64_t NotEqual(double a, double b) { return a != b ? -1 : 0; }
-
-int64_t Greater(double a, double b) { return a > b ? -1 : 0; }
-
-int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
-
-int64_t Less(double a, double b) { return a < b ? -1 : 0; }
-
-int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-// Only used for qfma and qfms tests below.
-
-// FMOperation holds the params (a, b, c) for a Multiply-Add or
-// Multiply-Subtract operation, and the expected result if the operation was
-// fused, rounded only once for the entire operation, or unfused, rounded after
-// multiply and again after add/subtract.
-template <typename T>
-struct FMOperation {
- const T a;
- const T b;
- const T c;
- const T fused_result;
- const T unfused_result;
-};
-
-// large_n is large number that overflows T when multiplied by itself, this is a
-// useful constant to test fused/unfused behavior.
-template <typename T>
-constexpr T large_n = T(0);
-
-template <>
-constexpr double large_n<double> = 1e200;
-
-template <>
-constexpr float large_n<float> = 1e20;
-
-// Fused Multiply-Add performs a + b * c.
-template <typename T>
-static constexpr FMOperation<T> qfma_array[] = {
- {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
- // fused: a + b * c = -inf + (positive overflow) = -inf
- // unfused: a + b * c = -inf + inf = NaN
- {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
- -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a + b * c = inf + (negative overflow) = inf
- // unfused: a + b * c = inf + -inf = NaN
- {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
- std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
- // -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
-
-template <typename T>
-static constexpr Vector<const FMOperation<T>> qfma_vector() {
- return ArrayVector(qfma_array<T>);
-}
-
-// Fused Multiply-Subtract performs a - b * c.
-template <typename T>
-static constexpr FMOperation<T> qfms_array[]{
- {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
- // fused: a - b * c = inf - (positive overflow) = inf
- // unfused: a - b * c = inf - inf = NaN
- {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
- std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // fused: a - b * c = -inf - (negative overflow) = -inf
- // unfused: a - b * c = -inf - -inf = NaN
- {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
- -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
- // NaN
- {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
- // -NaN
- {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
- std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
-
-template <typename T>
-static constexpr Vector<const FMOperation<T>> qfms_vector() {
- return ArrayVector(qfms_array<T>);
-}
-
-// Fused results only when fma3 feature is enabled, and running on TurboFan or
-// Liftoff (which can fall back to TurboFan if FMA is not implemented).
-bool ExpectFused(TestExecutionTier tier) {
-#ifdef V8_TARGET_ARCH_X64
- return CpuFeatures::IsSupported(FMA3) &&
- (tier == TestExecutionTier::kTurbofan ||
- tier == TestExecutionTier::kLiftoff);
-#else
- return (tier == TestExecutionTier::kTurbofan ||
- tier == TestExecutionTier::kLiftoff);
-#endif
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
} // namespace
#define WASM_SIMD_CHECK_LANE_S(TYPE, value, LANE_TYPE, lane_value, lane_index) \
@@ -345,57 +261,6 @@ bool ExpectFused(TestExecutionTier tier) {
lane_index, WASM_LOCAL_GET(value))), \
WASM_RETURN1(WASM_ZERO))
-// The macro below disables tests lowering for certain nodes where the simd
-// lowering doesn't work correctly. Early return here if the CPU does not
-// support SIMD as the graph will be implicitly lowered in that case.
-#define WASM_SIMD_TEST_NO_LOWERING(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- TestExecutionTier execution_tier); \
- TEST(RunWasm_##name##_turbofan) { \
- if (!CpuFeatures::SupportsWasmSimd128()) return; \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan); \
- } \
- TEST(RunWasm_##name##_liftoff) { \
- if (!CpuFeatures::SupportsWasmSimd128()) return; \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff); \
- } \
- TEST(RunWasm_##name##_interpreter) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- TestExecutionTier execution_tier)
-
-// Returns true if the platform can represent the result.
-template <typename T>
-bool PlatformCanRepresent(T x) {
-#if V8_TARGET_ARCH_ARM
- return std::fpclassify(x) != FP_SUBNORMAL;
-#else
- return true;
-#endif
-}
-
-// Returns true for very small and very large numbers. We skip these test
-// values for the approximation instructions, which don't work at the extremes.
-bool IsExtreme(float x) {
- float abs_x = std::fabs(x);
- const float kSmallFloatThreshold = 1.0e-32f;
- const float kLargeFloatThreshold = 1.0e32f;
- return abs_x != 0.0f && // 0 or -0 are fine.
- (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
-}
-
-#if V8_OS_AIX
-template <typename T>
-bool MightReverseSign(T float_op) {
- return float_op == static_cast<T>(Negate) ||
- float_op == static_cast<T>(std::abs);
-}
-#endif
-
WASM_SIMD_TEST(S128Globals) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up a global to hold input and output vectors.
@@ -488,113 +353,6 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
}
}
-bool IsSameNan(float expected, float actual) {
- // Sign is non-deterministic.
- uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
- uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x00400000) == actual_bits);
-}
-
-bool IsCanonical(float actual) {
- uint32_t actual_bits = bit_cast<uint32_t>(actual);
- // Canonical NaN has quiet bit and no payload.
- return (actual_bits & 0xFFC00000) == actual_bits;
-}
-
-void CheckFloatResult(float x, float y, float expected, float actual,
- bool exact = true) {
- if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
- if (std::isnan(x) && IsSameNan(x, actual)) return;
- if (std::isnan(y) && IsSameNan(y, actual)) return;
- if (IsSameNan(expected, actual)) return;
- if (IsCanonical(actual)) return;
- // This is expected to assert; it's useful for debugging.
- CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
- } else {
- if (exact) {
- CHECK_EQ(expected, actual);
- // The sign of 0's must match.
- CHECK_EQ(std::signbit(expected), std::signbit(actual));
- return;
- }
- // Otherwise, perform an approximate equality test. First check for
- // equality to handle +/-Infinity where approximate equality doesn't work.
- if (expected == actual) return;
-
- // 1% error allows all platforms to pass easily.
- constexpr float kApproximationError = 0.01f;
- float abs_error = std::abs(expected) * kApproximationError,
- min = expected - abs_error, max = expected + abs_error;
- CHECK_LE(min, actual);
- CHECK_GE(max, actual);
- }
-}
-
-// Test some values not included in the float inputs from value_helper. These
-// tests are useful for opcodes that are synthesized during code gen, like Min
-// and Max on ia32 and x64.
-static constexpr uint32_t nan_test_array[] = {
- // Bit patterns of quiet NaNs and signaling NaNs, with or without
- // additional payload.
- 0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
- // NaN with top payload bit unset.
- 0x7FA00000,
- // Both Infinities.
- 0x7F800000, 0xFF800000,
- // Some "normal" numbers, 1 and -1.
- 0x3F800000, 0xBF800000};
-
-#define FOR_FLOAT32_NAN_INPUTS(i) \
- for (size_t i = 0; i < arraysize(nan_test_array); ++i)
-
-void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, FloatUnOp expected_op,
- bool exact = true) {
- WasmRunner<int32_t, float> r(execution_tier, lower_simd);
- // Global to hold output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- float expected = expected_op(x);
-#if V8_OS_AIX
- if (!MightReverseSign<FloatUnOp>(expected_op))
- expected = FpOpWorkaround<float>(x, expected);
-#endif
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, x, expected, actual, exact);
- }
- }
-
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- float expected = expected_op(x);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, x, expected, actual, exact);
- }
- }
-}
-
WASM_SIMD_TEST(F32x4Abs) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
@@ -607,18 +365,6 @@ WASM_SIMD_TEST(F32x4Sqrt) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, std::sqrt);
}
-WASM_SIMD_TEST(F32x4RecipApprox) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
- base::Recip, false /* !exact */);
-}
-
-WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
- base::RecipSqrt, false /* !exact */);
-}
-
WASM_SIMD_TEST(F32x4Ceil) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Ceil, ceilf, true);
}
@@ -636,54 +382,6 @@ WASM_SIMD_TEST(F32x4NearestInt) {
true);
}
-void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, FloatBinOp expected_op) {
- WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
- // Global to hold output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- float expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_NAN_INPUTS(j) {
- float y = bit_cast<float>(nan_test_array[j]);
- if (!PlatformCanRepresent(y)) continue;
- float expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-}
-
-#undef FOR_FLOAT32_NAN_INPUTS
-
WASM_SIMD_TEST(F32x4Add) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
@@ -711,37 +409,6 @@ WASM_SIMD_TEST(F32x4Pmax) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
}
-void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- FloatCompareOp expected_op) {
- WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT32_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT32_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- float diff = x - y; // Model comparison as subtraction.
- if (!PlatformCanRepresent(diff)) continue;
- r.Call(x, y);
- int32_t expected = expected_op(x, y);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(F32x4Eq) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}
@@ -766,115 +433,6 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
-// TODO(v8:10983) Prototyping sign select.
-template <typename T>
-void RunSignSelect(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode signselect, WasmOpcode splat,
- std::array<int8_t, kSimd128Size> mask) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- T* output = r.builder().template AddGlobal<T>(kWasmS128);
-
- // Splat 2 constant values, then use a mask that selects alternate lanes.
- BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(splat), WASM_LOCAL_GET(1),
- WASM_SIMD_OP(splat), WASM_SIMD_CONSTANT(mask), WASM_SIMD_OP(signselect),
- kExprGlobalSet, 0, WASM_ONE);
-
- r.Call(1, 2);
-
- constexpr int lanes = kSimd128Size / sizeof(T);
- for (int i = 0; i < lanes; i += 2) {
- CHECK_EQ(1, ReadLittleEndianValue<T>(&output[i]));
- }
- for (int i = 1; i < lanes; i += 2) {
- CHECK_EQ(2, ReadLittleEndianValue<T>(&output[i]));
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I8x16SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0x80, 0, -1, 0, 0x80, 0, -1, 0,
- 0x80, 0, -1, 0, 0x80, 0, -1, 0};
- RunSignSelect<int8_t>(execution_tier, lower_simd, kExprI8x16SignSelect,
- kExprI8x16Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I16x8SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0x80, 0, 0, -1, -1, 0, 0,
- 0, 0x80, 0, 0, -1, -1, 0, 0};
- RunSignSelect<int16_t>(execution_tier, lower_simd, kExprI16x8SignSelect,
- kExprI16x8Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I32x4SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0x80, 0, 0, 0, 0,
- -1, -1, -1, -1, 0, 0, 0, 0};
- RunSignSelect<int32_t>(execution_tier, lower_simd, kExprI32x4SignSelect,
- kExprI32x4Splat, mask);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2SignSelect) {
- std::array<int8_t, kSimd128Size> mask = {0, 0, 0, 0, 0, 0, 0, 0x80,
- 0, 0, 0, 0, 0, 0, 0, 0};
- RunSignSelect<int64_t>(execution_tier, lower_simd, kExprI64x2SignSelect,
- kExprI64x2Splat, mask);
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM
-
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMA(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<float> x : qfma_vector<float>()) {
- r.Call(x.a, x.b, x.c);
- float expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- float* g = r.builder().AddGlobal<float>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F32x4_QFMS(
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<float> x : qfms_vector<float>()) {
- r.Call(x.a, x.b, x.c);
- float expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 4; i++) {
- float actual = ReadLittleEndianValue<float>(&g[i]);
- CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
WASM_SIMD_TEST(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -925,68 +483,15 @@ WASM_SIMD_TEST(I64x2ReplaceLane) {
}
}
-void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64UnOp expected_op) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Neg) {
RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
base::NegateWithWraparound);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Abs) {
+WASM_SIMD_TEST(I64x2Abs) {
RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Abs, std::abs);
}
-void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64ShiftOp expected_op) {
- // Intentionally shift by 64, should be no-op.
- for (int shift = 1; shift <= 64; shift++) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
- int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x, shift);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Shl) {
RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
LogicalShiftLeft);
@@ -1002,32 +507,6 @@ WASM_SIMD_TEST(I64x2ShrU) {
LogicalShiftRight);
}
-void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64BinOp expected_op) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_INT64_INPUTS(x) {
- FOR_INT64_INPUTS(y) {
- r.Call(x, y);
- int64_t expected = expected_op(x, y);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I64x2Add) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
base::AddWithWraparound);
@@ -1038,27 +517,27 @@ WASM_SIMD_TEST(I64x2Sub) {
base::SubWithWraparound);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
+WASM_SIMD_TEST(I64x2Eq) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+WASM_SIMD_TEST(I64x2Ne) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+WASM_SIMD_TEST(I64x2LtS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+WASM_SIMD_TEST(I64x2LeS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+WASM_SIMD_TEST(I64x2GtS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+WASM_SIMD_TEST(I64x2GeS) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
}
@@ -1149,124 +628,6 @@ WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
CHECK_EQ(1, r.Call());
}
-bool IsExtreme(double x) {
- double abs_x = std::fabs(x);
- const double kSmallFloatThreshold = 1.0e-298;
- const double kLargeFloatThreshold = 1.0e298;
- return abs_x != 0.0f && // 0 or -0 are fine.
- (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
-}
-
-bool IsSameNan(double expected, double actual) {
- // Sign is non-deterministic.
- uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
- uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
- // Some implementations convert signaling NaNs to quiet NaNs.
- return (expected_bits == actual_bits) ||
- ((expected_bits | 0x0008000000000000) == actual_bits);
-}
-
-bool IsCanonical(double actual) {
- uint64_t actual_bits = bit_cast<uint64_t>(actual);
- // Canonical NaN has quiet bit and no payload.
- return (actual_bits & 0xFFF8000000000000) == actual_bits;
-}
-
-void CheckDoubleResult(double x, double y, double expected, double actual,
- bool exact = true) {
- if (std::isnan(expected)) {
- CHECK(std::isnan(actual));
- if (std::isnan(x) && IsSameNan(x, actual)) return;
- if (std::isnan(y) && IsSameNan(y, actual)) return;
- if (IsSameNan(expected, actual)) return;
- if (IsCanonical(actual)) return;
- // This is expected to assert; it's useful for debugging.
- CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
- } else {
- if (exact) {
- CHECK_EQ(expected, actual);
- // The sign of 0's must match.
- CHECK_EQ(std::signbit(expected), std::signbit(actual));
- return;
- }
- // Otherwise, perform an approximate equality test. First check for
- // equality to handle +/-Infinity where approximate equality doesn't work.
- if (expected == actual) return;
-
- // 1% error allows all platforms to pass easily.
- constexpr double kApproximationError = 0.01f;
- double abs_error = std::abs(expected) * kApproximationError,
- min = expected - abs_error, max = expected + abs_error;
- CHECK_LE(min, actual);
- CHECK_GE(max, actual);
- }
-}
-
-// Test some values not included in the double inputs from value_helper. These
-// tests are useful for opcodes that are synthesized during code gen, like Min
-// and Max on ia32 and x64.
-static constexpr uint64_t double_nan_test_array[] = {
- // quiet NaNs, + and -
- 0x7FF8000000000001, 0xFFF8000000000001,
- // with payload
- 0x7FF8000000000011, 0xFFF8000000000011,
- // signaling NaNs, + and -
- 0x7FF0000000000001, 0xFFF0000000000001,
- // with payload
- 0x7FF0000000000011, 0xFFF0000000000011,
- // Both Infinities.
- 0x7FF0000000000000, 0xFFF0000000000000,
- // Some "normal" numbers, 1 and -1.
- 0x3FF0000000000000, 0xBFF0000000000000};
-
-#define FOR_FLOAT64_NAN_INPUTS(i) \
- for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
-
-void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, DoubleUnOp expected_op,
- bool exact = true) {
- WasmRunner<int32_t, double> r(execution_tier, lower_simd);
- // Global to hold output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- double expected = expected_op(x);
-#if V8_OS_AIX
- if (!MightReverseSign<DoubleUnOp>(expected_op))
- expected = FpOpWorkaround<double>(x, expected);
-#endif
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, x, expected, actual, exact);
- }
- }
-
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- // Extreme values have larger errors so skip them for approximation tests.
- if (!exact && IsExtreme(x)) continue;
- double expected = expected_op(x);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, x, expected, actual, exact);
- }
- }
-}
-
WASM_SIMD_TEST(F64x2Abs) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
}
@@ -1301,12 +662,15 @@ void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode opcode) {
WasmRunner<int32_t, SrcType> r(execution_tier, lower_simd);
double* g = r.builder().template AddGlobal<double>(kWasmS128);
- // TODO(zhin): set top lanes to 0 to assert conversion happens on low lanes.
- BUILD(
- r,
- WASM_GLOBAL_SET(
- 0, WASM_SIMD_UNOP(opcode, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)))),
- WASM_ONE);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0,
+ WASM_SIMD_UNOP(
+ opcode,
+ // Set top lane of i64x2 == set top 2 lanes of i32x4.
+ WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(0)), WASM_ZERO64))),
+ WASM_ONE);
for (SrcType x : compiler::ValueHelper::GetVector<SrcType>()) {
r.Call(x);
@@ -1318,12 +682,12 @@ void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
}
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4S) {
+WASM_SIMD_TEST(F64x2ConvertLowI32x4S) {
RunF64x2ConvertLowI32x4Test<int32_t>(execution_tier, lower_simd,
kExprF64x2ConvertLowI32x4S);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2ConvertLowI32x4U) {
+WASM_SIMD_TEST(F64x2ConvertLowI32x4U) {
RunF64x2ConvertLowI32x4Test<uint32_t>(execution_tier, lower_simd,
kExprF64x2ConvertLowI32x4U);
}
@@ -1353,17 +717,17 @@ void RunI32x4TruncSatF64x2Test(TestExecutionTier execution_tier,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2SZero) {
+WASM_SIMD_TEST(I32x4TruncSatF64x2SZero) {
RunI32x4TruncSatF64x2Test<int32_t>(execution_tier, lower_simd,
kExprI32x4TruncSatF64x2SZero);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4TruncSatF64x2UZero) {
+WASM_SIMD_TEST(I32x4TruncSatF64x2UZero) {
RunI32x4TruncSatF64x2Test<uint32_t>(execution_tier, lower_simd,
kExprI32x4TruncSatF64x2UZero);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
+WASM_SIMD_TEST(F32x4DemoteF64x2Zero) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
float* g = r.builder().AddGlobal<float>(kWasmS128);
BUILD(r,
@@ -1386,7 +750,7 @@ WASM_SIMD_TEST_NO_LOWERING(F32x4DemoteF64x2Zero) {
}
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
+WASM_SIMD_TEST(F64x2PromoteLowF32x4) {
WasmRunner<int32_t, float> r(execution_tier, lower_simd);
double* g = r.builder().AddGlobal<double>(kWasmS128);
BUILD(r,
@@ -1405,53 +769,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
}
}
-void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, DoubleBinOp expected_op) {
- WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
- // Global to hold output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test value, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_INPUTS(y) {
- if (!PlatformCanRepresent(x)) continue;
- double expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_NAN_INPUTS(j) {
- double y = bit_cast<double>(double_nan_test_array[j]);
- double expected = expected_op(x, y);
- if (!PlatformCanRepresent(expected)) continue;
- r.Call(x, y);
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x, y, expected, actual, true /* exact */);
- }
- }
- }
-}
-
-#undef FOR_FLOAT64_NAN_INPUTS
-
WASM_SIMD_TEST(F64x2Add) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
}
@@ -1476,42 +793,6 @@ WASM_SIMD_TEST(F64x2Pmax) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
}
-void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- DoubleCompareOp expected_op) {
- WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- // Make the lanes of each temp compare differently:
- // temp1 = y, x and temp2 = y, y.
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp1,
- WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- double diff = x - y; // Model comparison as subtraction.
- if (!PlatformCanRepresent(diff)) continue;
- r.Call(x, y);
- int64_t expected0 = expected_op(x, y);
- int64_t expected1 = expected_op(y, y);
- CHECK_EQ(expected0, ReadLittleEndianValue<int64_t>(&g[0]));
- CHECK_EQ(expected1, ReadLittleEndianValue<int64_t>(&g[1]));
- }
- }
-}
-
WASM_SIMD_TEST(F64x2Eq) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
}
@@ -1549,58 +830,6 @@ WASM_SIMD_TEST(I64x2Mul) {
base::MulWithWraparound);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMA(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<double> x : qfma_vector<double>()) {
- r.Call(x.a, x.b, x.c);
- double expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- double* g = r.builder().AddGlobal<double>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1, value3 = 2;
- BUILD(r,
- WASM_GLOBAL_SET(0, WASM_SIMD_F64x2_QFMS(
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2)),
- WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value3)))),
- WASM_ONE);
-
- for (FMOperation<double> x : qfms_vector<double>()) {
- r.Call(x.a, x.b, x.c);
- double expected =
- ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
- for (int i = 0; i < 2; i++) {
- double actual = ReadLittleEndianValue<double>(&g[i]);
- CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
- }
- }
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
-
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -1758,7 +987,7 @@ WASM_SIMD_TEST(I32x4BitMask) {
}
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2BitMask) {
+WASM_SIMD_TEST(I64x2BitMask) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
byte value1 = r.AllocateLocal(kWasmS128);
@@ -1928,7 +1157,7 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
}
// Tests both signed and unsigned conversion from I32x4 (unpacking).
-WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
+WASM_SIMD_TEST(I64x2ConvertI32x4) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Create four output vectors to hold signed and unsigned results.
int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
@@ -1963,27 +1192,6 @@ WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
}
}
-void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT32_INPUTS(x) {
- r.Call(x);
- int32_t expected = expected_op(x);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I32x4Neg) {
RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
base::NegateWithWraparound);
@@ -2001,74 +1209,56 @@ WASM_SIMD_TEST(S128Not) {
template <typename Narrow, typename Wide>
void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
- WasmOpcode splat) {
+ WasmOpcode splat, Shuffle interleaving_shuffle) {
constexpr int num_lanes = kSimd128Size / sizeof(Wide);
- WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
+ WasmRunner<int32_t, Narrow, Narrow> r(execution_tier, lower_simd);
Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
- // TODO(v8:11086) We splat the same value, so pairwise adding ends up adding
- // the same value to itself, consider a more complicated test, like having 2
- // vectors, and shuffling them.
- BUILD(r, WASM_LOCAL_GET(0), WASM_SIMD_OP(splat),
+ BUILD(r,
+ WASM_SIMD_I8x16_SHUFFLE_OP(kExprI8x16Shuffle, interleaving_shuffle,
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(0)),
+ WASM_SIMD_UNOP(splat, WASM_LOCAL_GET(1))),
WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
- for (Narrow x : compiler::ValueHelper::GetVector<Narrow>()) {
- r.Call(x);
- Wide expected = AddLong<Wide>(x, x);
+ auto v = compiler::ValueHelper::GetVector<Narrow>();
+ // Iterate vector from both ends to try and splat two different values.
+ for (auto i = v.begin(), j = v.end() - 1; i < v.end(); i++, j--) {
+ r.Call(*i, *j);
+ Wide expected = AddLong<Wide>(*i, *j);
for (int i = 0; i < num_lanes; i++) {
CHECK_EQ(expected, ReadLittleEndianValue<Wide>(&g[i]));
}
}
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8S) {
- RunExtAddPairwiseTest<int16_t, int32_t>(execution_tier, lower_simd,
- kExprI32x4ExtAddPairwiseI16x8S,
- kExprI16x8Splat);
-}
+// interleave even lanes from one input and odd lanes from another.
+constexpr Shuffle interleave_16x8_shuffle = {0, 1, 18, 19, 4, 5, 22, 23,
+ 8, 9, 26, 27, 12, 13, 30, 31};
+constexpr Shuffle interleave_8x16_shuffle = {0, 17, 2, 19, 4, 21, 6, 23,
+ 8, 25, 10, 27, 12, 29, 14, 31};
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8U) {
- RunExtAddPairwiseTest<uint16_t, uint32_t>(execution_tier, lower_simd,
- kExprI32x4ExtAddPairwiseI16x8U,
- kExprI16x8Splat);
+WASM_SIMD_TEST(I32x4ExtAddPairwiseI16x8S) {
+ RunExtAddPairwiseTest<int16_t, int32_t>(
+ execution_tier, lower_simd, kExprI32x4ExtAddPairwiseI16x8S,
+ kExprI16x8Splat, interleave_16x8_shuffle);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16S) {
- RunExtAddPairwiseTest<int8_t, int16_t>(execution_tier, lower_simd,
- kExprI16x8ExtAddPairwiseI8x16S,
- kExprI8x16Splat);
+WASM_SIMD_TEST(I32x4ExtAddPairwiseI16x8U) {
+ RunExtAddPairwiseTest<uint16_t, uint32_t>(
+ execution_tier, lower_simd, kExprI32x4ExtAddPairwiseI16x8U,
+ kExprI16x8Splat, interleave_16x8_shuffle);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
- RunExtAddPairwiseTest<uint8_t, uint16_t>(execution_tier, lower_simd,
- kExprI16x8ExtAddPairwiseI8x16U,
- kExprI8x16Splat);
+WASM_SIMD_TEST(I16x8ExtAddPairwiseI8x16S) {
+ RunExtAddPairwiseTest<int8_t, int16_t>(
+ execution_tier, lower_simd, kExprI16x8ExtAddPairwiseI8x16S,
+ kExprI8x16Splat, interleave_8x16_shuffle);
}
-void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- FOR_INT32_INPUTS(x) {
- FOR_INT32_INPUTS(y) {
- r.Call(x, y);
- int32_t expected = expected_op(x, y);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
- }
- }
- }
+WASM_SIMD_TEST(I16x8ExtAddPairwiseI8x16U) {
+ RunExtAddPairwiseTest<uint8_t, uint16_t>(
+ execution_tier, lower_simd, kExprI16x8ExtAddPairwiseI8x16U,
+ kExprI8x16Splat, interleave_8x16_shuffle);
}
WASM_SIMD_TEST(I32x4Add) {
@@ -2167,38 +1357,6 @@ WASM_SIMD_TEST(I32x4GeU) {
UnsignedGreaterEqual);
}
-void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int32ShiftOp expected_op) {
- // Intentionally shift by 32, should be no-op.
- for (int shift = 1; shift <= 32; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
- int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT32_INPUTS(x) {
- r.Call(x);
- int32_t expected = expected_op(x, shift);
- for (int i = 0; i < 4; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
LogicalShiftLeft);
@@ -2278,27 +1436,6 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
}
}
-void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int16UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT16_INPUTS(x) {
- r.Call(x);
- int16_t expected = expected_op(x);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Neg) {
RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
base::NegateWithWraparound);
@@ -2308,33 +1445,6 @@ WASM_SIMD_TEST(I16x8Abs) {
RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Abs, Abs);
}
-template <typename T = int16_t, typename OpType = T (*)(T, T)>
-void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, OpType expected_op) {
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- // Global to hold output.
- T* g = r.builder().template AddGlobal<T>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- for (T x : compiler::ValueHelper::GetVector<T>()) {
- for (T y : compiler::ValueHelper::GetVector<T>()) {
- r.Call(x, y);
- T expected = expected_op(x, y);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Add) {
RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
base::AddWithWraparound);
@@ -2369,13 +1479,13 @@ WASM_SIMD_TEST(I16x8MaxS) {
}
WASM_SIMD_TEST(I16x8AddSatU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatU,
- SaturateAdd<uint16_t>);
+ RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd, kExprI16x8AddSatU,
+ SaturateAdd<uint16_t>);
}
WASM_SIMD_TEST(I16x8SubSatU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatU,
- SaturateSub<uint16_t>);
+ RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd, kExprI16x8SubSatU,
+ SaturateSub<uint16_t>);
}
WASM_SIMD_TEST(I16x8MinU) {
@@ -2433,10 +1543,10 @@ WASM_SIMD_TEST(I16x8LeU) {
WASM_SIMD_TEST(I16x8RoundingAverageU) {
RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
kExprI16x8RoundingAverageU,
- base::RoundingAverageUnsigned);
+ RoundingAverageUnsigned);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
+WASM_SIMD_TEST(I16x8Q15MulRSatS) {
RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
SaturateRoundingQMul<int16_t>);
}
@@ -2576,38 +1686,6 @@ WASM_SIMD_TEST(I32x4DotI16x8S) {
}
}
-void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int16ShiftOp expected_op) {
- // Intentionally shift by 16, should be no-op.
- for (int shift = 1; shift <= 16; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
- int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT16_INPUTS(x) {
- r.Call(x);
- int16_t expected = expected_op(x, shift);
- for (int i = 0; i < 8; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I16x8Shl) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
LogicalShiftLeft);
@@ -2623,27 +1701,6 @@ WASM_SIMD_TEST(I16x8ShrU) {
LogicalShiftRight);
}
-void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int8UnOp expected_op) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
- WASM_ONE);
-
- FOR_INT8_INPUTS(x) {
- r.Call(x);
- int8_t expected = expected_op(x);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
- }
- }
-}
-
WASM_SIMD_TEST(I8x16Neg) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
base::NegateWithWraparound);
@@ -2653,7 +1710,7 @@ WASM_SIMD_TEST(I8x16Abs) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
+WASM_SIMD_TEST(I8x16Popcnt) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Global to hold output.
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
@@ -2703,33 +1760,6 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
}
}
-template <typename T = int8_t, typename OpType = T (*)(T, T)>
-void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, OpType expected_op) {
- WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
- // Global to hold output.
- T* g = r.builder().template AddGlobal<T>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_ONE);
-
- for (T x : compiler::ValueHelper::GetVector<T>()) {
- for (T y : compiler::ValueHelper::GetVector<T>()) {
- r.Call(x, y);
- T expected = expected_op(x, y);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
- }
- }
- }
-}
-
WASM_SIMD_TEST(I8x16Add) {
RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
base::AddWithWraparound);
@@ -2759,13 +1789,13 @@ WASM_SIMD_TEST(I8x16MaxS) {
}
WASM_SIMD_TEST(I8x16AddSatU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatU,
- SaturateAdd<uint8_t>);
+ RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd, kExprI8x16AddSatU,
+ SaturateAdd<uint8_t>);
}
WASM_SIMD_TEST(I8x16SubSatU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatU,
- SaturateSub<uint8_t>);
+ RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd, kExprI8x16SubSatU,
+ SaturateSub<uint8_t>);
}
WASM_SIMD_TEST(I8x16MinU) {
@@ -2820,48 +1850,10 @@ WASM_SIMD_TEST(I8x16LeU) {
UnsignedLessEqual);
}
-WASM_SIMD_TEST(I8x16Mul) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul,
- base::MulWithWraparound);
-}
-
WASM_SIMD_TEST(I8x16RoundingAverageU) {
RunI8x16BinOpTest<uint8_t>(execution_tier, lower_simd,
kExprI8x16RoundingAverageU,
- base::RoundingAverageUnsigned);
-}
-
-void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int8ShiftOp expected_op) {
- // Intentionally shift by 8, should be no-op.
- for (int shift = 1; shift <= 8; shift++) {
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
- int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
- int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
- int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
- byte value = 0;
- byte simd = r.AllocateLocal(kWasmS128);
- // Shift using an immediate, and shift using a value loaded from memory.
- BUILD(
- r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
- WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
- WASM_I32V(shift))),
- WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
- opcode, WASM_LOCAL_GET(simd),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
- WASM_ONE);
-
- r.builder().WriteMemory(&memory[0], shift);
- FOR_INT8_INPUTS(x) {
- r.Call(x);
- int8_t expected = expected_op(x, shift);
- for (int i = 0; i < 16; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
- CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
- }
- }
- }
+ RoundingAverageUnsigned);
}
WASM_SIMD_TEST(I8x16Shl) {
@@ -2990,27 +1982,6 @@ void RunBinaryLaneOpTest(
}
}
-WASM_SIMD_TEST(I32x4AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0 1 2 3] and [4 5 6 7].
- RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
- {{1, 5, 9, 13}});
-}
-
-WASM_SIMD_TEST(I16x8AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
- RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
- {{1, 5, 9, 13, 17, 21, 25, 29}});
-}
-
-WASM_SIMD_TEST(F32x4AddHoriz) {
- FLAG_SCOPE(wasm_simd_post_mvp);
- // Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
- RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
- {{1.0f, 5.0f, 9.0f, 13.0f}});
-}
-
// Test shuffle ops.
void RunShuffleOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op,
@@ -3077,7 +2048,6 @@ enum ShuffleKey {
kNumShuffleKeys
};
-using Shuffle = std::array<int8_t, kSimd128Size>;
using ShuffleMap = std::map<ShuffleKey, const Shuffle>;
ShuffleMap test_shuffles = {
@@ -3256,6 +2226,30 @@ WASM_SIMD_TEST(I8x16Swizzle) {
CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
}
}
+
+ {
+ // We have an optimization for constant indices, test this case.
+ for (SwizzleTestArgs si : swizzle_test_vector) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
+ WASM_SIMD_CONSTANT(si.indices))),
+ WASM_ONE);
+
+ for (int i = 0; i < kSimd128Size; i++) {
+ WriteLittleEndianValue<uint8_t>(&src0[i], si.input[i]);
+ }
+
+ CHECK_EQ(1, r.Call());
+
+ for (int i = 0; i < kSimd128Size; i++) {
+ CHECK_EQ(ReadLittleEndianValue<uint8_t>(&dst[i]), si.expected[i]);
+ }
+ }
+ }
}
// Combine 3 shuffles a, b, and c by applying both a and b and then applying c
@@ -3370,7 +2364,6 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
@@ -3392,14 +2385,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
@@ -3423,14 +2416,14 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
WASM_RETURN1(WASM_ZERO)), \
WASM_LOCAL_SET( \
- reduced, WASM_SIMD_UNOP(kExprV##format##AllTrue, \
+ reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
@@ -3698,60 +2691,6 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-// TODO(v8:11168): Prototyping prefetch.
-WASM_SIMD_TEST(SimdPrefetch) {
- FLAG_SCOPE(wasm_simd_post_mvp);
-
- {
- // Test PrefetchT.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchT), ZERO_ALIGNMENT,
- ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-
- {
- // Test PrefetchNT.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- BUILD(r, WASM_ZERO, WASM_SIMD_OP(kExprPrefetchNT), ZERO_ALIGNMENT,
- ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-
- {
- // Test OOB.
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
-
- // Prefetch kWasmPageSize+1 but still load from 0.
- BUILD(r, WASM_I32V(kWasmPageSize + 1), WASM_SIMD_OP(kExprPrefetchNT),
- ZERO_ALIGNMENT, ZERO_OFFSET,
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- r.builder().WriteMemory(&memory[0], i);
- CHECK_EQ(i, r.Call());
- }
- }
-}
-#endif // V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
-
WASM_SIMD_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
int32_t* memory =
@@ -4181,22 +3120,22 @@ void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load8Lane) {
+WASM_SIMD_TEST(S128Load8Lane) {
RunLoadLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Load8Lane,
kExprI8x16Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load16Lane) {
+WASM_SIMD_TEST(S128Load16Lane) {
RunLoadLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Load16Lane,
kExprI16x8Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load32Lane) {
+WASM_SIMD_TEST(S128Load32Lane) {
RunLoadLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Lane,
kExprI32x4Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
+WASM_SIMD_TEST(S128Load64Lane) {
RunLoadLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Lane,
kExprI64x2Splat);
}
@@ -4274,29 +3213,28 @@ void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store8Lane) {
+WASM_SIMD_TEST(S128Store8Lane) {
RunStoreLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Store8Lane,
kExprI8x16Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store16Lane) {
+WASM_SIMD_TEST(S128Store16Lane) {
RunStoreLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Store16Lane,
kExprI16x8Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store32Lane) {
+WASM_SIMD_TEST(S128Store32Lane) {
RunStoreLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Store32Lane,
kExprI32x4Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
+WASM_SIMD_TEST(S128Store64Lane) {
RunStoreLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Store64Lane,
kExprI64x2Splat);
}
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
@@ -4325,15 +3263,14 @@ WASM_SIMD_TEST(V128AnytrueWithNegativeZero) {
}
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
- WASM_SIMD_TEST(V##format##AllTrue) { \
- FLAG_SCOPE(wasm_simd_post_mvp); \
+ WASM_SIMD_TEST(I##format##AllTrue) { \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
WASM_LOCAL_SET(simd, WASM_SIMD_I##format##_SPLAT(WASM_LOCAL_GET(0))), \
- WASM_SIMD_UNOP(kExprV##format##AllTrue, WASM_LOCAL_GET(simd))); \
+ WASM_SIMD_UNOP(kExprI##format##AllTrue, WASM_LOCAL_GET(simd))); \
CHECK_EQ(1, r.Call(max)); \
CHECK_EQ(1, r.Call(0x1)); \
CHECK_EQ(0, r.Call(0)); \
@@ -4406,28 +3343,6 @@ WASM_SIMD_TEST(S128ConstAllOnes) {
RunSimdConstTest(execution_tier, lower_simd, expected);
}
-void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- Int8BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
-
- CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
- r.Call(0xff, 0x7fff));
- CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
- r.Call(0xfe, 0x7fff));
- CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
- r.Call(0xff, 0x7ffe));
-}
-
WASM_SIMD_TEST(I8x16LeUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
UnsignedLessEqual);
@@ -4445,28 +3360,6 @@ WASM_SIMD_TEST(I8x16GtUMixed) {
UnsignedGreater);
}
-void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
- LowerSimd lower_simd, WasmOpcode opcode,
- Int16BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- byte temp3 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
- WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
- WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
- WASM_LOCAL_GET(temp2))),
- WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
-
- CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xffff, 0x7fffffff));
- CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xfeff, 0x7fffffff));
- CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
- r.Call(0xffff, 0x7ffffeff));
-}
-
WASM_SIMD_TEST(I16x8LeUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
UnsignedLessEqual);
@@ -4571,7 +3464,7 @@ WASM_EXTRACT_I16x8_TEST(S, UINT16) WASM_EXTRACT_I16x8_TEST(I, INT16)
#undef WASM_SIMD_SELECT_TEST
#undef WASM_SIMD_NON_CANONICAL_SELECT_TEST
#undef WASM_SIMD_BOOL_REDUCTION_TEST
-#undef WASM_SIMD_TEST_NO_LOWERING
+#undef WASM_SIMD_TEST
#undef WASM_SIMD_ANYTRUE_TEST
#undef WASM_SIMD_ALLTRUE_TEST
#undef WASM_SIMD_F64x2_QFMA
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 4999502832..250820ed19 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -3083,6 +3083,47 @@ WASM_EXEC_TEST(CallIndirect_canonical) {
CHECK_TRAP(r.Call(5));
}
+WASM_EXEC_TEST(Regress_PushReturns) {
+ ValueType kSigTypes[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(12, 0, kSigTypes);
+ WasmRunner<int32_t> r(execution_tier);
+
+ WasmFunctionCompiler& f1 = r.NewFunction(&sig);
+ BUILD(f1, WASM_I32V(1), WASM_I32V(2), WASM_I32V(3), WASM_I32V(4),
+ WASM_I32V(5), WASM_I32V(6), WASM_I32V(7), WASM_I32V(8), WASM_I32V(9),
+ WASM_I32V(10), WASM_I32V(11), WASM_I32V(12));
+
+ BUILD(r, WASM_CALL_FUNCTION0(f1.function_index()), WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP, WASM_DROP,
+ WASM_DROP, WASM_DROP, WASM_DROP);
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(Regress_EnsureArguments) {
+ ValueType kSigTypes[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+ kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig(0, 12, kSigTypes);
+ WasmRunner<int32_t> r(execution_tier);
+
+ WasmFunctionCompiler& f2 = r.NewFunction(&sig);
+ BUILD(f2, kExprReturn);
+
+ BUILD(r, WASM_I32V(42), kExprReturn,
+ WASM_CALL_FUNCTION(f2.function_index(), WASM_I32V(1)));
+ CHECK_EQ(42, r.Call());
+}
+
+WASM_EXEC_TEST(Regress_PushControl) {
+ EXPERIMENTAL_FLAG_SCOPE(mv);
+ WasmRunner<int32_t> r(execution_tier);
+ BUILD(r, WASM_I32V(42),
+ WASM_IF(WASM_I32V(0), WASM_UNREACHABLE, kExprIf, kVoidCode, kExprEnd));
+ CHECK_EQ(42, r.Call());
+}
+
WASM_EXEC_TEST(F32Floor) {
WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_FLOOR(WASM_LOCAL_GET(0)));
@@ -3341,7 +3382,7 @@ static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
WasmRunner<void> r(tier);
- FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
+ FunctionSig* sig = sigs.many(r.zone(), kWasmVoid, param, num_params);
r.builder().AddSignature(sig);
r.builder().AddSignature(sig);
@@ -3821,6 +3862,18 @@ TEST(Regression_1085507) {
WASM_BLOCK_X(sig_v_i, kExprDrop), kExprElse, kExprEnd, WASM_I32V_1(0));
}
+TEST(Regression_1185323_1185492) {
+ WasmRunner<int32_t> r(TestExecutionTier::kInterpreter);
+ r.builder().AddIndirectFunctionTable(nullptr, 1);
+ BUILD(r, WASM_I32V_1(0),
+ // Use a long leb128 encoding of kExprTableSize instruction.
+ // This exercises a bug in the interpreter which tries to read the
+ // immediate at pc+2 (it should be pc+4).
+ kNumericPrefix, 0x90, 0x80, 0x00, 0x00, // table.size 0.
+ WASM_UNREACHABLE, kExprTableSet, 0x00); // Hits a DCHECK if reached.
+ r.Call();
+}
+
#undef B1
#undef B2
#undef RET
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 92ad205070..639bc66336 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -393,7 +393,7 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
// Add an invalid global to the module. The decoder will fail there.
- builder.AddGlobal(kWasmStmt, true, WasmInitExpr::GlobalGet(12));
+ builder.AddGlobal(kWasmVoid, true, WasmInitExpr::GlobalGet(12));
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
uint8_t code[] = {kExprLocalGet, 0, kExprEnd};
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 156bfb55ac..9c2edfe75f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -237,7 +237,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
CHECK_EQ(expected.locals.size(), num_locals);
for (int i = 0; i < num_locals; ++i) {
WasmValue local_value = debug_info->GetLocalValue(
- i, frame->pc(), frame->fp(), frame->callee_fp());
+ i, frame->pc(), frame->fp(), frame->callee_fp(), isolate_);
CHECK_EQ(WasmValWrapper{expected.locals[i]}, WasmValWrapper{local_value});
}
@@ -245,7 +245,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
CHECK_EQ(expected.stack.size(), stack_depth);
for (int i = 0; i < stack_depth; ++i) {
WasmValue stack_value = debug_info->GetStackValue(
- i, frame->pc(), frame->fp(), frame->callee_fp());
+ i, frame->pc(), frame->fp(), frame->callee_fp(), isolate_);
CHECK_EQ(WasmValWrapper{expected.stack[i]}, WasmValWrapper{stack_value});
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index 2c49e2de81..b55b971b71 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -7,6 +7,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-metrics.h"
#include "src/api/api-inl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "test/cctest/cctest.h"
#include "test/common/wasm/flag-utils.h"
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index e23a549ddd..79ba524ffb 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -205,8 +205,8 @@ TEST(DeserializeWithSourceUrl) {
const std::string url = "http://example.com/example.wasm";
Handle<WasmModuleObject> module_object;
CHECK(test.Deserialize(VectorOf(url)).ToHandle(&module_object));
- String source_url = String::cast(module_object->script().source_url());
- CHECK_EQ(url, source_url.ToCString().get());
+ String url_str = String::cast(module_object->script().name());
+ CHECK_EQ(url, url_str.ToCString().get());
}
test.CollectGarbage();
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 9faab4479e..5e59f13c3f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -74,13 +74,21 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// Line and column are 1-based in v8::StackFrame, just as in ExceptionInfo.
CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
CHECK_EQ(excInfos[frameNr].column, frame->GetColumn());
+ v8::Local<v8::String> scriptSource = frame->GetScriptSource();
+ if (frame->IsWasm()) {
+ CHECK(scriptSource.IsEmpty());
+ } else {
+ CHECK(scriptSource->IsString());
+ }
}
- CheckComputeLocation(i_isolate, exc, excInfos[0]);
+ CheckComputeLocation(i_isolate, exc, excInfos[0],
+ stack->GetFrame(v8_isolate, 0));
}
void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
- const ExceptionInfo& topLocation) {
+ const ExceptionInfo& topLocation,
+ const v8::Local<v8::StackFrame> stackFrame) {
MessageLocation loc;
CHECK(i_isolate->ComputeLocationFromStackTrace(&loc, exc));
printf("loc start: %d, end: %d\n", loc.start_pos(), loc.end_pos());
@@ -97,6 +105,13 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// whether Script::PositionInfo.column should be the offset
// relative to the module or relative to the function.
// CHECK_EQ(topLocation.column - 1, message->GetColumnNumber());
+ String scriptSource = message->GetSource();
+ CHECK(scriptSource.IsString());
+ if (stackFrame->IsWasm()) {
+ CHECK_EQ(scriptSource.length(), 0);
+ } else {
+ CHECK_GT(scriptSource.length(), 0);
+ }
}
#undef CHECK_CSTREQ
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 82f7824315..163e7b8799 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -10,6 +10,7 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
@@ -359,16 +360,18 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
const byte* start, const byte* end) {
WasmFeatures unused_detected_features;
FunctionBody body(sig, 0, start, end);
+ std::vector<compiler::WasmLoopInfo> loops;
DecodeResult result =
BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, nullptr);
+ &unused_detected_features, body, &loops, nullptr);
if (result.failed()) {
#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
// Retry the compilation with the tracing flag on, to help in debugging.
FLAG_trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr,
- builder, &unused_detected_features, body, nullptr);
+ result =
+ BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
+ &unused_detected_features, body, &loops, nullptr);
}
#endif
@@ -376,9 +379,6 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
result.error().message().c_str());
}
builder->LowerInt64(compiler::WasmGraphBuilder::kCalledFromWasm);
- if (!CpuFeatures::SupportsWasmSimd128()) {
- builder->SimdScalarLoweringForTesting();
- }
}
void TestBuildingGraph(Zone* zone, compiler::JSGraph* jsgraph,
@@ -481,8 +481,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
for (size_t i = 0; i < num_params + 1; i++) {
rep_builder.AddParam(MachineRepresentation::kWord32);
}
- compiler::Int64Lowering r(graph(), machine(), common(), zone(),
- rep_builder.Build());
+ compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
+ zone(), rep_builder.Build());
r.LowerGraph();
}
@@ -561,8 +561,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
DCHECK_NOT_NULL(code);
DisallowGarbageCollection no_gc;
Script script = builder_->instance_object()->module_object().script();
- std::unique_ptr<char[]> source_url =
- String::cast(script.source_url()).ToCString();
+ std::unique_ptr<char[]> source_url = String::cast(script.name()).ToCString();
if (WasmCode::ShouldBeLogged(isolate())) {
code->LogCode(isolate(), source_url.get(), script.id());
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index f873390283..e4b0868e47 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -531,6 +531,19 @@ class WasmRunnerBase : public InitializedHandleScope {
static bool trap_happened;
};
+template <typename T>
+inline WasmValue WasmValueInitializer(T value) {
+ return WasmValue(value);
+}
+template <>
+inline WasmValue WasmValueInitializer(int8_t value) {
+ return WasmValue(static_cast<int32_t>(value));
+}
+template <>
+inline WasmValue WasmValueInitializer(int16_t value) {
+ return WasmValue(static_cast<int32_t>(value));
+}
+
template <typename ReturnType, typename... ParamTypes>
class WasmRunner : public WasmRunnerBase {
public:
@@ -557,6 +570,11 @@ class WasmRunner : public WasmRunnerBase {
lower_simd) {}
ReturnType Call(ParamTypes... p) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ // Save the original context, because CEntry (for runtime calls) will
+ // reset / invalidate it when returning.
+ SaveContext save_context(isolate);
+
DCHECK(compiled_);
if (interpret()) return CallInterpreter(p...);
@@ -586,7 +604,7 @@ class WasmRunner : public WasmRunnerBase {
ReturnType CallInterpreter(ParamTypes... p) {
interpreter()->Reset();
- std::array<WasmValue, sizeof...(p)> args{{WasmValue(p)...}};
+ std::array<WasmValue, sizeof...(p)> args{{WasmValueInitializer(p)...}};
interpreter()->InitFrame(function(), args.data());
interpreter()->Run();
CHECK_GT(interpreter()->NumInterpretedCalls(), 0);
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
new file mode 100644
index 0000000000..64a3e63aaa
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
@@ -0,0 +1,752 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/wasm/wasm-simd-utils.h"
+
+#include <cmath>
+
+#include "src/base/logging.h"
+#include "src/base/memory.h"
+#include "src/common/globals.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8UnOp expected_op) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT8_INPUTS(x) {
+ r.Call(x);
+ int8_t expected = expected_op(x);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
+ }
+ }
+}
+
+template <typename T, typename OpType>
+void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op) {
+ WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
+ // Global to hold output.
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ for (T x : compiler::ValueHelper::GetVector<T>()) {
+ for (T y : compiler::ValueHelper::GetVector<T>()) {
+ r.Call(x, y);
+ T expected = expected_op(x, y);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
+ }
+ }
+ }
+}
+
+// Explicit instantiations of uses.
+template void RunI8x16BinOpTest<int8_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Int8BinOp);
+
+template void RunI8x16BinOpTest<uint8_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Uint8BinOp);
+
+void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8ShiftOp expected_op) {
+ // Intentionally shift by 8, should be no-op.
+ for (int shift = 1; shift <= 8; shift++) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+ int8_t* g_imm = r.builder().AddGlobal<int8_t>(kWasmS128);
+ int8_t* g_mem = r.builder().AddGlobal<int8_t>(kWasmS128);
+ byte value = 0;
+ byte simd = r.AllocateLocal(kWasmS128);
+ // Shift using an immediate, and shift using a value loaded from memory.
+ BUILD(
+ r, WASM_LOCAL_SET(simd, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+ WASM_I32V(shift))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+ opcode, WASM_LOCAL_GET(simd),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+ WASM_ONE);
+
+ r.builder().WriteMemory(&memory[0], shift);
+ FOR_INT8_INPUTS(x) {
+ r.Call(x);
+ int8_t expected = expected_op(x, shift);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_imm[i]));
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g_mem[i]));
+ }
+ }
+ }
+}
+
+void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int8BinOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ byte temp3 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I8x16_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xff, 0x7fff));
+ CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xfe, 0x7fff));
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
+ r.Call(0xff, 0x7ffe));
+}
+
+void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16UnOp expected_op) {
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+ WASM_ONE);
+
+ FOR_INT16_INPUTS(x) {
+ r.Call(x);
+ int16_t expected = expected_op(x);
+ for (int i = 0; i < 8; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g[i]));
+ }
+ }
+}
+
+template <typename T, typename OpType>
+void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op) {
+ WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
+ // Global to hold output.
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value2))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+ WASM_LOCAL_GET(temp2))),
+ WASM_ONE);
+
+ for (T x : compiler::ValueHelper::GetVector<T>()) {
+ for (T y : compiler::ValueHelper::GetVector<T>()) {
+ r.Call(x, y);
+ T expected = expected_op(x, y);
+ for (int i = 0; i < 8; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
+ }
+ }
+ }
+}
+
+// Explicit instantiations of uses.
+template void RunI16x8BinOpTest<int16_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Int16BinOp);
+template void RunI16x8BinOpTest<uint16_t>(TestExecutionTier, LowerSimd,
+ WasmOpcode, Uint16BinOp);
+
+// Lane-wise i16x8 shift-op test for every shift amount 1..16 (16 is the
+// lane width, so that iteration checks the no-op/wrap behavior). Each shift
+// is done twice — with an immediate and with the amount loaded from memory —
+// and both vector results must match |expected_op| in every lane.
+void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                         WasmOpcode opcode, Int16ShiftOp expected_op) {
+  // Intentionally shift by 16, should be no-op.
+  for (int shift = 1; shift <= 16; shift++) {
+    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+    int16_t* g_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
+    int16_t* g_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
+    byte value = 0;
+    byte simd = r.AllocateLocal(kWasmS128);
+    // Shift using an immediate, and shift using a value loaded from memory.
+    BUILD(
+        r, WASM_LOCAL_SET(simd, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+                                              WASM_I32V(shift))),
+        WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+                               opcode, WASM_LOCAL_GET(simd),
+                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+        WASM_ONE);
+
+    r.builder().WriteMemory(&memory[0], shift);
+    FOR_INT16_INPUTS(x) {
+      r.Call(x);
+      int16_t expected = expected_op(x, shift);
+      for (int i = 0; i < 8; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_imm[i]));
+        CHECK_EQ(expected, ReadLittleEndianValue<int16_t>(&g_mem[i]));
+      }
+    }
+  }
+}
+
+// Relational-op test with mixed-width operands: the first operand is an
+// i16x8 splat, the second an i32x4 splat, so lanes see truncated 32-bit
+// values. Only lane 0 of the comparison result is extracted and checked.
+void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
+                                   LowerSimd lower_simd, WasmOpcode opcode,
+                                   Int16BinOp expected_op) {
+  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  byte temp3 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I16x8_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_LOCAL_SET(temp3, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                              WASM_LOCAL_GET(temp2))),
+        WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_LOCAL_GET(temp3)));
+
+  // The expected value truncates the 32-bit operand to its low 16 bits.
+  CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
+           r.Call(0xffff, 0x7fffffff));
+  CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
+           r.Call(0xfeff, 0x7fffffff));
+  CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
+           r.Call(0xffff, 0x7ffffeff));
+}
+
+// Lane-wise i32x4 unary-op test: splat the scalar input, apply |opcode|,
+// store the vector to a global, and check all 4 lanes against |expected_op|.
+void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                      WasmOpcode opcode, Int32UnOp expected_op) {
+  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+  // Build fn to splat test value, perform unop, and write the result.
+  byte value = 0;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+        WASM_ONE);
+
+  FOR_INT32_INPUTS(x) {
+    r.Call(x);
+    int32_t expected = expected_op(x);
+    for (int i = 0; i < 4; i++) {
+      CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+    }
+  }
+}
+
+// Lane-wise i32x4 binary-op test: splat both scalar inputs, apply |opcode|,
+// store the vector to a global, and check all 4 lanes against |expected_op|
+// over every pair of int32 test inputs.
+void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, Int32BinOp expected_op) {
+  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+  // Build fn to splat test values, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_INT32_INPUTS(x) {
+    FOR_INT32_INPUTS(y) {
+      r.Call(x, y);
+      int32_t expected = expected_op(x, y);
+      for (int i = 0; i < 4; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+      }
+    }
+  }
+}
+
+// Lane-wise i32x4 shift-op test for every shift amount 1..32 (32 is the
+// lane width, checking the no-op/wrap case). Shifts are performed both with
+// an immediate and with the amount loaded from memory; both vector results
+// must match |expected_op| in every lane.
+void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                         WasmOpcode opcode, Int32ShiftOp expected_op) {
+  // Intentionally shift by 32, should be no-op.
+  for (int shift = 1; shift <= 32; shift++) {
+    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+    int32_t* g_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
+    int32_t* g_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
+    byte value = 0;
+    byte simd = r.AllocateLocal(kWasmS128);
+    // Shift using an immediate, and shift using a value loaded from memory.
+    BUILD(
+        r, WASM_LOCAL_SET(simd, WASM_SIMD_I32x4_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+                                              WASM_I32V(shift))),
+        WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+                               opcode, WASM_LOCAL_GET(simd),
+                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+        WASM_ONE);
+
+    r.builder().WriteMemory(&memory[0], shift);
+    FOR_INT32_INPUTS(x) {
+      r.Call(x);
+      int32_t expected = expected_op(x, shift);
+      for (int i = 0; i < 4; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_imm[i]));
+        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g_mem[i]));
+      }
+    }
+  }
+}
+
+// Lane-wise i64x2 unary-op test: splat the scalar input, apply |opcode|,
+// store the vector to a global, and check both lanes against |expected_op|.
+void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                      WasmOpcode opcode, Int64UnOp expected_op) {
+  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test value, perform unop, and write the result.
+  byte value = 0;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+        WASM_ONE);
+
+  FOR_INT64_INPUTS(x) {
+    r.Call(x);
+    int64_t expected = expected_op(x);
+    for (int i = 0; i < 2; i++) {
+      CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+    }
+  }
+}
+
+// Lane-wise i64x2 binary-op test: splat both scalar inputs, apply |opcode|,
+// store the vector to a global, and check both lanes against |expected_op|
+// over every pair of int64 test inputs.
+void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, Int64BinOp expected_op) {
+  WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
+  // Global to hold output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test values, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_INT64_INPUTS(x) {
+    FOR_INT64_INPUTS(y) {
+      r.Call(x, y);
+      int64_t expected = expected_op(x, y);
+      for (int i = 0; i < 2; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+      }
+    }
+  }
+}
+
+// Lane-wise i64x2 shift-op test for every shift amount 1..64 (64 is the
+// lane width, checking the no-op/wrap case). Shifts are performed both with
+// an immediate and with the amount loaded from memory; both vector results
+// must match |expected_op| in every lane.
+void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                         WasmOpcode opcode, Int64ShiftOp expected_op) {
+  // Intentionally shift by 64, should be no-op.
+  for (int shift = 1; shift <= 64; shift++) {
+    WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
+    int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
+    int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
+    byte value = 0;
+    byte simd = r.AllocateLocal(kWasmS128);
+    // Shift using an immediate, and shift using a value loaded from memory.
+    BUILD(
+        r, WASM_LOCAL_SET(simd, WASM_SIMD_I64x2_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_SHIFT_OP(opcode, WASM_LOCAL_GET(simd),
+                                              WASM_I32V(shift))),
+        WASM_GLOBAL_SET(1, WASM_SIMD_SHIFT_OP(
+                               opcode, WASM_LOCAL_GET(simd),
+                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
+        WASM_ONE);
+
+    r.builder().WriteMemory(&memory[0], shift);
+    FOR_INT64_INPUTS(x) {
+      r.Call(x);
+      int64_t expected = expected_op(x, shift);
+      for (int i = 0; i < 2; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
+        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
+      }
+    }
+  }
+}
+
+// Returns true for f32 magnitudes outside [1e-32, 1e32]. Such values are
+// skipped by the approximate (non-exact) checks in the unop tests, where
+// relative error grows at the extremes. Zero and -0 are never extreme.
+bool IsExtreme(float x) {
+  float abs_x = std::fabs(x);
+  const float kSmallFloatThreshold = 1.0e-32f;
+  const float kLargeFloatThreshold = 1.0e32f;
+  return abs_x != 0.0f &&  // 0 or -0 are fine.
+         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
+}
+
+// Returns true if |actual| matches the NaN |expected| ignoring the sign bit,
+// and also accepting a version with the quiet bit (0x00400000) set, since
+// some implementations quiet signaling NaNs.
+bool IsSameNan(float expected, float actual) {
+  // Sign is non-deterministic.
+  uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~0x80000000;
+  uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~0x80000000;
+  // Some implementations convert signaling NaNs to quiet NaNs.
+  return (expected_bits == actual_bits) ||
+         ((expected_bits | 0x00400000) == actual_bits);
+}
+
+// Returns true if |actual| has no bits set outside the sign, exponent, and
+// quiet-NaN bit — i.e. a canonical NaN pattern (quiet bit set, no payload).
+bool IsCanonical(float actual) {
+  uint32_t actual_bits = bit_cast<uint32_t>(actual);
+  // Canonical NaN has quiet bit and no payload.
+  return (actual_bits & 0xFFC00000) == actual_bits;
+}
+
+// Verifies one f32 lane result against |expected| for inputs |x| and |y|.
+// NaN results accept: the (sign-ignored) NaN of either input, the expected
+// NaN modulo quieting, or the canonical NaN. Non-NaN results are compared
+// exactly when |exact|, otherwise within a 1% relative error band (the
+// initial equality check also handles +/-Infinity).
+void CheckFloatResult(float x, float y, float expected, float actual,
+                      bool exact) {
+  if (std::isnan(expected)) {
+    CHECK(std::isnan(actual));
+    if (std::isnan(x) && IsSameNan(x, actual)) return;
+    if (std::isnan(y) && IsSameNan(y, actual)) return;
+    if (IsSameNan(expected, actual)) return;
+    if (IsCanonical(actual)) return;
+    // This is expected to assert; it's useful for debugging.
+    CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
+  } else {
+    if (exact) {
+      CHECK_EQ(expected, actual);
+      // The sign of 0's must match.
+      CHECK_EQ(std::signbit(expected), std::signbit(actual));
+      return;
+    }
+    // Otherwise, perform an approximate equality test. First check for
+    // equality to handle +/-Infinity where approximate equality doesn't work.
+    if (expected == actual) return;
+
+    // 1% error allows all platforms to pass easily.
+    constexpr float kApproximationError = 0.01f;
+    float abs_error = std::abs(expected) * kApproximationError,
+          min = expected - abs_error, max = expected + abs_error;
+    CHECK_LE(min, actual);
+    CHECK_GE(max, actual);
+  }
+}
+
+// Lane-wise f32x4 unary-op test over the standard float inputs plus the
+// extra NaN/infinity bit patterns in nan_test_array. |exact| selects exact
+// vs. approximate comparison (see CheckFloatResult); extreme magnitudes are
+// skipped for approximate checks.
+void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                      WasmOpcode opcode, FloatUnOp expected_op, bool exact) {
+  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
+  // Global to hold output.
+  float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test value, perform unop, and write the result.
+  byte value = 0;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+        WASM_ONE);
+
+  FOR_FLOAT32_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    // Extreme values have larger errors so skip them for approximation tests.
+    if (!exact && IsExtreme(x)) continue;
+    float expected = expected_op(x);
+#if V8_OS_AIX
+    if (!MightReverseSign<FloatUnOp>(expected_op))
+      expected = FpOpWorkaround<float>(x, expected);
+#endif
+    if (!PlatformCanRepresent(expected)) continue;
+    r.Call(x);
+    for (int i = 0; i < 4; i++) {
+      float actual = ReadLittleEndianValue<float>(&g[i]);
+      CheckFloatResult(x, x, expected, actual, exact);
+    }
+  }
+
+  FOR_FLOAT32_NAN_INPUTS(i) {
+    float x = bit_cast<float>(nan_test_array[i]);
+    if (!PlatformCanRepresent(x)) continue;
+    // Extreme values have larger errors so skip them for approximation tests.
+    if (!exact && IsExtreme(x)) continue;
+    float expected = expected_op(x);
+    if (!PlatformCanRepresent(expected)) continue;
+    r.Call(x);
+    for (int i = 0; i < 4; i++) {
+      float actual = ReadLittleEndianValue<float>(&g[i]);
+      CheckFloatResult(x, x, expected, actual, exact);
+    }
+  }
+}
+
+// Lane-wise f32x4 binary-op test over all pairs of standard float inputs,
+// plus all pairs of the extra NaN/infinity patterns in nan_test_array.
+// Results are checked exactly in all four lanes (see CheckFloatResult).
+void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, FloatBinOp expected_op) {
+  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
+  // Global to hold output.
+  float* g = r.builder().AddGlobal<float>(kWasmS128);
+  // Build fn to splat test values, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_FLOAT32_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT32_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      float expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 4; i++) {
+        float actual = ReadLittleEndianValue<float>(&g[i]);
+        CheckFloatResult(x, y, expected, actual, true /* exact */);
+      }
+    }
+  }
+
+  FOR_FLOAT32_NAN_INPUTS(i) {
+    float x = bit_cast<float>(nan_test_array[i]);
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT32_NAN_INPUTS(j) {
+      float y = bit_cast<float>(nan_test_array[j]);
+      if (!PlatformCanRepresent(y)) continue;
+      float expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 4; i++) {
+        float actual = ReadLittleEndianValue<float>(&g[i]);
+        CheckFloatResult(x, y, expected, actual, true /* exact */);
+      }
+    }
+  }
+}
+
+// Lane-wise f32x4 comparison test: both operands are splats, so every lane
+// of the i32x4 mask result must equal |expected_op(x, y)|. Pairs whose
+// difference is unrepresentable on this platform are skipped.
+void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
+                           LowerSimd lower_simd, WasmOpcode opcode,
+                           FloatCompareOp expected_op) {
+  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
+  // Set up global to hold mask output.
+  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
+  // Build fn to splat test values, perform compare op, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_FLOAT32_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT32_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      float diff = x - y;  // Model comparison as subtraction.
+      if (!PlatformCanRepresent(diff)) continue;
+      r.Call(x, y);
+      int32_t expected = expected_op(x, y);
+      for (int i = 0; i < 4; i++) {
+        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
+      }
+    }
+  }
+}
+
+// f64 counterpart of IsExtreme(float): true for magnitudes outside
+// [1e-298, 1e298]. Zero and -0 are never extreme.
+bool IsExtreme(double x) {
+  double abs_x = std::fabs(x);
+  const double kSmallFloatThreshold = 1.0e-298;
+  const double kLargeFloatThreshold = 1.0e298;
+  return abs_x != 0.0f &&  // 0 or -0 are fine.
+         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
+}
+
+// f64 counterpart of IsSameNan(float): matches modulo sign bit and also
+// accepts the quiet bit (0x0008000000000000) being set.
+bool IsSameNan(double expected, double actual) {
+  // Sign is non-deterministic.
+  uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
+  uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
+  // Some implementations convert signaling NaNs to quiet NaNs.
+  return (expected_bits == actual_bits) ||
+         ((expected_bits | 0x0008000000000000) == actual_bits);
+}
+
+// f64 counterpart of IsCanonical(float): no bits set outside the sign,
+// exponent, and quiet-NaN bit (no payload).
+bool IsCanonical(double actual) {
+  uint64_t actual_bits = bit_cast<uint64_t>(actual);
+  // Canonical NaN has quiet bit and no payload.
+  return (actual_bits & 0xFFF8000000000000) == actual_bits;
+}
+
+// f64 counterpart of CheckFloatResult: NaN results accept either input's
+// NaN (sign-ignored), the expected NaN modulo quieting, or the canonical
+// NaN; non-NaN results compare exactly when |exact|, else within 1%.
+void CheckDoubleResult(double x, double y, double expected, double actual,
+                       bool exact) {
+  if (std::isnan(expected)) {
+    CHECK(std::isnan(actual));
+    if (std::isnan(x) && IsSameNan(x, actual)) return;
+    if (std::isnan(y) && IsSameNan(y, actual)) return;
+    if (IsSameNan(expected, actual)) return;
+    if (IsCanonical(actual)) return;
+    // This is expected to assert; it's useful for debugging.
+    CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
+  } else {
+    if (exact) {
+      CHECK_EQ(expected, actual);
+      // The sign of 0's must match.
+      CHECK_EQ(std::signbit(expected), std::signbit(actual));
+      return;
+    }
+    // Otherwise, perform an approximate equality test. First check for
+    // equality to handle +/-Infinity where approximate equality doesn't work.
+    if (expected == actual) return;
+
+    // 1% error allows all platforms to pass easily.
+    constexpr double kApproximationError = 0.01f;
+    double abs_error = std::abs(expected) * kApproximationError,
+           min = expected - abs_error, max = expected + abs_error;
+    CHECK_LE(min, actual);
+    CHECK_GE(max, actual);
+  }
+}
+
+// Lane-wise f64x2 unary-op test over the standard double inputs plus the
+// extra NaN/infinity bit patterns in double_nan_test_array. |exact| selects
+// exact vs. approximate comparison (see CheckDoubleResult); extreme
+// magnitudes are skipped for approximate checks.
+void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                      WasmOpcode opcode, DoubleUnOp expected_op, bool exact) {
+  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+  // Global to hold output.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test value, perform unop, and write the result.
+  byte value = 0;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_UNOP(opcode, WASM_LOCAL_GET(temp1))),
+        WASM_ONE);
+
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    // Extreme values have larger errors so skip them for approximation tests.
+    if (!exact && IsExtreme(x)) continue;
+    double expected = expected_op(x);
+#if V8_OS_AIX
+    if (!MightReverseSign<DoubleUnOp>(expected_op))
+      expected = FpOpWorkaround<double>(x, expected);
+#endif
+    if (!PlatformCanRepresent(expected)) continue;
+    r.Call(x);
+    for (int i = 0; i < 2; i++) {
+      double actual = ReadLittleEndianValue<double>(&g[i]);
+      CheckDoubleResult(x, x, expected, actual, exact);
+    }
+  }
+
+  FOR_FLOAT64_NAN_INPUTS(i) {
+    double x = bit_cast<double>(double_nan_test_array[i]);
+    if (!PlatformCanRepresent(x)) continue;
+    // Extreme values have larger errors so skip them for approximation tests.
+    if (!exact && IsExtreme(x)) continue;
+    double expected = expected_op(x);
+    if (!PlatformCanRepresent(expected)) continue;
+    r.Call(x);
+    for (int i = 0; i < 2; i++) {
+      double actual = ReadLittleEndianValue<double>(&g[i]);
+      CheckDoubleResult(x, x, expected, actual, exact);
+    }
+  }
+}
+
+// Lane-wise f64x2 binary-op test over all pairs of standard double inputs,
+// plus all pairs of the extra NaN/infinity patterns in
+// double_nan_test_array. Results are checked exactly in both lanes
+// (see CheckDoubleResult).
+void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+                       WasmOpcode opcode, DoubleBinOp expected_op) {
+  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+  // Global to hold output.
+  double* g = r.builder().AddGlobal<double>(kWasmS128);
+  // Build fn to splat test value, perform binop, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_INPUTS(y) {
+      // Fix: check |y| here (was |x|, already checked by the outer loop),
+      // matching RunF32x4BinOpTest; otherwise unrepresentable y slip through.
+      if (!PlatformCanRepresent(y)) continue;
+      double expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 2; i++) {
+        double actual = ReadLittleEndianValue<double>(&g[i]);
+        CheckDoubleResult(x, y, expected, actual, true /* exact */);
+      }
+    }
+  }
+
+  FOR_FLOAT64_NAN_INPUTS(i) {
+    double x = bit_cast<double>(double_nan_test_array[i]);
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_NAN_INPUTS(j) {
+      double y = bit_cast<double>(double_nan_test_array[j]);
+      // Also guard |y| here, as the F32x4 variant does.
+      if (!PlatformCanRepresent(y)) continue;
+      double expected = expected_op(x, y);
+      if (!PlatformCanRepresent(expected)) continue;
+      r.Call(x, y);
+      for (int i = 0; i < 2; i++) {
+        double actual = ReadLittleEndianValue<double>(&g[i]);
+        CheckDoubleResult(x, y, expected, actual, true /* exact */);
+      }
+    }
+  }
+}
+
+// Lane-wise f64x2 comparison test. Unlike the F32x4 variant, the operands
+// differ per lane: temp1 holds (x, y) (splat of x with lane 1 replaced by
+// y) and temp2 holds (y, y), so lane 0 checks op(x, y) and lane 1 checks
+// op(y, y).
+void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
+                           LowerSimd lower_simd, WasmOpcode opcode,
+                           DoubleCompareOp expected_op) {
+  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+  // Set up global to hold mask output.
+  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+  // Build fn to splat test values, perform compare op, and write the result.
+  byte value1 = 0, value2 = 1;
+  byte temp1 = r.AllocateLocal(kWasmS128);
+  byte temp2 = r.AllocateLocal(kWasmS128);
+  // Make the lanes of each temp compare differently:
+  // temp1 = y, x and temp2 = y, y.
+  BUILD(r, WASM_LOCAL_SET(temp1, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value1))),
+        WASM_LOCAL_SET(temp1,
+                       WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_LOCAL_GET(temp1),
+                                                    WASM_LOCAL_GET(value2))),
+        WASM_LOCAL_SET(temp2, WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value2))),
+        WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(opcode, WASM_LOCAL_GET(temp1),
+                                           WASM_LOCAL_GET(temp2))),
+        WASM_ONE);
+
+  FOR_FLOAT64_INPUTS(x) {
+    if (!PlatformCanRepresent(x)) continue;
+    FOR_FLOAT64_INPUTS(y) {
+      if (!PlatformCanRepresent(y)) continue;
+      double diff = x - y;  // Model comparison as subtraction.
+      if (!PlatformCanRepresent(diff)) continue;
+      r.Call(x, y);
+      int64_t expected0 = expected_op(x, y);
+      int64_t expected1 = expected_op(y, y);
+      CHECK_EQ(expected0, ReadLittleEndianValue<int64_t>(&g[0]));
+      CHECK_EQ(expected1, ReadLittleEndianValue<int64_t>(&g[1]));
+    }
+  }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.h b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
new file mode 100644
index 0000000000..157731df27
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.h
@@ -0,0 +1,177 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/base/macros.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using Int8UnOp = int8_t (*)(int8_t);
+using Int8BinOp = int8_t (*)(int8_t, int8_t);
+using Uint8BinOp = uint8_t (*)(uint8_t, uint8_t);
+using Int8CompareOp = int (*)(int8_t, int8_t);
+using Int8ShiftOp = int8_t (*)(int8_t, int);
+
+using Int16UnOp = int16_t (*)(int16_t);
+using Int16BinOp = int16_t (*)(int16_t, int16_t);
+using Uint16BinOp = uint16_t (*)(uint16_t, uint16_t);
+using Int16ShiftOp = int16_t (*)(int16_t, int);
+using Int32UnOp = int32_t (*)(int32_t);
+using Int32BinOp = int32_t (*)(int32_t, int32_t);
+using Int32ShiftOp = int32_t (*)(int32_t, int);
+using Int64UnOp = int64_t (*)(int64_t);
+using Int64BinOp = int64_t (*)(int64_t, int64_t);
+using Int64ShiftOp = int64_t (*)(int64_t, int);
+using FloatUnOp = float (*)(float);
+using FloatBinOp = float (*)(float, float);
+using FloatCompareOp = int32_t (*)(float, float);
+using DoubleUnOp = double (*)(double);
+using DoubleBinOp = double (*)(double, double);
+using DoubleCompareOp = int64_t (*)(double, double);
+
+void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8UnOp expected_op);
+
+template <typename T = int8_t, typename OpType = T (*)(T, T)>
+void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op);
+
+void RunI8x16ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int8ShiftOp expected_op);
+void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int8BinOp expected_op);
+
+void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16UnOp expected_op);
+template <typename T = int16_t, typename OpType = T (*)(T, T)>
+void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op);
+void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int16ShiftOp expected_op);
+void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ Int16BinOp expected_op);
+
+void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32UnOp expected_op);
+void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32BinOp expected_op);
+void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int32ShiftOp expected_op);
+
+void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64UnOp expected_op);
+void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64BinOp expected_op);
+void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64ShiftOp expected_op);
+
+// Generic expected value functions.
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
+T Negate(T a) {
+ return -a;
+}
+
+#if V8_OS_AIX
+// True if |float_op| may flip the operand's sign (Negate or std::abs).
+// Used to gate the AIX FpOpWorkaround in the float/double unop tests.
+template <typename T>
+bool MightReverseSign(T float_op) {
+  return float_op == static_cast<T>(Negate) ||
+         float_op == static_cast<T>(std::abs);
+}
+#endif
+
+// Test some values not included in the float inputs from value_helper. These
+// tests are useful for opcodes that are synthesized during code gen, like Min
+// and Max on ia32 and x64.
+static constexpr uint32_t nan_test_array[] = {
+ // Bit patterns of quiet NaNs and signaling NaNs, with or without
+ // additional payload.
+ 0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
+ // NaN with top payload bit unset.
+ 0x7FA00000,
+ // Both Infinities.
+ 0x7F800000, 0xFF800000,
+ // Some "normal" numbers, 1 and -1.
+ 0x3F800000, 0xBF800000};
+
+#define FOR_FLOAT32_NAN_INPUTS(i) \
+ for (size_t i = 0; i < arraysize(nan_test_array); ++i)
+
+// Test some values not included in the double inputs from value_helper. These
+// tests are useful for opcodes that are synthesized during code gen, like Min
+// and Max on ia32 and x64.
+static constexpr uint64_t double_nan_test_array[] = {
+ // quiet NaNs, + and -
+ 0x7FF8000000000001, 0xFFF8000000000001,
+ // with payload
+ 0x7FF8000000000011, 0xFFF8000000000011,
+ // signaling NaNs, + and -
+ 0x7FF0000000000001, 0xFFF0000000000001,
+ // with payload
+ 0x7FF0000000000011, 0xFFF0000000000011,
+ // Both Infinities.
+ 0x7FF0000000000000, 0xFFF0000000000000,
+ // Some "normal" numbers, 1 and -1.
+ 0x3FF0000000000000, 0xBFF0000000000000};
+
+#define FOR_FLOAT64_NAN_INPUTS(i) \
+ for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
+
+// Returns true if the platform can represent the result.
+// On ARM, subnormal values are excluded (presumably because the SIMD unit
+// flushes subnormals to zero — confirm against the target's FP behavior);
+// all other targets accept every value.
+template <typename T>
+bool PlatformCanRepresent(T x) {
+#if V8_TARGET_ARCH_ARM
+  return std::fpclassify(x) != FP_SUBNORMAL;
+#else
+  return true;
+#endif
+}
+
+// Returns true for very small and very large numbers. We skip these test
+// values for the approximation instructions, which don't work at the extremes.
+bool IsExtreme(float x);
+bool IsSameNan(float expected, float actual);
+bool IsCanonical(float actual);
+void CheckFloatResult(float x, float y, float expected, float actual,
+ bool exact = true);
+
+bool IsExtreme(double x);
+bool IsSameNan(double expected, double actual);
+bool IsCanonical(double actual);
+void CheckDoubleResult(double x, double y, double expected, double actual,
+ bool exact = true);
+
+void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatUnOp expected_op,
+ bool exact = true);
+
+void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, FloatBinOp expected_op);
+
+void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ FloatCompareOp expected_op);
+
+void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleUnOp expected_op,
+ bool exact = true);
+void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleBinOp expected_op);
+void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode opcode,
+ DoubleCompareOp expected_op);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index fb1a1fcddf..ba021366cb 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -121,8 +121,8 @@ class TestSignatures {
FunctionSig* iii_v() { return &sig_iii_v; }
FunctionSig* many(Zone* zone, ValueType ret, ValueType param, int count) {
- FunctionSig::Builder builder(zone, ret == kWasmStmt ? 0 : 1, count);
- if (ret != kWasmStmt) builder.AddReturn(ret);
+ FunctionSig::Builder builder(zone, ret == kWasmVoid ? 0 : 1, count);
+ if (ret != kWasmVoid) builder.AddReturn(ret);
for (int i = 0; i < count; i++) {
builder.AddParam(param);
}
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index 4a4d08524a..9f7217699b 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -894,8 +894,14 @@ class SideTable : public ZoneObject {
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
imm.in_arity(), imm.out_arity());
- CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
- imm.out_arity());
+ int target_stack_height = stack_height - imm.in_arity();
+ if (target_stack_height < 0) {
+ // Allowed in unreachable code, but the stack height stays at 0.
+ DCHECK(unreachable);
+ target_stack_height = 0;
+ }
+ CLabel* end_label = CLabel::New(&control_transfer_zone,
+ target_stack_height, imm.out_arity());
CLabel* catch_label =
CLabel::New(&control_transfer_zone, stack_height, 0);
control_stack.emplace_back(i.pc(), end_label, catch_label,
@@ -940,6 +946,7 @@ class SideTable : public ZoneObject {
TRACE("control @%u: End\n", i.pc_offset());
// Only loops have bound labels.
DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
+ bool rethrow = false;
if (!c->end_label->target) {
if (c->else_label) {
if (*c->pc == kExprIf) {
@@ -948,30 +955,34 @@ class SideTable : public ZoneObject {
} else if (!exception_stack.empty()) {
// No catch_all block, prepare for implicit rethrow.
DCHECK_EQ(*c->pc, kExprTry);
- Control* next_try_block =
- &control_stack[exception_stack.back()];
constexpr int kUnusedControlIndex = -1;
c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
kUnusedControlIndex);
- if (!unreachable) {
- next_try_block->else_label->Ref(
- i.pc(), c->else_label->target_stack_height);
- }
+ DCHECK_IMPLIES(
+ !unreachable,
+ stack_height >= c->else_label->target_stack_height);
+ stack_height = c->else_label->target_stack_height;
+ rethrow = !unreachable;
}
} else if (c->unwind) {
DCHECK_EQ(*c->pc, kExprTry);
rethrow_map_.emplace(i.pc() - i.start(),
static_cast<int>(control_stack.size()) - 1);
if (!exception_stack.empty()) {
- Control* next_try_block =
- &control_stack[exception_stack.back()];
- if (!unreachable) {
- next_try_block->else_label->Ref(i.pc(), stack_height);
- }
+ rethrow = !unreachable;
}
}
c->end_label->Bind(i.pc() + 1);
}
+ if (rethrow) {
+ Control* next_try_block = &control_stack[exception_stack.back()];
+ next_try_block->else_label->Ref(i.pc(), stack_height);
+ // We normally update the max stack height before the switch.
+ // However 'end' is not in the list of throwing opcodes so we don't
+ // take into account that it may unpack an exception.
+ max_stack_height_ =
+ std::max(max_stack_height_, stack_height + max_exception_arity);
+ }
c->Finish(&map_, code->start);
DCHECK_IMPLIES(!unreachable,
@@ -1345,25 +1356,23 @@ class WasmInterpreterInternals {
StackValue(WasmValue v, WasmInterpreterInternals* impl, sp_t index)
: value_(v) {
if (IsReferenceValue()) {
- value_ = WasmValue(Handle<Object>::null());
+ value_ = WasmValue(Handle<Object>::null(), value_.type());
int ref_index = static_cast<int>(index);
- impl->reference_stack_->set(ref_index, *v.to_externref());
+ impl->reference_stack_->set(ref_index, *v.to_ref());
}
}
WasmValue ExtractValue(WasmInterpreterInternals* impl, sp_t index) {
if (!IsReferenceValue()) return value_;
- DCHECK(value_.to_externref().is_null());
+ DCHECK(value_.to_ref().is_null());
int ref_index = static_cast<int>(index);
Isolate* isolate = impl->isolate_;
Handle<Object> ref(impl->reference_stack_->get(ref_index), isolate);
DCHECK(!ref->IsTheHole(isolate));
- return WasmValue(ref);
+ return WasmValue(ref, value_.type());
}
- bool IsReferenceValue() const {
- return value_.type().is_reference_to(HeapType::kExtern);
- }
+ bool IsReferenceValue() const { return value_.type().is_reference(); }
void ClearValue(WasmInterpreterInternals* impl, sp_t index) {
if (!IsReferenceValue()) return;
@@ -1433,13 +1442,13 @@ class WasmInterpreterInternals {
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kOptRef: {
- val = WasmValue(isolate_->factory()->null_value());
+ val = WasmValue(isolate_->factory()->null_value(), p);
break;
}
case kRef: // TODO(7748): Implement.
case kRtt:
case kRttWithDepth:
- case kStmt:
+ case kVoid:
case kBottom:
case kI8:
case kI16:
@@ -1823,7 +1832,7 @@ class WasmInterpreterInternals {
return true;
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
@@ -1848,7 +1857,7 @@ class WasmInterpreterInternals {
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.index, module()->num_declared_data_segments);
@@ -1858,7 +1867,7 @@ class WasmInterpreterInternals {
}
case kExprMemoryCopy: {
MemoryCopyImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
uint64_t size = ToMemType(Pop());
uint64_t src = ToMemType(Pop());
@@ -1877,7 +1886,7 @@ class WasmInterpreterInternals {
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
uint64_t size = ToMemType(Pop());
uint32_t value = Pop().to<uint32_t>();
@@ -1892,7 +1901,7 @@ class WasmInterpreterInternals {
}
case kExprTableInit: {
TableInitImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
@@ -1906,14 +1915,14 @@ class WasmInterpreterInternals {
}
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
*len += imm.length;
instance_object_->dropped_elem_segments()[imm.index] = 1;
return true;
}
case kExprTableCopy: {
TableCopyImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
@@ -1927,13 +1936,13 @@ class WasmInterpreterInternals {
}
case kExprTableGrow: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
isolate_);
auto delta = Pop().to<uint32_t>();
- auto value = Pop().to_externref();
+ auto value = Pop().to_ref();
int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
Push(WasmValue(result));
*len += imm.length;
@@ -1941,7 +1950,7 @@ class WasmInterpreterInternals {
}
case kExprTableSize: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -1953,10 +1962,10 @@ class WasmInterpreterInternals {
}
case kExprTableFill: {
TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
- code->at(pc + 2));
+ code->at(pc + *len));
HandleScope handle_scope(isolate_);
auto count = Pop().to<uint32_t>();
- auto value = Pop().to_externref();
+ auto value = Pop().to_ref();
auto start = Pop().to<uint32_t>();
auto table = handle(
@@ -2391,12 +2400,11 @@ class WasmInterpreterInternals {
BINOP_CASE(I16x8SubSatS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
BINOP_CASE(I16x8SubSatU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
- base::RoundingAverageUnsigned<uint16_t>(a, b))
+ RoundingAverageUnsigned<uint16_t>(a, b))
BINOP_CASE(I16x8Q15MulRSatS, i16x8, int8, 8,
SaturateRoundingQMul<int16_t>(a, b))
BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
BINOP_CASE(I8x16MinU, i8x16, int16, 16,
static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
@@ -2408,7 +2416,7 @@ class WasmInterpreterInternals {
BINOP_CASE(I8x16SubSatS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
BINOP_CASE(I8x16SubSatU, i8x16, int16, 16, SaturateSub<uint8_t>(a, b))
BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
- base::RoundingAverageUnsigned<uint8_t>(a, b))
+ RoundingAverageUnsigned<uint8_t>(a, b))
#undef BINOP_CASE
#define UNOP_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
@@ -2753,28 +2761,6 @@ class WasmInterpreterInternals {
Push(WasmValue(Simd128(res)));
return true;
}
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result1); \
- res.val[LANE(i, res)] = result1; \
- auto result2 = s2.val[LANE(i * 2, s2)] + s2.val[LANE(i * 2 + 1, s2)]; \
- possible_nondeterminism_ |= has_nondeterminism(result2); \
- res.val[LANE(i + count / 2, res)] = result2; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
- ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
- ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
-#undef ADD_HORIZ_CASE
case kExprI32x4DotI16x8S: {
int8 v2 = Pop().to_s128().to_i16x8();
int8 v1 = Pop().to_s128().to_i16x8();
@@ -2844,10 +2830,10 @@ class WasmInterpreterInternals {
Push(WasmValue(res)); \
return true; \
}
- REDUCTION_CASE(V64x2AllTrue, i64x2, int2, 2, &)
- REDUCTION_CASE(V32x4AllTrue, i32x4, int4, 4, &)
- REDUCTION_CASE(V16x8AllTrue, i16x8, int8, 8, &)
- REDUCTION_CASE(V8x16AllTrue, i8x16, int16, 16, &)
+ REDUCTION_CASE(I64x2AllTrue, i64x2, int2, 2, &)
+ REDUCTION_CASE(I32x4AllTrue, i32x4, int4, 4, &)
+ REDUCTION_CASE(I16x8AllTrue, i16x8, int8, 8, &)
+ REDUCTION_CASE(I8x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
#define QFM_CASE(op, name, stype, count, operation) \
case kExpr##op: { \
@@ -2947,18 +2933,6 @@ class WasmInterpreterInternals {
return DoSimdStoreLane<int2, int64_t, int64_t>(
decoder, code, pc, len, MachineRepresentation::kWord64);
}
- case kExprI8x16SignSelect: {
- return DoSimdSignSelect<int16>();
- }
- case kExprI16x8SignSelect: {
- return DoSimdSignSelect<int8>();
- }
- case kExprI32x4SignSelect: {
- return DoSimdSignSelect<int4>();
- }
- case kExprI64x2SignSelect: {
- return DoSimdSignSelect<int2>();
- }
case kExprI32x4ExtAddPairwiseI16x8S: {
return DoSimdExtAddPairwise<int4, int8, int32_t, int16_t>();
}
@@ -2971,16 +2945,6 @@ class WasmInterpreterInternals {
case kExprI16x8ExtAddPairwiseI8x16U: {
return DoSimdExtAddPairwise<int8, int16, uint16_t, uint8_t>();
}
- case kExprPrefetchT:
- case kExprPrefetchNT: {
- // Max alignment doesn't matter, use an arbitrary value.
- MemoryAccessImmediate<Decoder::kNoValidation> imm(
- decoder, code->at(pc + *len), 4, module()->is_memory64);
- // Pop address and do nothing.
- Pop().to<uint32_t>();
- *len += imm.length;
- return true;
- }
default:
return false;
}
@@ -3071,7 +3035,8 @@ class WasmInterpreterInternals {
SimdLaneImmediate<Decoder::kNoValidation> lane_imm(
decoder, code->at(pc + *len + imm.length));
- Push(WasmValue(value.val[LANE(lane_imm.lane, value)]));
+ Push(WasmValue(
+ static_cast<result_type>(value.val[LANE(lane_imm.lane, value)])));
// ExecuteStore will update the len, so pass it unchanged here.
if (!ExecuteStore<result_type, load_type>(decoder, code, pc, len, rep,
@@ -3101,21 +3066,6 @@ class WasmInterpreterInternals {
return true;
}
- template <typename s_type>
- bool DoSimdSignSelect() {
- constexpr int lanes = kSimd128Size / sizeof(s_type::val[0]);
- auto c = Pop().to_s128().to<s_type>();
- auto v2 = Pop().to_s128().to<s_type>();
- auto v1 = Pop().to_s128().to<s_type>();
- s_type res;
- for (int i = 0; i < lanes; ++i) {
- res.val[LANE(i, res)] =
- c.val[LANE(i, c)] < 0 ? v1.val[LANE(i, v1)] : v2.val[LANE(i, v2)];
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
-
template <typename DstSimdType, typename SrcSimdType, typename Wide,
typename Narrow>
bool DoSimdExtAddPairwise() {
@@ -3156,6 +3106,9 @@ class WasmInterpreterInternals {
// it to 0 here such that we report the same position as in compiled code.
frames_.back().pc = 0;
isolate_->StackOverflow();
+ if (FLAG_experimental_wasm_eh) {
+ possible_nondeterminism_ = true;
+ }
if (HandleException(isolate_) == WasmInterpreter::HANDLED) {
ReloadFromFrameOnException(decoder, target, pc, limit);
return true;
@@ -3234,8 +3187,8 @@ class WasmInterpreterInternals {
case HeapType::kExtern:
case HeapType::kFunc:
case HeapType::kAny: {
- Handle<Object> externref = value.to_externref();
- encoded_values->set(encoded_index++, *externref);
+ Handle<Object> ref = value.to_ref();
+ encoded_values->set(encoded_index++, *ref);
break;
}
case HeapType::kBottom:
@@ -3254,7 +3207,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3356,9 +3309,9 @@ class WasmInterpreterInternals {
case HeapType::kExtern:
case HeapType::kFunc:
case HeapType::kAny: {
- Handle<Object> externref(encoded_values->get(encoded_index++),
- isolate_);
- value = WasmValue(externref);
+ Handle<Object> ref(encoded_values->get(encoded_index++),
+ isolate_);
+ value = WasmValue(ref, sig->GetParam(i));
break;
}
default:
@@ -3372,7 +3325,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3605,7 +3558,8 @@ class WasmInterpreterInternals {
HeapTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1), module());
len = 1 + imm.length;
- Push(WasmValue(isolate_->factory()->null_value()));
+ Push(WasmValue(isolate_->factory()->null_value(),
+ ValueType::Ref(imm.type, kNullable)));
break;
}
case kExprRefFunc: {
@@ -3616,7 +3570,7 @@ class WasmInterpreterInternals {
Handle<WasmExternalFunction> function =
WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate_, instance_object_, imm.index);
- Push(WasmValue(function));
+ Push(WasmValue(function, kWasmFuncRef));
len = 1 + imm.length;
break;
}
@@ -3762,7 +3716,7 @@ class WasmInterpreterInternals {
std::tie(global_buffer, global_index) =
WasmInstanceObject::GetGlobalBufferAndIndex(instance_object_,
global);
- Handle<Object> ref = Pop().to_externref();
+ Handle<Object> ref = Pop().to_ref();
global_buffer->set(global_index, *ref);
break;
}
@@ -3770,7 +3724,7 @@ class WasmInterpreterInternals {
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -3791,7 +3745,7 @@ class WasmInterpreterInternals {
}
Handle<Object> value =
WasmTableObject::Get(isolate_, table, entry_index);
- Push(WasmValue(value));
+ Push(WasmValue(value, table->type()));
len = 1 + imm.length;
break;
}
@@ -3803,7 +3757,7 @@ class WasmInterpreterInternals {
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
isolate_);
uint32_t table_size = table->current_length();
- Handle<Object> value = Pop().to_externref();
+ Handle<Object> value = Pop().to_ref();
uint32_t entry_index = Pop().to<uint32_t>();
if (entry_index >= table_size) {
return DoTrap(kTrapTableOutOfBounds, pc);
@@ -3953,7 +3907,7 @@ class WasmInterpreterInternals {
case kExprRefIsNull: {
len = 1;
HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t result = Pop().to_externref()->IsNull() ? 1 : 0;
+ uint32_t result = Pop().to_ref()->IsNull() ? 1 : 0;
Push(WasmValue(result));
break;
}
@@ -4071,7 +4025,9 @@ class WasmInterpreterInternals {
}
void Push(WasmValue val) {
- DCHECK_NE(kWasmStmt, val.type());
+ DCHECK_NE(kWasmVoid, val.type());
+ DCHECK_NE(kWasmI8, val.type());
+ DCHECK_NE(kWasmI16, val.type());
DCHECK_LE(1, stack_limit_ - sp_);
DCHECK(StackValue::IsClearedValue(this, StackHeight()));
StackValue stack_value(val, this, StackHeight());
@@ -4083,7 +4039,7 @@ class WasmInterpreterInternals {
void Push(WasmValue* vals, size_t arity) {
DCHECK_LE(arity, stack_limit_ - sp_);
for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
- DCHECK_NE(kWasmStmt, val->type());
+ DCHECK_NE(kWasmVoid, val->type());
Push(*val);
}
}
@@ -4160,13 +4116,13 @@ class WasmInterpreterInternals {
PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
break;
}
- case kStmt:
+ case kVoid:
PrintF("void");
break;
case kRef:
case kOptRef: {
if (val.type().is_reference_to(HeapType::kExtern)) {
- Handle<Object> ref = val.to_externref();
+ Handle<Object> ref = val.to_ref();
if (ref->IsNull()) {
PrintF("ref:null");
} else {
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 7ddc32fc89..ee51d6a0dd 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -807,7 +807,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
//------------------------------------------------------------------------------
// Memory Operations.
//------------------------------------------------------------------------------
-#define WASM_GROW_MEMORY(x) x, kExprMemoryGrow, 0
+#define WASM_MEMORY_GROW(x) x, kExprMemoryGrow, 0
#define WASM_MEMORY_SIZE kExprMemorySize, 0
#define SIG_ENTRY_v_v kWasmFunctionTypeCode, 0, 0
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index c74d0ec56c..770b320dfd 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -66,14 +66,15 @@ OwnedVector<WasmValue> MakeDefaultInterpreterArguments(Isolate* isolate,
break;
case kOptRef:
arguments[i] =
- WasmValue(Handle<Object>::cast(isolate->factory()->null_value()));
+ WasmValue(Handle<Object>::cast(isolate->factory()->null_value()),
+ sig->GetParam(i));
break;
case kRef:
case kRtt:
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
@@ -108,7 +109,7 @@ OwnedVector<Handle<Object>> MakeDefaultArguments(Isolate* isolate,
case kRttWithDepth:
case kI8:
case kI16:
- case kStmt:
+ case kVoid:
case kBottom:
UNREACHABLE();
}
diff --git a/deps/v8/test/debugger/debug/debug-break-class-fields.js b/deps/v8/test/debugger/debug/debug-break-class-fields.js
index b6b9c93235..02b6c3bb6b 100644
--- a/deps/v8/test/debugger/debug/debug-break-class-fields.js
+++ b/deps/v8/test/debugger/debug/debug-break-class-fields.js
@@ -64,76 +64,82 @@ assertTrue(Debug.showBreakPoints(initializer).indexOf("y = [B0]2;") === -1);
Debug.clearBreakPoint(b3);
assertTrue(Debug.showBreakPoints(initializer).indexOf("z = [B1]3") === -1);
+// The computed properties are evaluated during class construction,
+// not as part of the initializer function. As a consequence of which,
+// they aren't breakable here in the initializer function, but
+// instead, are part of the enclosing function.
+
function foo() {}
-var bar = "bar";
+var bar = 'bar';
class X {
[foo()] = 1;
- [bar] = 2;
baz = foo();
}
-// The computed properties are evaluated during class construction,
-// not as part of the initializer function. As a consequence of which,
-// they aren't breakable here in the initializer function, but
-// instead, are part of the enclosing function.
-//
// class X {
-// [foo()] = [B0]1;
-// [bar] = [B1]2;
-// [baz] = [B2]foo();
+// [foo()] = 1;
+// baz = [B0]foo();
// }
initializer = %GetInitializerFunction(X);
b1 = Debug.setBreakPoint(initializer, 0, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf('[foo()] = 1;') === 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
+b1 = Debug.setBreakPoint(initializer, 1, 0);
+assertTrue(Debug.showBreakPoints(initializer).indexOf('baz = [B0]foo()') > 0);
+Debug.clearBreakPoint(b1);
+
+function t() {
+ class X {
+ [foo()] = 1;
+ [bar] = 2;
+ baz = foo();
+ }
+}
+
+// class X {
+// [[B0]foo()] = 1;
+// [[B1]bar] = 2;
+// baz = foo();
+// }
+
+b1 = Debug.setBreakPoint(t, 2, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+Debug.clearBreakPoint(b1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
+
+b2 = Debug.setBreakPoint(t, 3, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') > 0);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = [B0]2;') === -1);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") > 0);
+b3 = Debug.setBreakPoint(t, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B0]foo()") === -1);
-b1 = Debug.setBreakPoint(initializer, 0, 0);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") > 0);
+b1 = Debug.setBreakPoint(t, 2, 0);
+b2 = Debug.setBreakPoint(t, 3, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B1]bar] = 2;') > 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B1]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B1]bar] = 2;') === -1);
-b1 = Debug.setBreakPoint(initializer, 0, 0);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+b1 = Debug.setBreakPoint(t, 2, 0);
+b3 = Debug.setBreakPoint(initializer, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[foo()] = [B0]1;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]foo()] = 1;') === -1);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
-b2 = Debug.setBreakPoint(initializer, 1, 0);
-b3 = Debug.setBreakPoint(initializer, 2, 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") > 0);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") > 0);
+b2 = Debug.setBreakPoint(t, 3, 0);
+b3 = Debug.setBreakPoint(t, 4, 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') > 0);
+assertTrue(Debug.showBreakPoints(t).indexOf('baz = foo()') > 0);
Debug.clearBreakPoint(b2);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[bar] = [B0]2;") === -1);
+assertTrue(Debug.showBreakPoints(t).indexOf('[[B0]bar] = 2;') === -1);
Debug.clearBreakPoint(b3);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("baz = [B1]foo()") === -1);
-
-function t() {
- class X {
- [foo()] = 1;
- }
-}
-
-b1 = Debug.setBreakPoint(t, 0, 0);
-assertTrue(Debug.showBreakPoints(t).indexOf("[[B0]foo()] = 1;")> 0);
-Debug.clearBreakPoint(b1);
-assertTrue(Debug.showBreakPoints(initializer).indexOf("[[B0]foo()] = 1;") === -1);
diff --git a/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
index fb988bcb57..28329fdcee 100644
--- a/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
@@ -34,7 +34,7 @@ function f() {
class Derived extends GetBase() {} // 0.
}
-var bp = Debug.setBreakPoint(f, 0);
+var bp = Debug.setBreakPoint(f, 1, 20);
f();
assertEquals(4, stepCount);
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index 5054dc5327..b862b3cad2 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -153,6 +153,12 @@
'debug/wasm/*': [SKIP],
}],
+##############################################################################
+# Tests requiring Sparkplug.
+['arch not in (x64, arm64, ia32, arm)', {
+ 'regress/regress-crbug-1199681': [SKIP],
+}],
+
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
diff --git a/deps/v8/test/debugger/regress/regress-crbug-1199681.js b/deps/v8/test/debugger/regress/regress-crbug-1199681.js
new file mode 100644
index 0000000000..211475250d
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-crbug-1199681.js
@@ -0,0 +1,52 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --sparkplug --allow-natives-syntax
+
+function f() {
+ debugger;
+ return 1;
+}
+
+function g() {
+ return f(); // Break
+}
+
+function h() {
+ return g();
+}
+
+// Ensure FeedbackVector to consider f for inlining.
+%EnsureFeedbackVectorForFunction(f);
+%CompileBaseline(g);
+
+%PrepareFunctionForOptimization(h);
+h();
+h();
+
+var Debug = debug.Debug;
+var step_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (step_count == 0) {
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ } else {
+ assertTrue(exec_state.frame().sourceLineText().includes('Break'));
+ }
+ step_count++;
+ } catch (e) {
+ exception = e;
+ print(e);
+ }
+}
+
+Debug.setListener(listener);
+%OptimizeFunctionOnNextCall(h);
+h();
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(2, step_count);
diff --git a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
index fa0743500f..760e435bfa 100644
--- a/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
+++ b/deps/v8/test/debugging/wasm/gdb-server/test_files/test_memory.js
@@ -19,11 +19,11 @@ var func_a_idx =
builder.addFunction('wasm_B', kSig_v_i)
.addBody([
kExprLoop,
- kWasmStmt, // while
+ kWasmVoid, // while
kExprLocalGet,
0, // -
kExprIf,
- kWasmStmt, // if <param0> != 0
+ kWasmVoid, // if <param0> != 0
kExprLocalGet,
0, // -
kExprI32Const,
diff --git a/deps/v8/test/fuzzer/BUILD.gn b/deps/v8/test/fuzzer/BUILD.gn
index 7c837464c5..5bbe62d706 100644
--- a/deps/v8/test/fuzzer/BUILD.gn
+++ b/deps/v8/test/fuzzer/BUILD.gn
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("../../gni/v8.gni")
+
group("v8_fuzzer") {
testonly = true
@@ -18,10 +20,15 @@ group("v8_fuzzer") {
"./parser/",
"./regexp/",
"./regexp_builtins/",
- "./multi_return/",
- "./wasm/",
- "./wasm_async/",
- "./wasm_code/",
- "./wasm_compile/",
]
+
+ if (v8_enable_webassembly) {
+ data += [
+ "./multi_return/",
+ "./wasm/",
+ "./wasm_async/",
+ "./wasm_code/",
+ "./wasm_compile/",
+ ]
+ }
}
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 06294b9f7e..8b1523d4e0 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -61,12 +61,14 @@ std::unique_ptr<FuzzerSupport> FuzzerSupport::fuzzer_support_;
// static
void FuzzerSupport::InitializeFuzzerSupport(int* argc, char*** argv) {
+#if V8_ENABLE_WEBASSEMBLY
if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
constexpr bool kUseDefaultTrapHandler = true;
if (!v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultTrapHandler)) {
FATAL("Could not register trap handler");
}
}
+#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_NULL(FuzzerSupport::fuzzer_support_);
FuzzerSupport::fuzzer_support_ =
std::make_unique<v8_fuzzer::FuzzerSupport>(argc, argv);
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
index f865018cc5..4a8bc4d286 100644
--- a/deps/v8/test/fuzzer/fuzzer.status
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -5,14 +5,14 @@
[
##############################################################################
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'multi_return/*': [SKIP],
'wasm/*': [SKIP],
'wasm_async/*': [SKIP],
'wasm_code/*': [SKIP],
'wasm_compile/*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
diff --git a/deps/v8/test/fuzzer/inspector-fuzzer.cc b/deps/v8/test/fuzzer/inspector-fuzzer.cc
index 7f09f92ae6..77e2402fa8 100644
--- a/deps/v8/test/fuzzer/inspector-fuzzer.cc
+++ b/deps/v8/test/fuzzer/inspector-fuzzer.cc
@@ -245,10 +245,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
v8::FunctionTemplate::New(
isolate, &InspectorExtension::SetMaxAsyncTaskStacks));
inspector->Set(
- ToV8String(isolate, "dumpAsyncTaskStacksStateForTest"),
- v8::FunctionTemplate::New(
- isolate, &InspectorExtension::DumpAsyncTaskStacksStateForTest));
- inspector->Set(
ToV8String(isolate, "breakProgram"),
v8::FunctionTemplate::New(isolate, &InspectorExtension::BreakProgram));
inspector->Set(
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 4e8949412a..b8af27a6b3 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -45,18 +45,14 @@ class AsyncFuzzerResolver : public i::wasm::CompilationResultResolver {
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// Set some more flags.
FLAG_wasm_async_compilation = true;
FLAG_wasm_max_mem_pages = 32;
FLAG_wasm_max_table_size = 100;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -68,6 +64,12 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::HandleScope handle_scope(isolate);
i::HandleScope internal_scope(i_isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ OneTimeEnableStagedWasmFeatures(isolate);
+
TryCatch try_catch(isolate);
testing::SetupIsolateForWasmModule(i_isolate);
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 6804cfa5c9..663ef33a94 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -122,7 +122,7 @@ class WasmGenerator {
: gen_(gen), emit_end_(emit_end) {
gen->blocks_.emplace_back(br_types.begin(), br_types.end());
if (param_types.size() == 0 && result_types.size() == 0) {
- gen->builder_->EmitWithU8(block_type, kWasmStmt.value_type_code());
+ gen->builder_->EmitWithU8(block_type, kWasmVoid.value_type_code());
return;
}
if (param_types.size() == 0 && result_types.size() == 1) {
@@ -135,11 +135,11 @@ class WasmGenerator {
FunctionSig::Builder builder(zone, result_types.size(),
param_types.size());
for (auto& type : param_types) {
- DCHECK_NE(type, kWasmStmt);
+ DCHECK_NE(type, kWasmVoid);
builder.AddParam(type);
}
for (auto& type : result_types) {
- DCHECK_NE(type, kWasmStmt);
+ DCHECK_NE(type, kWasmVoid);
builder.AddReturn(type);
}
FunctionSig* sig = builder.Build();
@@ -199,10 +199,10 @@ class WasmGenerator {
template <ValueKind T, IfType type>
void if_(DataRange* data) {
- static_assert(T == kStmt || type == kIfElse,
+ static_assert(T == kVoid || type == kIfElse,
"if without else cannot produce a value");
if_({},
- T == kStmt ? Vector<ValueType>{} : VectorOf({ValueType::Primitive(T)}),
+ T == kVoid ? Vector<ValueType>{} : VectorOf({ValueType::Primitive(T)}),
type, data);
}
@@ -217,7 +217,7 @@ class WasmGenerator {
uint8_t delegate_target = data->get<uint8_t>() % (try_blocks_.size() + 1);
bool is_unwind = num_catch == 0 && !has_catch_all && !is_delegate;
- Vector<const ValueType> return_type_vec = return_type.kind() == kStmt
+ Vector<const ValueType> return_type_vec = return_type.kind() == kVoid
? Vector<ValueType>{}
: VectorOf(&return_type, 1);
BlockScope block_scope(this, kExprTry, {}, return_type_vec, return_type_vec,
@@ -293,7 +293,7 @@ class WasmGenerator {
kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void br_if(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
@@ -305,9 +305,9 @@ class WasmGenerator {
builder_->EmitWithI32V(
kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
ConsumeAndGenerate(break_types,
- wanted_type == kStmt
+ wanted_kind == kVoid
? Vector<ValueType>{}
- : VectorOf({ValueType::Primitive(wanted_type)}),
+ : VectorOf({ValueType::Primitive(wanted_kind)}),
data);
}
@@ -424,13 +424,13 @@ class WasmGenerator {
}
}
- template <WasmOpcode memory_op, ValueKind... arg_types>
+ template <WasmOpcode memory_op, ValueKind... arg_kinds>
void memop(DataRange* data) {
const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
const uint32_t offset = data->get<uint32_t>();
// Generate the index and the arguments, if any.
- Generate<kI32, arg_types...>(data);
+ Generate<kI32, arg_kinds...>(data);
if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(memory_op >> 8))) {
DCHECK(memory_op >> 8 == kAtomicPrefix || memory_op >> 8 == kSimdPrefix);
@@ -496,14 +496,14 @@ class WasmGenerator {
enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void call(DataRange* data) {
- call(data, ValueType::Primitive(wanted_type), kCallDirect);
+ call(data, ValueType::Primitive(wanted_kind), kCallDirect);
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void call_indirect(DataRange* data) {
- call(data, ValueType::Primitive(wanted_type), kCallIndirect);
+ call(data, ValueType::Primitive(wanted_kind), kCallIndirect);
}
void Convert(ValueType src, ValueType dst) {
@@ -536,16 +536,16 @@ class WasmGenerator {
void ConvertOrGenerate(ValueType src, ValueType dst, DataRange* data) {
if (src == dst) return;
- if (src == kWasmStmt && dst != kWasmStmt) {
+ if (src == kWasmVoid && dst != kWasmVoid) {
Generate(dst, data);
- } else if (dst == kWasmStmt && src != kWasmStmt) {
+ } else if (dst == kWasmVoid && src != kWasmVoid) {
builder_->Emit(kExprDrop);
} else {
Convert(src, dst);
}
}
- void call(DataRange* data, ValueType wanted_type, CallDirect call_direct) {
+ void call(DataRange* data, ValueType wanted_kind, CallDirect call_direct) {
uint8_t random_byte = data->get<uint8_t>();
int func_index = random_byte % functions_.size();
uint32_t sig_index = functions_[func_index];
@@ -579,12 +579,12 @@ class WasmGenerator {
builder_->EmitByte(0); // Table index.
}
}
- if (sig->return_count() == 0 && wanted_type != kWasmStmt) {
+ if (sig->return_count() == 0 && wanted_kind != kWasmVoid) {
// The call did not generate a value. Thus just generate it here.
- Generate(wanted_type, data);
+ Generate(wanted_kind, data);
return;
}
- if (wanted_type == kWasmStmt) {
+ if (wanted_kind == kWasmVoid) {
// The call did generate values, but we did not want one.
for (size_t i = 0; i < sig->return_count(); ++i) {
builder_->Emit(kExprDrop);
@@ -593,16 +593,16 @@ class WasmGenerator {
}
auto return_types = VectorOf(sig->returns().begin(), sig->return_count());
auto wanted_types =
- VectorOf(&wanted_type, wanted_type == kWasmStmt ? 0 : 1);
+ VectorOf(&wanted_kind, wanted_kind == kWasmVoid ? 0 : 1);
ConsumeAndGenerate(return_types, wanted_types, data);
}
struct Var {
uint32_t index;
- ValueType type = kWasmStmt;
+ ValueType type = kWasmVoid;
Var() = default;
Var(uint32_t index, ValueType type) : index(index), type(type) {}
- bool is_valid() const { return type != kWasmStmt; }
+ bool is_valid() const { return type != kWasmVoid; }
};
Var GetRandomLocal(DataRange* data) {
@@ -616,34 +616,34 @@ class WasmGenerator {
return {index, type};
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void local_op(DataRange* data, WasmOpcode opcode) {
Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
if (!local.is_valid()) {
- if (wanted_type == kStmt) return;
- return Generate<wanted_type>(data);
+ if (wanted_kind == kVoid) return;
+ return Generate<wanted_kind>(data);
}
if (opcode != kExprLocalGet) Generate(local.type, data);
builder_->EmitWithU32V(opcode, local.index);
- if (wanted_type != kStmt && local.type.kind() != wanted_type) {
- Convert(local.type, ValueType::Primitive(wanted_type));
+ if (wanted_kind != kVoid && local.type.kind() != wanted_kind) {
+ Convert(local.type, ValueType::Primitive(wanted_kind));
}
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void get_local(DataRange* data) {
- static_assert(wanted_type != kStmt, "illegal type");
- local_op<wanted_type>(data, kExprLocalGet);
+ static_assert(wanted_kind != kVoid, "illegal type");
+ local_op<wanted_kind>(data, kExprLocalGet);
}
- void set_local(DataRange* data) { local_op<kStmt>(data, kExprLocalSet); }
+ void set_local(DataRange* data) { local_op<kVoid>(data, kExprLocalSet); }
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void tee_local(DataRange* data) {
- local_op<wanted_type>(data, kExprLocalTee);
+ local_op<wanted_kind>(data, kExprLocalTee);
}
template <size_t num_bytes>
@@ -669,42 +669,42 @@ class WasmGenerator {
return {index, type};
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void global_op(DataRange* data) {
- constexpr bool is_set = wanted_type == kStmt;
+ constexpr bool is_set = wanted_kind == kVoid;
Var global = GetRandomGlobal(data, is_set);
// If there are no globals, just generate any value (if a value is needed),
// or do nothing.
if (!global.is_valid()) {
- if (wanted_type == kStmt) return;
- return Generate<wanted_type>(data);
+ if (wanted_kind == kVoid) return;
+ return Generate<wanted_kind>(data);
}
if (is_set) Generate(global.type, data);
builder_->EmitWithU32V(is_set ? kExprGlobalSet : kExprGlobalGet,
global.index);
- if (!is_set && global.type.kind() != wanted_type) {
- Convert(global.type, ValueType::Primitive(wanted_type));
+ if (!is_set && global.type.kind() != wanted_kind) {
+ Convert(global.type, ValueType::Primitive(wanted_kind));
}
}
- template <ValueKind wanted_type>
+ template <ValueKind wanted_kind>
void get_global(DataRange* data) {
- static_assert(wanted_type != kStmt, "illegal type");
- global_op<wanted_type>(data);
+ static_assert(wanted_kind != kVoid, "illegal type");
+ global_op<wanted_kind>(data);
}
- template <ValueKind select_type>
+ template <ValueKind select_kind>
void select_with_type(DataRange* data) {
- static_assert(select_type != kStmt, "illegal type for select");
- Generate<select_type, select_type, kI32>(data);
+ static_assert(select_kind != kVoid, "illegal kind for select");
+ Generate<select_kind, select_kind, kI32>(data);
// num_types is always 1.
uint8_t num_types = 1;
builder_->EmitWithU8U8(kExprSelectWithType, num_types,
- ValueType::Primitive(select_type).value_type_code());
+ ValueType::Primitive(select_kind).value_type_code());
}
- void set_global(DataRange* data) { global_op<kStmt>(data); }
+ void set_global(DataRange* data) { global_op<kVoid>(data); }
void throw_or_rethrow(DataRange* data) {
bool rethrow = data->get<uint8_t>() % 2;
@@ -822,31 +822,31 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::block<kStmt>(DataRange* data) {
+void WasmGenerator::block<kVoid>(DataRange* data) {
block({}, {}, data);
}
template <>
-void WasmGenerator::loop<kStmt>(DataRange* data) {
+void WasmGenerator::loop<kVoid>(DataRange* data) {
loop({}, {}, data);
}
template <>
-void WasmGenerator::Generate<kStmt>(DataRange* data) {
+void WasmGenerator::Generate<kVoid>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
if (recursion_limit_reached() || data->size() == 0) return;
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kStmt, kStmt>,
- &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt>,
- &WasmGenerator::sequence<kStmt, kStmt, kStmt, kStmt, kStmt, kStmt, kStmt,
- kStmt>,
- &WasmGenerator::block<kStmt>,
- &WasmGenerator::loop<kStmt>,
- &WasmGenerator::if_<kStmt, kIf>,
- &WasmGenerator::if_<kStmt, kIfElse>,
+ &WasmGenerator::sequence<kVoid, kVoid>,
+ &WasmGenerator::sequence<kVoid, kVoid, kVoid, kVoid>,
+ &WasmGenerator::sequence<kVoid, kVoid, kVoid, kVoid, kVoid, kVoid, kVoid,
+ kVoid>,
+ &WasmGenerator::block<kVoid>,
+ &WasmGenerator::loop<kVoid>,
+ &WasmGenerator::if_<kVoid, kIf>,
+ &WasmGenerator::if_<kVoid, kIfElse>,
&WasmGenerator::br,
- &WasmGenerator::br_if<kStmt>,
+ &WasmGenerator::br_if<kVoid>,
&WasmGenerator::memop<kExprI32StoreMem, kI32>,
&WasmGenerator::memop<kExprI32StoreMem8, kI32>,
@@ -872,13 +872,13 @@ void WasmGenerator::Generate<kStmt>(DataRange* data) {
&WasmGenerator::drop,
- &WasmGenerator::call<kStmt>,
- &WasmGenerator::call_indirect<kStmt>,
+ &WasmGenerator::call<kVoid>,
+ &WasmGenerator::call_indirect<kVoid>,
&WasmGenerator::set_local,
&WasmGenerator::set_global,
&WasmGenerator::throw_or_rethrow,
- &WasmGenerator::try_block<kStmt>};
+ &WasmGenerator::try_block<kVoid>};
GenerateOneOf(alternatives, data);
}
@@ -897,9 +897,9 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::i32_const<3>,
&WasmGenerator::i32_const<4>,
- &WasmGenerator::sequence<kI32, kStmt>,
- &WasmGenerator::sequence<kStmt, kI32>,
- &WasmGenerator::sequence<kStmt, kI32, kStmt>,
+ &WasmGenerator::sequence<kI32, kVoid>,
+ &WasmGenerator::sequence<kVoid, kI32>,
+ &WasmGenerator::sequence<kVoid, kI32, kVoid>,
&WasmGenerator::op<kExprI32Eqz, kI32>,
&WasmGenerator::op<kExprI32Eq, kI32, kI32>,
@@ -1001,13 +1001,13 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
kI32>,
&WasmGenerator::op_with_prefix<kExprV128AnyTrue, kS128>,
- &WasmGenerator::op_with_prefix<kExprV8x16AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI8x16AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV16x8AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI16x8AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI16x8BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV32x4AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI32x4BitMask, kS128>,
- &WasmGenerator::op_with_prefix<kExprV64x2AllTrue, kS128>,
+ &WasmGenerator::op_with_prefix<kExprI64x2AllTrue, kS128>,
&WasmGenerator::op_with_prefix<kExprI64x2BitMask, kS128>,
&WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneS, 16, kS128>,
&WasmGenerator::simd_lane_op<kExprI8x16ExtractLaneU, 16, kS128>,
@@ -1049,9 +1049,9 @@ void WasmGenerator::Generate<kI64>(DataRange* data) {
&WasmGenerator::i64_const<7>,
&WasmGenerator::i64_const<8>,
- &WasmGenerator::sequence<kI64, kStmt>,
- &WasmGenerator::sequence<kStmt, kI64>,
- &WasmGenerator::sequence<kStmt, kI64, kStmt>,
+ &WasmGenerator::sequence<kI64, kVoid>,
+ &WasmGenerator::sequence<kVoid, kI64>,
+ &WasmGenerator::sequence<kVoid, kI64, kVoid>,
&WasmGenerator::op<kExprI64Add, kI64, kI64>,
&WasmGenerator::op<kExprI64Sub, kI64, kI64>,
@@ -1154,9 +1154,9 @@ void WasmGenerator::Generate<kF32>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kF32, kStmt>,
- &WasmGenerator::sequence<kStmt, kF32>,
- &WasmGenerator::sequence<kStmt, kF32, kStmt>,
+ &WasmGenerator::sequence<kF32, kVoid>,
+ &WasmGenerator::sequence<kVoid, kF32>,
+ &WasmGenerator::sequence<kVoid, kF32, kVoid>,
&WasmGenerator::op<kExprF32Abs, kF32>,
&WasmGenerator::op<kExprF32Neg, kF32>,
@@ -1211,9 +1211,9 @@ void WasmGenerator::Generate<kF64>(DataRange* data) {
}
constexpr GenerateFn alternatives[] = {
- &WasmGenerator::sequence<kF64, kStmt>,
- &WasmGenerator::sequence<kStmt, kF64>,
- &WasmGenerator::sequence<kStmt, kF64, kStmt>,
+ &WasmGenerator::sequence<kF64, kVoid>,
+ &WasmGenerator::sequence<kVoid, kF64>,
+ &WasmGenerator::sequence<kVoid, kF64, kVoid>,
&WasmGenerator::op<kExprF64Abs, kF64>,
&WasmGenerator::op<kExprF64Neg, kF64>,
@@ -1304,7 +1304,6 @@ void WasmGenerator::Generate<kS128>(DataRange* data) {
&WasmGenerator::op_with_prefix<kExprI8x16SubSatU, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MinS, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MinU, kS128, kS128>,
- // I8x16Mul is prototyped but not in the proposal, thus omitted here.
&WasmGenerator::op_with_prefix<kExprI8x16MaxS, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MaxU, kS128, kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16RoundingAverageU, kS128, kS128>,
@@ -1513,8 +1512,8 @@ void WasmGenerator::grow_memory(DataRange* data) {
void WasmGenerator::Generate(ValueType type, DataRange* data) {
switch (type.kind()) {
- case kStmt:
- return Generate<kStmt>(data);
+ case kVoid:
+ return Generate<kVoid>(data);
case kI32:
return Generate<kI32>(data);
case kI64:
@@ -1555,7 +1554,7 @@ void WasmGenerator::Generate(Vector<const ValueType> types, DataRange* data) {
}
if (types.size() == 0) {
- Generate(kWasmStmt, data);
+ Generate(kWasmVoid, data);
return;
}
if (types.size() == 1) {
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 597789c7e1..76fde895ef 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -307,33 +307,30 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
-void OneTimeEnableStagedWasmFeatures() {
+void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate) {
struct EnableStagedWasmFeatures {
- EnableStagedWasmFeatures() {
+ explicit EnableStagedWasmFeatures(v8::Isolate* isolate) {
#define ENABLE_STAGED_FEATURES(feat, desc, val) \
FLAG_experimental_wasm_##feat = true;
FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
#undef ENABLE_STAGED_FEATURES
+ isolate->InstallConditionalFeatures(isolate->GetCurrentContext());
}
};
// The compiler will properly synchronize the constructor call.
- static EnableStagedWasmFeatures one_time_enable_staged_features;
+ static EnableStagedWasmFeatures one_time_enable_staged_features(isolate);
}
void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
bool require_valid) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// Strictly enforce the input size limit. Note that setting "max_len" on the
// fuzzer target is not enough, since different fuzzers are used and not all
// respect that limit.
if (data.size() > max_input_size()) return;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -342,6 +339,12 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ OneTimeEnableStagedWasmFeatures(isolate);
+
v8::TryCatch try_catch(isolate);
HandleScope scope(i_isolate);
@@ -356,6 +359,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
// compiled with Turbofan and which one with Liftoff.
uint8_t tier_mask = data.empty() ? 0 : data[0];
if (!data.empty()) data += 1;
+ uint8_t debug_mask = data.empty() ? 0 : data[0];
+ if (!data.empty()) data += 1;
if (!GenerateModule(i_isolate, &zone, data, &buffer, &num_args,
&interpreter_args, &compiler_args)) {
return;
@@ -374,6 +379,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
FlagScope<bool> liftoff(&FLAG_liftoff, true);
FlagScope<bool> no_tier_up(&FLAG_wasm_tier_up, false);
FlagScope<int> tier_mask_scope(&FLAG_wasm_tier_mask_for_testing, tier_mask);
+ FlagScope<int> debug_mask_scope(&FLAG_wasm_debug_mask_for_testing,
+ debug_mask);
compiled_module = i_isolate->wasm_engine()->SyncCompile(
i_isolate, enabled_features, &interpreter_thrower, wire_bytes);
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index d74a26ffab..04350e3d80 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -33,7 +33,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// no-ops. This avoids race conditions with threads reading the flags. Fuzzers
// are executed in their own process anyway, so this should not interfere with
// anything.
-void OneTimeEnableStagedWasmFeatures();
+void OneTimeEnableStagedWasmFeatures(v8::Isolate* isolate);
class WasmExecutionFuzzer {
public:
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index fe3cdfcbea..48d9108902 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -21,18 +21,14 @@
namespace i = v8::internal;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // We explicitly enable staged WebAssembly features here to increase fuzzer
- // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
- // the flag by itself.
- i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures();
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
// We reduce the maximum memory size and table size of WebAssembly instances
// to avoid OOMs in the fuzzer.
i::FLAG_wasm_max_mem_pages = 32;
i::FLAG_wasm_max_table_size = 100;
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Clear any pending exceptions from a prior run.
@@ -43,6 +39,12 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(support->GetContext());
+
+ // We explicitly enable staged WebAssembly features here to increase fuzzer
+ // coverage. For libfuzzer fuzzers it is not possible that the fuzzer enables
+ // the flag by itself.
+ i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures(isolate);
+
v8::TryCatch try_catch(isolate);
i::wasm::testing::SetupIsolateForWasmModule(i_isolate);
i::wasm::ModuleWireBytes wire_bytes(data, data + size);
diff --git a/deps/v8/test/fuzzer/wasm/regress-1191853.wasm b/deps/v8/test/fuzzer/wasm/regress-1191853.wasm
new file mode 100644
index 0000000000..8e8237eb2a
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm/regress-1191853.wasm
Binary files differ
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index cf039da0be..14c1704daa 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -23,7 +23,6 @@ v8_source_set("inspector_test") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"../../src/inspector:inspector_test_headers",
]
}
diff --git a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
index 17008f3550..ac194fdd10 100644
--- a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
@@ -6,7 +6,7 @@ function testFunction() {
async function f1() {
for (let x = |_|0; x |_|< 1; ++|_|x) |_|await x;
|_|return |_|await Promise.|C|resolve(2);|R|
- |R|}
+ }
async function f2() {
let r = |_|await |C|f1() + |_|await |C|f1();
@@ -17,7 +17,7 @@ function testFunction() {
let p = |_|Promise.|C|resolve(42);
|_|await p;
|_|return r;|R|
- |R|}
+ }
return |C|f2();|R|
}
diff --git a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
index 24bda366a9..4af1b05d66 100644
--- a/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-var-init-expected.txt
@@ -21,7 +21,7 @@ function testFunction() {
|_|(async function asyncF() {
let r = |_|await Promise.|C|resolve(42);
|_|return r;|R|
- |R|})|C|();
+ })|C|();
|_|return promise;|R|
}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
new file mode 100644
index 0000000000..6968ed3eab
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc-expected.txt
@@ -0,0 +1,7 @@
+Checks if we keep alive breakpoint information for top-level functions when calling getPossibleBreakpoints.
+Result of get possible breakpoints in topLevel.js
+[{"scriptId":"3","lineNumber":0,"columnNumber":0},{"scriptId":"3","lineNumber":0,"columnNumber":8,"type":"call"},{"scriptId":"3","lineNumber":0,"columnNumber":43,"type":"return"}]
+Result of get possible breakpoints in moduleFunc.js
+[{"scriptId":"5","lineNumber":0,"columnNumber":22},{"scriptId":"5","lineNumber":0,"columnNumber":30,"type":"call"},{"scriptId":"5","lineNumber":0,"columnNumber":63,"type":"return"},{"scriptId":"5","lineNumber":0,"columnNumber":64,"type":"return"}]
+Result of get possible breakpoints in mixedFunctions.js
+[{"scriptId":"7","lineNumber":0,"columnNumber":15,"type":"return"},{"scriptId":"7","lineNumber":1,"columnNumber":2},{"scriptId":"7","lineNumber":1,"columnNumber":10,"type":"call"},{"scriptId":"7","lineNumber":2,"columnNumber":0,"type":"return"}]
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
new file mode 100644
index 0000000000..097d0b99af
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-after-gc.js
@@ -0,0 +1,60 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we keep alive breakpoint information for top-level functions when calling getPossibleBreakpoints.');
+
+session.setupScriptMap();
+var executionContextId;
+
+const callGarbageCollector = `
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+ %CollectGarbage("");
+`;
+
+const topLevelFunction = `console.log('This is a top level function')`;
+const moduleFunction =
+ `function testFunc() { console.log('This is a module function') }`;
+let mixedFunctions = ` function A() {}
+ console.log('This is a top level function');
+`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ executionContextId = messageObject.params.context.id;
+ await testGetPossibleBreakpoints(
+ executionContextId, topLevelFunction, 'topLevel.js');
+ await testGetPossibleBreakpoints(
+ executionContextId, moduleFunction, 'moduleFunc.js');
+ await testGetPossibleBreakpoints(
+ executionContextId, mixedFunctions, 'mixedFunctions.js');
+ InspectorTest.completeTest();
+}
+
+async function testGetPossibleBreakpoints(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+ const location = {start: {lineNumber: 0, columnNumber: 0, scriptId}};
+ await Protocol.Runtime.runScript({scriptId});
+ await Protocol.Runtime.evaluate({expression: `${callGarbageCollector}`});
+ const {result: {locations}} =
+ await Protocol.Debugger.getPossibleBreakpoints(location);
+ InspectorTest.log(`Result of get possible breakpoints in ${url}`);
+ InspectorTest.log(JSON.stringify(locations));
+}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index 27346bffea..f7f9b7ca25 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -236,7 +236,7 @@ async function testPromiseAsyncWithCode() {
|R|}
|C|main();
|_|return testPromise;|R|
-|R|}
+}
function returnFunction() {
|_|return returnObject;|R|
@@ -249,7 +249,7 @@ async function testPromiseComplex() {
async function foo() {
|_|await Promise.|C|resolve();
|_|return 42;|R|
- |R|}
+ }
var x = |_|1;
var y = |_|2;
|C|returnFunction(|C|emptyFunction(), x++, --y, x => 2 |_|* x|R|, |C|returnCall())|C|().a = |_|await |C|foo((a => 2 |_|*a|R|)|C|(5));
@@ -257,7 +257,7 @@ async function testPromiseComplex() {
|R|}
|C|main();
|_|return testPromise;|R|
-|R|}
+}
function twiceDefined() {
|_|return a + b;|R|
diff --git a/deps/v8/test/inspector/debugger/regress-1190290-expected.txt b/deps/v8/test/inspector/debugger/regress-1190290-expected.txt
new file mode 100644
index 0000000000..5d3b31479f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regress-1190290-expected.txt
@@ -0,0 +1,10 @@
+Checks if we correctly handle exceptions thrown on setBreakpointByUrl if script is invalid.
+[
+]
+[
+ [0] : {
+ columnNumber : 22
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
diff --git a/deps/v8/test/inspector/debugger/regress-1190290.js b/deps/v8/test/inspector/debugger/regress-1190290.js
new file mode 100644
index 0000000000..eab16d3f1d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regress-1190290.js
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we correctly handle exceptions thrown on setBreakpointByUrl if script is invalid.');
+
+session.setupScriptMap();
+var executionContextId;
+
+const invalidFunction = `console.lo g('This is a top level function')`;
+const moduleFunction =
+ `function testFunc() { console.log('This is a module function') }`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ executionContextId = messageObject.params.context.id;
+ await testSetBreakpoint(
+ executionContextId, invalidFunction, 'invalidFunc.js');
+ await testSetBreakpoint(executionContextId, moduleFunction, 'moduleFunc.js');
+ InspectorTest.completeTest();
+}
+
+async function testSetBreakpoint(executionContextId, func, url) {
+ await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber: 0, url});
+ InspectorTest.logMessage(locations);
+}
diff --git a/deps/v8/test/inspector/debugger/regression-1185540-expected.txt b/deps/v8/test/inspector/debugger/regression-1185540-expected.txt
new file mode 100644
index 0000000000..a495f05ec5
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regression-1185540-expected.txt
@@ -0,0 +1,2 @@
+Check that setting a breakpoint in an invalid function is not crashing.
+[]
diff --git a/deps/v8/test/inspector/debugger/regression-1185540.js b/deps/v8/test/inspector/debugger/regression-1185540.js
new file mode 100644
index 0000000000..ce1f2a85af
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/regression-1185540.js
@@ -0,0 +1,34 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Check that setting a breakpoint in an invalid function is not crashing.');
+
+const invalidFunc = `console.l og('hi');//# sourceURL=invalid.js`;
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ const executionContextId = messageObject.params.context.id;
+ await testSetBreakpoint(executionContextId, invalidFunc, 'invalid.js');
+}
+
+async function testSetBreakpoint(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber: 0, url});
+ InspectorTest.log(JSON.stringify(locations));
+ InspectorTest.completeTest();
+};
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
index 02bfe0d80c..a85aab6fe0 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
@@ -1,7 +1,13 @@
Tests that setting breakpoint before enabling debugger produces an error
-setBreakpointByUrl error: undefined
+setBreakpointByUrl error: {
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
+}
setBreakpoint error: {
- "code": -32602,
- "message": "Invalid parameters",
- "data": "Failed to deserialize params.location - BINDINGS: mandatory field missing at <some position>"
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
+}
+setBreakpointOnFunctionCall error: {
+ "code": -32000,
+ "message": "Debugger agent is not enabled"
}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
index 5af1085c87..4401466a92 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
@@ -10,12 +10,19 @@ function didSetBreakpointByUrlBeforeEnable(message)
{
InspectorTest.log("setBreakpointByUrl error: " + JSON.stringify(
InspectorTest.trimErrorMessage(message).error, null, 2));
- Protocol.Debugger.setBreakpoint().then(didSetBreakpointBeforeEnable);
+ Protocol.Debugger.setBreakpoint({location: { scriptId: "4", lineNumber: 0, columnNumber: 0 }}).then(didSetBreakpointBeforeEnable);
}
function didSetBreakpointBeforeEnable(message)
{
InspectorTest.log("setBreakpoint error: " + JSON.stringify(
InspectorTest.trimErrorMessage(message).error, null, 2));
+ Protocol.Debugger.setBreakpointOnFunctionCall({objectId: "4"}).then(didSetBreakpointOnFunctionCallBeforeEnable);
+}
+
+function didSetBreakpointOnFunctionCallBeforeEnable(message)
+{
+ InspectorTest.log("setBreakpointOnFunctionCall error: " + JSON.stringify(
+ InspectorTest.trimErrorMessage(message).error, null, 2));
InspectorTest.completeTest();
}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
index 5f7fa80a97..17cc8e4a2b 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-breaks-on-first-breakable-location-expected.txt
@@ -2,7 +2,7 @@ Tests if breakpoint set is first breakable location
Set breakpoint outside of any function: (0, 0).
Setting breakpoint for id: 3 at 0, 0.
No breakable location inside a function was found
-Set breakpoint adds a breakpoint at (8, 1).
+Set breakpoint adds a breakpoint at (4, 2).
Set breakpoint at a breakable location: (4, 17).
Setting breakpoint for id: 3 at 4, 17.
Location match for (4, 17).
@@ -10,4 +10,4 @@ Initial location is expected to be breakable: true.
Set breakpoint at non-breakable location: (7, 0).
Setting breakpoint for id: 3 at 7, 0.
Location match for (7, 2).
-Initial location is expected to be breakable: false. \ No newline at end of file
+Initial location is expected to be breakable: false.
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
index 9e8bc7dcb0..609cb02ed2 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-expected.txt
@@ -39,10 +39,6 @@ hitBreakpoints contains breakpoint: true
Set breakpoint at empty line by url in top level function..
Breakpoint resolved at:
-// last line#
-
-Breakpoint hit at:
-// last line#
-
-hitBreakpoints contains breakpoint: true
+function i2(){#}
+// last line
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt
new file mode 100644
index 0000000000..ea363069cb
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer-expected.txt
@@ -0,0 +1,66 @@
+Checks if we can set a breakpoint on a one-line inline functions.
+Setting breakpoint on `class X`
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ |_|this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on constructor, should resolve to same location
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ |_|this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on computed properties in class
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [|_|bar] = 2;
+ baz = foo();
+}
+new X();
+
+Setting breakpoint on initializer function
+
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [bar] = 2;
+ baz = |_|foo();
+}
+new X();
+
+Paused on location:
+(anonymous) (testInitializer.js:8:3)
+Paused on location:
+<instance_members_initializer> (testInitializer.js:9:8)
+X (testInitializer.js:5:13)
+(anonymous) (testInitializer.js:11:0)
+Paused on location:
+X (testInitializer.js:6:4)
+(anonymous) (testInitializer.js:11:0)
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js
new file mode 100644
index 0000000000..6ed984962a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-in-class-initializer.js
@@ -0,0 +1,75 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we can set a breakpoint on a one-line inline functions.');
+
+session.setupScriptMap();
+
+const testClassInitializer = `
+function foo() {}
+var bar = "bar";
+
+class X {
+ constructor() {
+ this.x = 1;
+ }
+ [bar] = 2;
+ baz = foo();
+}
+new X();
+//# sourceURL=testInitializer.js`
+
+Protocol.Debugger.enable().then(onDebuggerEnabled);
+
+function onDebuggerEnabled() {
+ Protocol.Runtime.enable();
+ Protocol.Runtime.onExecutionContextCreated(onExecutionContextCreated);
+}
+
+async function onExecutionContextCreated(messageObject) {
+ const executionContextId = messageObject.params.context.id;
+ await runTest(executionContextId, testClassInitializer, 'testInitializer.js');
+ InspectorTest.completeTest();
+}
+
+async function runTest(executionContextId, func, url) {
+ const obj = await Protocol.Runtime.compileScript({
+ expression: func,
+ sourceURL: url,
+ persistScript: true,
+ executionContextId: executionContextId
+ });
+ const scriptId = obj.result.scriptId;
+
+ InspectorTest.log('Setting breakpoint on `class X`');
+ await setBreakpoint(4, 'testInitializer.js');
+
+ InspectorTest.log(
+ 'Setting breakpoint on constructor, should resolve to same location');
+ await setBreakpoint(5, 'testInitializer.js');
+
+ InspectorTest.log('Setting breakpoint on computed properties in class');
+ await setBreakpoint(8, 'testInitializer.js');
+
+ InspectorTest.log('Setting breakpoint on initializer function');
+ await setBreakpoint(9, 'testInitializer.js');
+
+ Protocol.Runtime.runScript({scriptId});
+ const numBreaks = 3;
+ for (var i = 0; i < numBreaks; ++i) {
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on location:');
+ session.logCallFrames(callFrames);
+ Protocol.Debugger.resume();
+ }
+
+ InspectorTest.completeTest();
+};
+
+async function setBreakpoint(lineNumber, url) {
+ const {result: {locations}} =
+ await Protocol.Debugger.setBreakpointByUrl({lineNumber, url});
+ await session.logBreakLocations(locations);
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt
new file mode 100644
index 0000000000..9c69c6d457
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function-expected.txt
@@ -0,0 +1,11 @@
+Checks if we can set a breakpoint on a one-line inline functions.
+Setting breakpoint
+ function test() {
+ function func(a) {|_|console.log(a);}
+ func("hi");
+ }
+
+Paused on location:
+func (testFunction.js:1:22)
+test (testFunction.js:2:4)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js
new file mode 100644
index 0000000000..630e20150e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-inline-function.js
@@ -0,0 +1,31 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Checks if we can set a breakpoint on a one-line inline functions.');
+
+session.setupScriptMap();
+const testFunction = ` function test() {
+ function func(a) {console.log(a);}
+ func("hi");
+ }
+ //# sourceURL=testFunction.js`;
+
+contextGroup.addScript(testFunction);
+
+(async function testSetBreakpoint() {
+ await Protocol.Debugger.enable();
+ await Protocol.Runtime.enable();
+
+ InspectorTest.log('Setting breakpoint');
+ const {result: {locations}} = await Protocol.Debugger.setBreakpointByUrl(
+ {lineNumber: 1, url: 'testFunction.js'});
+ await session.logBreakLocations(locations);
+
+ Protocol.Runtime.evaluate({expression: 'test()'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ InspectorTest.log('Paused on location:');
+ session.logCallFrames(callFrames);
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint.js b/deps/v8/test/inspector/debugger/set-breakpoint.js
index 2c641c3062..194e4d2c4b 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/set-breakpoint.js
@@ -92,22 +92,10 @@ eval('function sourceUrlFunc() { a = 2; }\\n//# sourceURL=sourceUrlScript');`);
Protocol.Runtime.evaluate({
expression: `//# sourceURL=test-script\nfunction i1(){};\n\n\n\n\nfunction i2(){}\n// last line`
});
- const [{
- params:{location}
- }, {
- params:{
- callFrames:[topFrame],
- hitBreakpoints
- }
- }] = await Promise.all([
- Protocol.Debugger.onceBreakpointResolved(),
- Protocol.Debugger.oncePaused()]);
+ const [{params: {location}}] =
+ await Promise.all([Protocol.Debugger.onceBreakpointResolved()]);
InspectorTest.log('Breakpoint resolved at:');
await session.logSourceLocation(location);
- InspectorTest.log('Breakpoint hit at:');
- await session.logSourceLocation(topFrame.location);
- const hitBreakpoint = hitBreakpoints[0] === breakpointId;
- InspectorTest.log(`hitBreakpoints contains breakpoint: ${hitBreakpoint}\n`);
}
await Protocol.Debugger.disable();
InspectorTest.completeTest();
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
new file mode 100644
index 0000000000..037057b13b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints-expected.txt
@@ -0,0 +1,31 @@
+Tests GC object inspection.
+
+Running test: test
+Instantiating.
+Waiting for wasm script (ignoring first non-wasm script).
+Setting breakpoint at offset 107 on script wasm://wasm/22e4830a
+Calling main()
+Paused:
+Script wasm://wasm/22e4830a byte offset 107: Wasm opcode 0x21 (kExprLocalSet)
+Scope:
+at main (0:107):
+ - scope (wasm-expression-stack):
+ 0: Array ((ref $ArrC))
+ object details:
+ 0: Struct ((ref null $StrA))
+ length: 1 (number)
+ - scope (local):
+ $varA: Struct ((ref null $StrA))
+ $varB: null ((ref null $ArrC))
+ object details:
+ $byte: 127 (i8)
+ $word: 32767 (i16)
+ $pointer: Struct ((ref $StrB))
+ - scope (module):
+ instance: exports: "main" (Function)
+ module: Module
+ functions: "$main": (Function)
+ globals: "$global0": function 0() { [native code] } ((ref null $type3))
+at (anonymous) (0:17):
+ -- skipped
+exports.main returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
new file mode 100644
index 0000000000..2c4c774411
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-breakpoints.js
@@ -0,0 +1,214 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests GC object inspection.');
+session.setupScriptMap();
+
+const module_bytes = [
+ 0x00, 0x61, 0x73, 0x6d, 1, 0, 0, 0, // wasm magic
+
+ 0x01, // type section
+ 0x16, // section length
+ 0x04, // number of types
+ // type 0: struct $StrA (field ($byte i8) ($word i16) ($pointer (ref $StrB)))
+ 0x5f, // struct
+ 0x03, // field count
+ 0x7a, 0x01, // mut i8
+ 0x79, 0x00, // i16
+ 0x6b, 0x01, 0x01, // mut ref $StrB
+ // type 1: struct $StrB (field ($next (ref null $StrA)))
+ 0x5f, // struct
+ 0x01, // field count
+ 0x6c, 0x00, 0x01, // mut ref null $StrA
+ // type 2: array $ArrC (mut (ref null $StrA))
+ 0x5e, // array
+ 0x6c, 0x00, 0x01, // mut ref null $StrA
+ // type 3: func
+ 0x60, // signature
+ 0x00, // number of params
+ 0x00, // number of results
+
+ 0x03, // function section
+ 0x02, // section length
+ 0x01, // number of functions
+ 0x03, // function 0: signature 3
+
+ // This is just so that function index 0 counts as declared.
+ 0x06, // global section
+ 0x07, // section length
+ 0x01, // number of globals
+ 0x6c, 0x03, // type of global: ref null $sig3
+ 0x00, // immutable
+ 0xd2, 0x00, 0x0b, // initializer: ref.func $func1; end
+
+ 0x07, // export section
+ 0x08, // section length
+ 0x01, // number of exports
+ 0x04, // length of "main"
+ 0x6d, 0x61, 0x69, 0x6e, // "main"
+ 0x00, // kind: function
+ 0x00, // index: 0
+
+ /////////////////////////// CODE SECTION //////////////////////////
+ 0x0a, // code section
+ 0x35, // section length
+ 0x01, // number of functions
+
+ 0x33, // function 0: size
+ 0x02, // number of locals
+ 0x01, 0x6c, 0x00, // (local $varA (ref null $StrA))
+ 0x01, 0x6c, 0x02, // (local $varC (ref null $ArrC))
+ // $varA := new $StrA(127, 32767, new $StrB(null))
+ 0x41, 0xFF, 0x00, // i32.const 127
+ 0x41, 0xFF, 0xFF, 0x01, // i32.const 32767
+ 0xfb, 0x30, 0x01, // rtt.canon $StrB
+ 0xfb, 0x02, 0x01, // struct.new_default_with_rtt $StrB
+ 0xfb, 0x30, 0x00, // rtt.canon $StrA
+ 0xfb, 0x01, 0x00, // struct.new_with_rtt $StrA
+ 0x22, 0x00, // local.tee $varA
+ // $varA.$pointer.$next = $varA
+ 0xfb, 0x03, 0x00, 0x02, // struct.get $StrA $pointer
+ 0x20, 0x00, // local.get $varA
+ 0xfb, 0x06, 0x01, 0x00, // struct.set $StrB $next
+ // $varC := new $ArrC($varA)
+ 0x20, 0x00, // local.get $varA -- value
+ 0x41, 0x01, // i32.const 1 -- length
+ 0xfb, 0x30, 0x02, // rtt.canon $ArrC
+ 0xfb, 0x11, 0x02, // array.new_with_rtt $ArrC
+ 0x21, 0x01, // local.set $varC
+ 0x0b, // end
+
+ /////////////////////////// NAME SECTION //////////////////////////
+ 0x00, // name section
+ 0x4d, // section length
+ 0x04, // length of "name"
+ 0x6e, 0x61, 0x6d, 0x65, // "name"
+
+ 0x02, // "local names" subsection
+ 0x0f, // length of subsection
+ 0x01, // number of entries
+ 0x00, // for function 0
+ 0x02, // number of entries for function 0
+ 0x00, // local index
+ 0x04, // length of "varA"
+ 0x76, 0x61, 0x72, 0x41, // "varA"
+ 0x01, // local index
+ 0x04, // length of "varB"
+ 0x76, 0x61, 0x72, 0x42, // "varB"
+
+ 0x04, // "type names" subsection
+ 0x13, // length of subsection
+ 0x03, // number of entries
+ 0x00, // type index
+ 0x04, // name length
+ 0x53, 0x74, 0x72, 0x41, // "StrA"
+ 0x01, // type index
+ 0x04, // name length
+ 0x53, 0x74, 0x72, 0x42, // "StrB"
+ 0x02, // type index
+ 0x04, // name length
+ 0x41, 0x72, 0x72, 0x43, // "ArrC"
+
+ 0x0a, // "field names" subsection
+ 0x20, // length of subsection
+ 0x02, // number of types
+ 0x00, // for type $StrA
+ 0x03, // number of entries for $StrA
+ 0x00, // field index 0
+ 0x04, // length of "byte"
+ 0x62, 0x79, 0x74, 0x65, // "byte"
+ 0x01, // field index 1
+ 0x04, // length of "word"
+ 0x77, 0x6f, 0x72, 0x64, // "word"
+ 0x02, // field index 2
+ 0x07, // length of "pointer"
+ 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, // "pointer"
+ 0x01, // for type $StrB
+ 0x01, // number of entries for $StrB
+ 0x00, // field index
+ 0x04, // length of "next"
+ 0x6e, 0x65, 0x78, 0x74, // "next"
+];
+
+const getResult = msg => msg.result || InspectorTest.logMessage(msg);
+
+function setBreakpoint(offset, scriptId, scriptUrl) {
+ InspectorTest.log(
+ 'Setting breakpoint at offset ' + offset + ' on script ' + scriptUrl);
+ return Protocol.Debugger
+ .setBreakpoint({
+ 'location':
+ {'scriptId': scriptId, 'lineNumber': 0, 'columnNumber': offset}
+ })
+ .then(getResult);
+}
+
+Protocol.Debugger.onPaused(async msg => {
+ let loc = msg.params.callFrames[0].location;
+ InspectorTest.log('Paused:');
+ await session.logSourceLocation(loc);
+ InspectorTest.log('Scope:');
+ for (var frame of msg.params.callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var lineNumber = frame.location.lineNumber;
+ var columnNumber = frame.location.columnNumber;
+ InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
+ if (!/^wasm/.test(frame.url)) {
+ InspectorTest.log(' -- skipped');
+ continue;
+ }
+ for (var scope of frame.scopeChain) {
+ InspectorTest.logObject(' - scope (' + scope.type + '):');
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var properties =
+ await Protocol.Runtime.getProperties({objectId});
+ await WasmInspectorTest.dumpScopeProperties(properties);
+ if (scope.type === 'wasm-expression-stack' || scope.type === 'local') {
+ for (var value of properties.result.result) {
+ var details = await Protocol.Runtime.getProperties(
+ {objectId: value.value.objectId});
+ var nested_value =
+ details.result.result.find(({name}) => name === 'value');
+ if (!nested_value.value.objectId) continue;
+ details = await Protocol.Runtime.getProperties(
+ {objectId: nested_value.value.objectId});
+ InspectorTest.log(' object details:');
+ await WasmInspectorTest.dumpScopeProperties(details);
+ }
+ }
+ }
+ }
+
+ Protocol.Debugger.resume();
+});
+
+InspectorTest.runAsyncTestSuite([
+ async function test() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Instantiating.');
+ // Spawn asynchronously:
+ WasmInspectorTest.instantiate(module_bytes);
+ InspectorTest.log(
+ 'Waiting for wasm script (ignoring first non-wasm script).');
+ // Ignore javascript and full module wasm script, get scripts for functions.
+ const [, {params: wasm_script}] =
+ await Protocol.Debugger.onceScriptParsed(2);
+ let offset = 107; // "local.set $varC" at the end.
+ await setBreakpoint(offset, wasm_script.scriptId, wasm_script.url);
+ InspectorTest.log('Calling main()');
+ await WasmInspectorTest.evalWithUrl('instance.exports.main()', 'runWasm');
+ InspectorTest.log('exports.main returned!');
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
new file mode 100644
index 0000000000..a9e1c76813
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break-expected.txt
@@ -0,0 +1,10 @@
+Tests GC within DebugBreak
+
+Running test: test
+Script wasm://wasm/38e28046 byte offset 51: Wasm opcode 0x20 (kExprLocalGet)
+GC triggered
+Debugger.resume
+Hello World (v8://test/instantiate:11:36)
+ at bar (v8://test/instantiate:11:36)
+exports.main returned!
+
diff --git a/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
new file mode 100644
index 0000000000..f6ad6b6bfa
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-gc-in-debug-break.js
@@ -0,0 +1,50 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-reftypes --expose-gc
+utils.load('test/inspector/wasm-inspector-test.js');
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests GC within DebugBreak');
+session.setupScriptMap();
+
+let builder = new WasmModuleBuilder();
+
+let f_index = builder.addImport('foo', 'bar', kSig_v_r);
+
+builder.addFunction('wasm_A', kSig_v_r)
+ .addBody([
+ kExprLocalGet, 0, // -
+ kExprCallFunction, f_index // -
+ ])
+ .exportAs('main');
+
+let module_bytes = builder.toArray();
+
+Protocol.Debugger.onPaused(async message => {
+ let frames = message.params.callFrames;
+ await session.logSourceLocation(frames[0].location);
+ await Protocol.Runtime.evaluate({expression: 'gc()'});
+ InspectorTest.log('GC triggered');
+ let action = 'resume';
+ InspectorTest.log('Debugger.' + action);
+ await Protocol.Debugger[action]();
+})
+
+contextGroup.addScript(`
+function test() {
+ debug(instance.exports.main);
+ instance.exports.main({val: "Hello World"});
+}
+//# sourceURL=test.js`);
+
+InspectorTest.runAsyncTestSuite([async function test() {
+ utils.setLogConsoleApiMessageCalls(true);
+ await Protocol.Debugger.enable();
+ await WasmInspectorTest.instantiate(
+ module_bytes, 'instance', '{foo: {bar: (x) => console.log(x.val)}}');
+ await Protocol.Runtime.evaluate(
+ {expression: 'test()', includeCommandLineAPI: true});
+ InspectorTest.log('exports.main returned!');
+}]);
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
index 931ce978c1..beb6ca9fa9 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-byte-offsets.js
@@ -22,8 +22,8 @@ var func_idx = builder.addFunction('helper', kSig_v_v)
builder.addFunction('main', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprCallFunction, func_idx,
kExprEnd,
kExprEnd
diff --git a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
index b3e3c38c58..d300b8bb9a 100644
--- a/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
+++ b/deps/v8/test/inspector/debugger/wasm-inspect-many-registers.js
@@ -22,8 +22,15 @@ Protocol.Debugger.onPaused(async msg => {
var frame = msg.params.callFrames[0];
for (var scope of frame.scopeChain) {
if (scope.type == 'module') continue;
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
var scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
+ await Protocol.Runtime.getProperties({objectId});
let str = (await Promise.all(scope_properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value))))
.join(', ');
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
index fd79e43626..cb61260730 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint-expected.txt
@@ -35,3 +35,37 @@ Paused at v8://test/instantiate with reason "instrumentation".
Paused at wasm://wasm/20da547a with reason "instrumentation".
Script wasm://wasm/20da547a byte offset 26: Wasm opcode 0x01 (kExprNop)
Done.
+
+Running test: testBreakInExportedFunction
+Setting instrumentation breakpoint
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Instantiating wasm module.
+Paused at v8://test/instantiate with reason "instrumentation".
+Calling exported function 'func' (should trigger a breakpoint).
+Paused at v8://test/call_func with reason "instrumentation".
+Paused at wasm://wasm/8c388106 with reason "instrumentation".
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
+Calling exported function 'func' a second time (should trigger no breakpoint).
+Paused at v8://test/call_func with reason "instrumentation".
+Done.
+
+Running test: testBreakOnlyWithSourceMap
+Setting instrumentation breakpoint for source maps only
+{
+ id : <messageId>
+ result : {
+ breakpointId : <breakpointId>
+ }
+}
+Instantiating wasm module without source map.
+Calling exported function 'func' (should trigger no breakpoint).
+Instantiating wasm module with source map.
+Calling exported function 'func' (should trigger a breakpoint).
+Paused at wasm://wasm/c8e3a856 with reason "instrumentation".
+Script wasm://wasm/c8e3a856 byte offset 33: Wasm opcode 0x01 (kExprNop)
+Done.
diff --git a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
index feeff65999..9f1d897daa 100644
--- a/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-instrumentation-breakpoint.js
@@ -18,9 +18,6 @@ Protocol.Debugger.onPaused(async msg => {
Protocol.Debugger.resume();
});
-// TODO(clemensb): Add test for 'beforeScriptWithSourceMapExecution'.
-// TODO(clemensb): Add test for module without start function.
-
InspectorTest.runAsyncTestSuite([
async function testBreakInStartFunction() {
const builder = new WasmModuleBuilder();
@@ -64,5 +61,59 @@ InspectorTest.runAsyncTestSuite([
await WasmInspectorTest.instantiate(builder.toArray());
InspectorTest.log('Done.');
await Protocol.Debugger.disable();
- }
+ },
+
+ async function testBreakInExportedFunction() {
+ const builder = new WasmModuleBuilder();
+ const func =
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting instrumentation breakpoint');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptExecution'}));
+ InspectorTest.log('Instantiating wasm module.');
+ await WasmInspectorTest.instantiate(builder.toArray());
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger a breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log(
+ 'Calling exported function \'func\' a second time ' +
+ '(should trigger no breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ },
+
+ async function testBreakOnlyWithSourceMap() {
+ const builder = new WasmModuleBuilder();
+ const func =
+ builder.addFunction('func', kSig_v_v).addBody([kExprNop]).exportFunc();
+ const bytes_no_source_map = builder.toArray();
+ builder.addCustomSection('sourceMappingURL', [3, 97, 98, 99]);
+ const bytes_with_source_map = builder.toArray();
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log(
+ 'Setting instrumentation breakpoint for source maps only');
+ InspectorTest.logMessage(
+ await Protocol.Debugger.setInstrumentationBreakpoint(
+ {instrumentation: 'beforeScriptWithSourceMapExecution'}));
+
+ InspectorTest.log('Instantiating wasm module without source map.');
+ await WasmInspectorTest.instantiate(bytes_no_source_map);
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger no breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+
+ InspectorTest.log('Instantiating wasm module with source map.');
+ await WasmInspectorTest.instantiate(bytes_with_source_map);
+ InspectorTest.log(
+ 'Calling exported function \'func\' (should trigger a breakpoint).');
+ await WasmInspectorTest.evalWithUrl('instance.exports.func()', 'call_func');
+ InspectorTest.log('Done.');
+ await Protocol.Debugger.disable();
+ },
+
]);
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index a7c8d9eedb..7778b57f41 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -15,6 +15,7 @@ Script wasm://wasm/e33badc2 byte offset 169: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at C (interpreted) (0:169):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -28,8 +29,7 @@ at C (interpreted) (0:169):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -46,6 +46,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -64,7 +65,7 @@ Script wasm://wasm/e33badc2 byte offset 171: Wasm opcode 0x24 (kExprGlobalSet)
Scope:
at C (interpreted) (0:171):
- scope (wasm-expression-stack):
- 0: 42 (i32)
+ stack: "0": 42 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -78,8 +79,7 @@ at C (interpreted) (0:171):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -96,6 +96,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -114,6 +115,7 @@ Script wasm://wasm/e33badc2 byte offset 173: Wasm opcode 0x41 (kExprI32Const)
Scope:
at C (interpreted) (0:173):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -127,8 +129,7 @@ at C (interpreted) (0:173):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -145,6 +146,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -163,7 +165,7 @@ Script wasm://wasm/e33badc2 byte offset 175: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at C (interpreted) (0:175):
- scope (wasm-expression-stack):
- 0: 47 (i32)
+ stack: "0": 47 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -177,8 +179,7 @@ at C (interpreted) (0:175):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -195,6 +196,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -213,6 +215,7 @@ Script wasm://wasm/e33badc2 byte offset 177: Wasm opcode 0x0b (kExprEnd)
Scope:
at C (interpreted) (0:177):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 47 (i32)
@@ -226,8 +229,7 @@ at C (interpreted) (0:177):
tables: "$exported_table": (Table)
at B (liftoff) (0:158):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -244,6 +246,7 @@ at B (liftoff) (0:158):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -262,8 +265,7 @@ Script wasm://wasm/e33badc2 byte offset 160: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:160):
- scope (wasm-expression-stack):
- 0: 42 (i32)
- 1: 3 (i32)
+ stack: "0": 42 (i32), "1": 3 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -280,6 +282,7 @@ at B (liftoff) (0:160):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -298,7 +301,7 @@ Script wasm://wasm/e33badc2 byte offset 161: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:161):
- scope (wasm-expression-stack):
- 0: 42 (i32)
+ stack: "0": 42 (i32)
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -315,6 +318,7 @@ at B (liftoff) (0:161):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -333,6 +337,7 @@ Script wasm://wasm/e33badc2 byte offset 162: Wasm opcode 0x0b (kExprEnd)
Scope:
at B (liftoff) (0:162):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$i32_arg: 42 (i32)
$i32_local: 0 (i32)
@@ -349,6 +354,7 @@ at B (liftoff) (0:162):
tables: "$exported_table": (Table)
at A (liftoff) (0:128):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
@@ -367,6 +373,7 @@ Script wasm://wasm/e33badc2 byte offset 130: Wasm opcode 0x0b (kExprEnd)
Scope:
at A (liftoff) (0:130):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 42 (i32)
- scope (module):
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 0849840abe..5c0162e4cd 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -22,7 +22,7 @@ function createModule(...customSections) {
var builder = new WasmModuleBuilder();
builder.addFunction('nopFunction', kSig_v_v).addBody([kExprNop]);
builder.addFunction('main', kSig_v_v)
- .addBody([kExprBlock, kWasmStmt, kExprI32Const, 2, kExprDrop, kExprEnd])
+ .addBody([kExprBlock, kWasmVoid, kExprI32Const, 2, kExprDrop, kExprEnd])
.exportAs('main');
for (var { name, value } of customSections) {
builder.addCustomSection(name, value);
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
index 8c18396318..ae59334b93 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-breaks-on-first-breakable-location.js
@@ -21,8 +21,8 @@ var func_idx = builder.addFunction('helper', kSig_v_v)
builder.addFunction('main', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprCallFunction, func_idx,
kExprEnd,
kExprEnd
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index 9ab6c323bf..49836f93ab 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -10,6 +10,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -17,6 +18,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -39,6 +41,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -46,6 +49,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -59,6 +63,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -72,7 +77,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 3 (i32)
+ stack: "0": 3 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -86,6 +91,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -99,7 +105,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 3 (i32)
+ stack: "0": 3 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -113,8 +119,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 3 (i32)
- 1: 1 (i32)
+ stack: "0": 3 (i32), "1": 1 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -128,7 +133,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 3 (i32)
- scope (module):
@@ -142,6 +147,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -149,6 +155,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -162,6 +169,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -169,6 +177,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -182,6 +191,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -195,7 +205,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -209,6 +219,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -222,7 +233,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 2 (i32)
+ stack: "0": 2 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -236,8 +247,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 2 (i32)
- 1: 1 (i32)
+ stack: "0": 2 (i32), "1": 1 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -251,7 +261,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 2 (i32)
- scope (module):
@@ -265,6 +275,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -272,6 +283,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -285,6 +297,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -292,6 +305,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -305,6 +319,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -318,7 +333,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -332,6 +347,7 @@ Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -345,7 +361,7 @@ Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (wasm-expression-stack):
- 0: 1 (i32)
+ stack: "0": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -359,8 +375,7 @@ Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (wasm-expression-stack):
- 0: 1 (i32)
- 1: 1 (i32)
+ stack: "0": 1 (i32), "1": 1 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -374,7 +389,7 @@ Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (wasm-expression-stack):
- 0: 0 (i32)
+ stack: "0": 0 (i32)
- scope (local):
$var0: 1 (i32)
- scope (module):
@@ -388,6 +403,7 @@ Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -395,6 +411,7 @@ at wasm_A (0:38):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -408,6 +425,7 @@ Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
- scope (module):
instance: exports: "main" (Function)
@@ -415,6 +433,7 @@ at wasm_A (0:39):
functions: "$wasm_A": (Function), "$wasm_B": (Function)
at wasm_B (0:56):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -428,6 +447,7 @@ Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -441,7 +461,7 @@ Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (wasm-expression-stack):
- 0: 0 (i32)
+ stack: "0": 0 (i32)
- scope (local):
$var0: 0 (i32)
- scope (module):
@@ -455,6 +475,7 @@ Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b (kExprEnd)
Scope:
at wasm_B (0:61):
- scope (wasm-expression-stack):
+ stack:
- scope (local):
$var0: 0 (i32)
- scope (module):
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
index 5db1e36979..1b28959f7a 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint.js
@@ -17,9 +17,9 @@ const func_a =
const func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/debugger/wasm-source.js b/deps/v8/test/inspector/debugger/wasm-source.js
index 3fd522f8d0..d13fb247cd 100644
--- a/deps/v8/test/inspector/debugger/wasm-source.js
+++ b/deps/v8/test/inspector/debugger/wasm-source.js
@@ -20,7 +20,7 @@ var sig_index = builder.addType(kSig_v_v);
builder.addFunction('main', kSig_v_v)
.addBody([
- kExprBlock, kWasmStmt, kExprI32Const, 0, kExprCallIndirect, sig_index,
+ kExprBlock, kWasmVoid, kExprI32Const, 0, kExprCallIndirect, sig_index,
kTableZero, kExprEnd
])
.exportAs('main');
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-check.js b/deps/v8/test/inspector/debugger/wasm-stack-check.js
index 4189abd3e1..cd2384acec 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-check.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack-check.js
@@ -68,9 +68,16 @@ async function inspect(frame) {
// Inspect only the top wasm frame.
for (var scope of frame.scopeChain) {
if (scope.type == 'module') continue;
- var scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
- let str = (await Promise.all(scope_properties.result.result.map(
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var properties =
+ await Protocol.Runtime.getProperties({objectId});
+ let str = (await Promise.all(properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value))))
.join(', ');
line.push(`${scope.type}: [${str}]`);
diff --git a/deps/v8/test/inspector/debugger/wasm-stack.js b/deps/v8/test/inspector/debugger/wasm-stack.js
index e5a19e6fe8..961ddc84fd 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack.js
@@ -19,7 +19,7 @@ var call_imported_idx = builder.addFunction('call_func', kSig_v_v)
// Open a block in order to make the positions more interesting...
builder.addFunction('main', kSig_v_v)
.addBody(
- [kExprBlock, kWasmStmt, kExprCallFunction, call_imported_idx, kExprEnd])
+ [kExprBlock, kWasmVoid, kExprCallFunction, call_imported_idx, kExprEnd])
.exportAs('main');
var module_bytes = builder.toArray();
diff --git a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
index 6371e80874..aa4b92b59b 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
+++ b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
@@ -12,7 +12,7 @@ session.setupScriptMap();
var builder = new WasmModuleBuilder();
var callee = builder.addFunction('callee', kSig_v_v)
- .addBody([kExprBlock, kWasmStmt, kExprEnd])
+ .addBody([kExprBlock, kWasmVoid, kExprEnd])
.index;
var main = builder.addFunction('main', kSig_v_i)
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
index 4e4135a306..a89db04cdb 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-no-opcode-merging.js
@@ -54,8 +54,15 @@ async function printPauseLocationAndStep(msg) {
let scopes = {};
for (let scope of frame.scopeChain) {
if (scope.type == 'module') continue;
- let scope_properties =
- await Protocol.Runtime.getProperties({objectId: scope.object.objectId});
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
+ var scope_properties =
+ await Protocol.Runtime.getProperties({objectId});
scopes[scope.type] = await Promise.all(scope_properties.result.result.map(
elem => WasmInspectorTest.getWasmValue(elem.value)));
}
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
index e315a565e0..32301dc7bd 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist.js
@@ -18,9 +18,9 @@ const func_a_idx = func_a.index;
const func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
index 6cece203ae..7d80628131 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js
@@ -17,9 +17,9 @@ var func_a_idx =
builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
@@ -91,8 +91,15 @@ async function waitForPauseAndStep(stepAction) {
if (scope.type === 'global' || scope.type === 'module') {
InspectorTest.logObject(' -- skipped');
} else {
+ var { objectId } = scope.object;
+ if (scope.type == 'wasm-expression-stack') {
+ objectId = (await Protocol.Runtime.callFunctionOn({
+ functionDeclaration: 'function() { return this.stack }',
+ objectId
+ })).result.result.objectId;
+ }
let properties = await Protocol.Runtime.getProperties(
- {objectId: scope.object.objectId});
+ {objectId});
for (let {name, value} of properties.result.result) {
value = await WasmInspectorTest.getWasmValue(value);
InspectorTest.log(` ${name}: ${value}`);
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index 83ca29606d..30deadaaef 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
@@ -17,9 +17,9 @@ var func_a_idx =
var func_b = builder.addFunction('wasm_B', kSig_v_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
kExprLocalGet, 0, // -
kExprI32Const, 1, // -
kExprI32Sub, // -
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 6a7e5e23e4..bc785e919d 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -322,8 +322,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[1].As<v8::String>());
RunSyncTask(backend_runner_, [&context_group_id, name](IsolateData* data) {
- data->CreateContext(context_group_id,
- v8_inspector::StringView(name.data(), name.size()));
+ CHECK(data->CreateContext(
+ context_group_id,
+ v8_inspector::StringView(name.data(), name.size())));
});
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index a98df5e010..5e13f52dfd 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -124,6 +124,7 @@
['arch == riscv64', {
# SIMD support is still in progress.
'debugger/wasm-scope-info*': [SKIP],
+ 'debugger/wasm-step-after-trap': [SKIP],
}], # 'arch == riscv64'
################################################################################
@@ -144,6 +145,9 @@
# This test is just slow on TSan, and TSan coverage is not needed to test
# that we do not run OOM. Thus skip it on TSan.
'debugger/wasm-step-a-lot': [SKIP],
+
+ # Another slow test that does not need to run on TSan.
+ 'debugger/wasm-inspect-many-registers': [SKIP],
}], # 'tsan == True'
##############################################################################
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 52eb76eabb..35f51c1fcd 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -72,11 +72,14 @@ IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
int IsolateData::CreateContextGroup() {
int context_group_id = ++last_context_group_id_;
- CreateContext(context_group_id, v8_inspector::StringView());
+ if (!CreateContext(context_group_id, v8_inspector::StringView())) {
+ DCHECK(isolate_->IsExecutionTerminating());
+ return -1;
+ }
return context_group_id;
}
-void IsolateData::CreateContext(int context_group_id,
+bool IsolateData::CreateContext(int context_group_id,
v8_inspector::StringView name) {
v8::HandleScope handle_scope(isolate_.get());
v8::Local<v8::ObjectTemplate> global_template =
@@ -87,12 +90,14 @@ void IsolateData::CreateContext(int context_group_id,
}
v8::Local<v8::Context> context =
v8::Context::New(isolate_.get(), nullptr, global_template);
+ if (context.IsEmpty()) return false;
context->SetAlignedPointerInEmbedderData(kIsolateDataIndex, this);
// Should be 2-byte aligned.
context->SetAlignedPointerInEmbedderData(
kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
contexts_[context_group_id].emplace_back(isolate_.get(), context);
if (inspector_) FireContextCreated(context, context_group_id, name);
+ return true;
}
v8::Local<v8::Context> IsolateData::GetDefaultContext(int context_group_id) {
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index e38c6c5082..74a65628a3 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -50,7 +50,8 @@ class IsolateData : public v8_inspector::V8InspectorClient {
// Setting things up.
int CreateContextGroup();
- void CreateContext(int context_group_id, v8_inspector::StringView name);
+ V8_NODISCARD bool CreateContext(int context_group_id,
+ v8_inspector::StringView name);
void ResetContextGroup(int context_group_id);
v8::Local<v8::Context> GetDefaultContext(int context_group_id);
int GetContextGroupId(v8::Local<v8::Context> context);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt
new file mode 100644
index 0000000000..8a0dac1132
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1183664-expected.txt
@@ -0,0 +1,19 @@
+Regression test for crbug.com/1183664
+
+Running test: testMultipleScriptsInSameLineWithSameURL
+Setting breakpoint in first script
+[
+ [0] : {
+ columnNumber : 1
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
+Setting breakpoint in second script
+[
+ [0] : {
+ columnNumber : 65
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+]
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1183664.js b/deps/v8/test/inspector/regress/regress-crbug-1183664.js
new file mode 100644
index 0000000000..558ff2b2aa
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1183664.js
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Regression test for crbug.com/1183664');
+
+const url = 'test://foo.js';
+const lineNumber = 0;
+
+const columnNumber1 = 1;
+contextGroup.addScript(`console.log("FIRST")`, lineNumber, columnNumber1, url);
+const columnNumber2 = 65;
+contextGroup.addScript(`console.log("SECOND")`, lineNumber, columnNumber2, url);
+
+InspectorTest.runAsyncTestSuite([
+ async function testMultipleScriptsInSameLineWithSameURL() {
+ await Protocol.Debugger.enable();
+ InspectorTest.logMessage('Setting breakpoint in first script')
+ {
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ url,
+ lineNumber,
+ columnNumber: columnNumber1,
+ });
+ InspectorTest.logMessage(locations);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
+ InspectorTest.logMessage('Setting breakpoint in second script')
+ {
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ url,
+ lineNumber,
+ columnNumber: columnNumber2,
+ });
+ InspectorTest.logMessage(locations);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ }
+ }
+]);
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt
new file mode 100644
index 0000000000..9f712a7410
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1199919-expected.txt
@@ -0,0 +1,9 @@
+Regression test for crbug/1199919
+
+Running test: testDefaultParameter
+defaultParameter (v8://test.js:2:2)
+(anonymous) (:0:0)
+
+Running test: testDestructuringParameter
+destructuringParameter (v8://test.js:6:2)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1199919.js b/deps/v8/test/inspector/regress/regress-crbug-1199919.js
new file mode 100644
index 0000000000..dcc7dc2655
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1199919.js
@@ -0,0 +1,44 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} =
+ InspectorTest.start('Regression test for crbug/1199919');
+
+const source = `
+async function defaultParameter(x = 1) {
+ return x;
+}
+
+async function destructuringParameter({x}) {
+ return x;
+}
+`;
+const url = 'v8://test.js';
+
+contextGroup.addScript(source, 0, 0, url);
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function testDefaultParameter() {
+ await Promise.all([Protocol.Runtime.enable(), Protocol.Debugger.enable()]);
+ const {result: {breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({lineNumber: 2, url});
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'defaultParameter()'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Promise.all([Protocol.Debugger.resume(), evalPromise]);
+ await Promise.all([Protocol.Runtime.disable(), Protocol.Debugger.disable()]);
+ },
+
+ async function testDestructuringParameter() {
+ await Promise.all([Protocol.Runtime.enable(), Protocol.Debugger.enable()]);
+ const {result: {breakpointId}} = await Protocol.Debugger.setBreakpointByUrl({lineNumber: 6, url});
+ const evalPromise = Protocol.Runtime.evaluate({expression: 'destructuringParameter({x: 5})'});
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Promise.all([Protocol.Debugger.resume(), evalPromise]);
+ await Promise.all([Protocol.Runtime.disable(), Protocol.Debugger.disable()]);
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 33521c8281..ca23ac8b10 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -125,3 +125,6 @@ Internal properties
[[ArrayBufferData]] string 0x...
[[Int8Array]] object undefined
[[Uint8Array]] object undefined
+
+Running test: testObjectWithProtoProperty
+ __proto__ own object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index bc3ea8799f..1a8aa9e99a 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -104,6 +104,10 @@ InspectorTest.runAsyncTestSuite([
this.Uint8Array = this.uint8array_old;
delete this.uint8array_old;
})()`);
+ },
+
+ async function testObjectWithProtoProperty() {
+ await logExpressionProperties('Object.defineProperty({}, "__proto__", {enumerable: true, value: {b:"aaa"}})');
}
]);
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index 65fbeb4d6b..ebd0b6378c 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -53,7 +53,7 @@ TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
CHECK(Start());
}
-TaskRunner::~TaskRunner() { Join(); }
+TaskRunner::~TaskRunner() {}
void TaskRunner::Run() {
data_.reset(new IsolateData(this, std::move(setup_global_tasks_),
diff --git a/deps/v8/test/inspector/wasm-inspector-test.js b/deps/v8/test/inspector/wasm-inspector-test.js
index 47d8419055..9fe13e9d7d 100644
--- a/deps/v8/test/inspector/wasm-inspector-test.js
+++ b/deps/v8/test/inspector/wasm-inspector-test.js
@@ -36,9 +36,10 @@ WasmInspectorTest.compile = async function(bytes, module_name = 'module') {
};
WasmInspectorTest.instantiate =
- async function(bytes, instance_name = 'instance') {
+ async function(bytes, instance_name = 'instance', imports) {
const instantiate_code = `var ${instance_name} = (${
- WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)});`;
+ WasmInspectorTest.instantiateFromBuffer})(${JSON.stringify(bytes)},
+ ${imports});`;
await WasmInspectorTest.evalWithUrl(instantiate_code, 'instantiate');
};
@@ -51,12 +52,13 @@ WasmInspectorTest.dumpScopeProperties = async function(message) {
};
WasmInspectorTest.getWasmValue = async function(value) {
- let msg = await Protocol.Runtime.getProperties({objectId: value.objectId});
+ let msg = await Protocol.Runtime.getProperties({ objectId: value.objectId });
printIfFailure(msg);
const value_type = msg.result.result.find(({name}) => name === 'type');
const value_value = msg.result.result.find(({name}) => name === 'value');
return `${
value_value.value.unserializableValue ??
+ value_value.value.description ??
value_value.value.value} (${value_type.value.value})`;
};
diff --git a/deps/v8/test/intl/displaynames/getoptionsobject.js b/deps/v8/test/intl/displaynames/getoptionsobject.js
new file mode 100644
index 0000000000..b540ddda14
--- /dev/null
+++ b/deps/v8/test/intl/displaynames/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.DisplayNames call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.DisplayNames("en", testCase), TypeError);
+});
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index eb162bc697..ee54c92461 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -60,11 +60,6 @@
'regress-7770': [SKIP],
}], # 'system == android'
-['msan == True', {
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'regress-364374': [SKIP],
-}], # msan == True
-
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
diff --git a/deps/v8/test/intl/list-format/getoptionsobject.js b/deps/v8/test/intl/list-format/getoptionsobject.js
new file mode 100644
index 0000000000..376d1dab5b
--- /dev/null
+++ b/deps/v8/test/intl/list-format/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.ListFormat call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.ListFormat("en", testCase), TypeError);
+});
diff --git a/deps/v8/test/intl/regress-11595.js b/deps/v8/test/intl/regress-11595.js
new file mode 100644
index 0000000000..cd7d869654
--- /dev/null
+++ b/deps/v8/test/intl/regress-11595.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_best_fit_matcher
+
+const intl_objects = [
+ Intl.Collator,
+ Intl.DateTimeFormat,
+ Intl.DisplayNames,
+ Intl.ListFormat,
+ Intl.NumberFormat,
+ Intl.PluralRules,
+ Intl.RelativeTimeFormat,
+ Intl.Segmenter,
+];
+
+// Just ensure the f.supportedLocalesOf won't cause crash.
+intl_objects.forEach(f => {
+ let supported = f.supportedLocalesOf(["en"]);
+ assertEquals(1, supported.length);
+ assertEquals("en", supported[0]);
+});
diff --git a/deps/v8/test/intl/segmenter/getoptionsobject.js b/deps/v8/test/intl/segmenter/getoptionsobject.js
new file mode 100644
index 0000000000..134f0fdc8e
--- /dev/null
+++ b/deps/v8/test/intl/segmenter/getoptionsobject.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Intl.Segmenter call GetOptionsObject instead of ToObject
+// https://tc39.es/ecma402/#sec-getoptionsobject
+// https://tc39.es/ecma262/#sec-toobject
+let testCases = [
+ null, // Null
+ true, // Boolean
+ false, // Boolean
+ 1234, // Number
+ "string", // String
+ Symbol('foo'), // Symbol
+ 9007199254740991n // BigInt
+];
+
+testCases.forEach(function (testCase) {
+ assertThrows(() => new Intl.Segmenter("en", testCase), TypeError);
+});
diff --git a/deps/v8/test/js-perf-test/OWNERS b/deps/v8/test/js-perf-test/OWNERS
index e46cedb98b..030f331cd4 100644
--- a/deps/v8/test/js-perf-test/OWNERS
+++ b/deps/v8/test/js-perf-test/OWNERS
@@ -1 +1 @@
-per-file JSTests.json=petermarshall@chromium.org
+per-file JSTests.json=marja@chromium.org
diff --git a/deps/v8/test/message/fail/await-non-async.out b/deps/v8/test/message/fail/await-non-async.out
index 3198e8d7b1..36cd90784d 100644
--- a/deps/v8/test/message/fail/await-non-async.out
+++ b/deps/v8/test/message/fail/await-non-async.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: SyntaxError: await is only valid in async function
+*%(basename)s:5: SyntaxError: await is only valid in async functions and the top level bodies of modules
function f() { await Promise.resolve(); }
^^^^^
-SyntaxError: await is only valid in async function
+SyntaxError: await is only valid in async functions and the top level bodies of modules
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.js b/deps/v8/test/message/fail/wasm-exception-rethrow.js
index 0f6073e17a..c8425f7bda 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.js
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.js
@@ -10,7 +10,7 @@ let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprI32Const, 23,
kExprThrow, except,
kExprCatch, except,
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
index f8b252b543..f23cfa3a51 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.js
@@ -2,4 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry();
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
index 0844bc02b9..7775052c91 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry1.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry: cleanup must be callable
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
let fg = new FinalizationRegistry();
^
TypeError: FinalizationRegistry: cleanup must be callable
at new FinalizationRegistry (<anonymous>)
- at *%(basename)s:*:10
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
index 2c54063287..599bfc6d05 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.js
@@ -2,4 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry({});
diff --git a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
index 69125af984..278c3506bf 100644
--- a/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
+++ b/deps/v8/test/message/fail/weak-refs-finalizationregistry2.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry: cleanup must be callable
+*%(basename)s:7: TypeError: FinalizationRegistry: cleanup must be callable
let fg = new FinalizationRegistry({});
^
TypeError: FinalizationRegistry: cleanup must be callable
at new FinalizationRegistry (<anonymous>)
- at *%(basename)s:*:10
+ at *%(basename)s:7:10
diff --git a/deps/v8/test/message/fail/weak-refs-register1.js b/deps/v8/test/message/fail/weak-refs-register1.js
index 07d9c6c5b6..7110a25e6c 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.js
+++ b/deps/v8/test/message/fail/weak-refs-register1.js
@@ -2,5 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
fg.register(1);
diff --git a/deps/v8/test/message/fail/weak-refs-register1.out b/deps/v8/test/message/fail/weak-refs-register1.out
index aa4cbc2fa2..6a9b23ecf8 100644
--- a/deps/v8/test/message/fail/weak-refs-register1.out
+++ b/deps/v8/test/message/fail/weak-refs-register1.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry.prototype.register: target must be an object
+*%(basename)s:8: TypeError: FinalizationRegistry.prototype.register: target must be an object
fg.register(1);
^
TypeError: FinalizationRegistry.prototype.register: target must be an object
at FinalizationRegistry.register (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/fail/weak-refs-register2.js b/deps/v8/test/message/fail/weak-refs-register2.js
index b57da62095..31df874585 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.js
+++ b/deps/v8/test/message/fail/weak-refs-register2.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
let o = {};
fg.register(o, o);
diff --git a/deps/v8/test/message/fail/weak-refs-register2.out b/deps/v8/test/message/fail/weak-refs-register2.out
index 04b1ff559f..0f2c2f1ee2 100644
--- a/deps/v8/test/message/fail/weak-refs-register2.out
+++ b/deps/v8/test/message/fail/weak-refs-register2.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
+*%(basename)s:9: TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
fg.register(o, o);
^
TypeError: FinalizationRegistry.prototype.register: target and holdings must not be same
at FinalizationRegistry.register (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:9:4
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.js b/deps/v8/test/message/fail/weak-refs-unregister.js
index 7befe4a2f7..0f41263cba 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.js
+++ b/deps/v8/test/message/fail/weak-refs-unregister.js
@@ -2,5 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
let fg = new FinalizationRegistry(() => {});
fg.unregister(1);
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.out b/deps/v8/test/message/fail/weak-refs-unregister.out
index 5294586983..766d04349f 100644
--- a/deps/v8/test/message/fail/weak-refs-unregister.out
+++ b/deps/v8/test/message/fail/weak-refs-unregister.out
@@ -1,6 +1,6 @@
-*%(basename)s:*: TypeError: unregisterToken ('1') must be an object
+*%(basename)s:8: TypeError: unregisterToken ('1') must be an object
fg.unregister(1);
^
TypeError: unregisterToken ('1') must be an object
at FinalizationRegistry.unregister (<anonymous>)
- at *%(basename)s:*:4
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index d5d57e0b29..03c7d6618c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -70,10 +70,13 @@
}],
################################################################################
-['arch == ppc64 or arch == mips64el or arch == mipsel', {
+['arch == mips64el or arch == mipsel', {
# Tests that require Simd enabled.
'wasm-trace-memory': [SKIP],
-}], # arch == ppc64 or arch == mips64el or arch == mipsel
-
+}], # arch == mips64el or arch == mipsel
+['arch == riscv64', {
+ # Tests that require Simd enabled.
+ 'wasm-trace-memory': [SKIP],
+}],
]
diff --git a/deps/v8/test/message/weakref-finalizationregistry-error.js b/deps/v8/test/message/weakref-finalizationregistry-error.js
index 42f5eb3bc0..e4c47fed0d 100644
--- a/deps/v8/test/message/weakref-finalizationregistry-error.js
+++ b/deps/v8/test/message/weakref-finalizationregistry-error.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
// Flags: --no-stress-opt
// Since cleanup tasks are top-level tasks, errors thrown from them don't stop
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 303514947e..e315f0f105 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -218,7 +218,11 @@ short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
short_a.length = 10;
short_test(short_a, 0);
-assertUnoptimized(short_test);
+// TODO(v8:11457) Currently, we cannot inline stores if there is a dictionary
+// mode prototype on the prototype chain. Therefore, if
+// v8_dict_property_const_tracking is enabled, the optimized code only contains
+// a call to the IC handler and doesn't get deopted.
+assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(short_test));
// A test for when we would modify a phi index.
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index 2f4d4e6c06..87eebb5633 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -509,6 +509,10 @@ assertThrows(() => {
Array.prototype.sort.call(undefined);
}, TypeError);
+assertThrows(() => {
+ Array.prototype.sort.call(null);
+}, TypeError);
+
// This test ensures that RemoveArrayHoles does not shadow indices in the
// prototype chain. There are multiple code paths, we force both and check that
// they have the same behavior.
@@ -748,3 +752,15 @@ function TestSortCmpPackedSetLengthToZero() {
xs.sort(create_cmpfn(() => xs.length = 0));
assertTrue(HasPackedSmi(xs));
}
+TestSortCmpPackedSetLengthToZero();
+
+(function TestSortingNonObjectConvertsToObject() {
+ const v1 = Array.prototype.sort.call(true);
+ assertEquals('object', typeof v1);
+
+ const v2 = Array.prototype.sort.call(false);
+ assertEquals('object', typeof v2);
+
+ const v3 = Array.prototype.sort.call(42);
+ assertEquals('object', typeof v3);
+})();
diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js
index d717c6dfa6..4b5f4e0da4 100644
--- a/deps/v8/test/mjsunit/array-store-and-grow.js
+++ b/deps/v8/test/mjsunit/array-store-and-grow.js
@@ -205,7 +205,11 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
grow_store(a,10,1);
assertOptimized(grow_store);
grow_store(a,2048,1);
- assertUnoptimized(grow_store);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(grow_store));
%ClearFunctionFeedback(grow_store);
})();
@@ -254,6 +258,10 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
assertOptimized(f);
// An attempt to grow should cause deopt
f(new Array("hi"), 1, 3);
- assertUnoptimized(f);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(f));
%ClearFunctionFeedback(f);
})();
diff --git a/deps/v8/test/mjsunit/baseline/cross-realm.js b/deps/v8/test/mjsunit/baseline/cross-realm.js
index 1d0fb6b0a2..8c3cc0af06 100644
--- a/deps/v8/test/mjsunit/baseline/cross-realm.js
+++ b/deps/v8/test/mjsunit/baseline/cross-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --sparkplug
+// Flags: --allow-natives-syntax --sparkplug --no-always-sparkplug
// Tier-up across Realms
@@ -17,16 +17,20 @@
var realm1 = Realm.createAllowCrossRealmAccess();
var realm2 = Realm.createAllowCrossRealmAccess();
+ // f1 and f2 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
let f1 = Realm.eval(realm1, "(" + factory1.toString() + ")")();
let f2 = Realm.eval(realm2, "(" + factory1.toString() + ")")();
%NeverOptimizeFunction(f1);
%NeverOptimizeFunction(f2);
+ // Compile f1 to baseline, f2 stays uncompiled
%CompileBaseline(f1);
assertEquals(0, f1(0));
assertTrue(isBaseline(f1));
assertFalse(isBaseline(f2));
+ // f2 tiers up to baseline via lazy compile
assertEquals(0, f2(0));
assertTrue(isBaseline(f1));
assertTrue(isBaseline(f2));
@@ -44,14 +48,18 @@
var realm1 = Realm.createAllowCrossRealmAccess();
var realm2 = Realm.createAllowCrossRealmAccess();
+ // f1, f2 and f3 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
let f1 = Realm.eval(realm1, "(" + factory2.toString() + ")")();
let realmFactory = Realm.eval(realm2, "(" + factory2.toString() + ")");
+ // f2 and f3 are in the same realm, so share a feedback vector cell.
let f2 = realmFactory();
let f3 = realmFactory();
%NeverOptimizeFunction(f1);
%NeverOptimizeFunction(f2);
%NeverOptimizeFunction(f3);
+ // Compile f1 to baseline, f2 to interpreter, f3 stays uncompiled.
assertEquals(0, f2(0));
%CompileBaseline(f1);
assertEquals(0, f1(0));
@@ -59,10 +67,55 @@
assertFalse(isBaseline(f2));
assertFalse(isBaseline(f3));
+ // Compile f3, tiers up to baseline via lazy compile and installs the feedback
+ // vector
assertEquals(0, f3(0));
assertTrue(isBaseline(f3));
assertFalse(isBaseline(f2));
+ // Run f2, tiers up to baseline via interpreter entry.
assertEquals(0, f2(0));
assertTrue(isBaseline(f2));
})();
+
+// Ensure a feedback vector is created when sharing baseline code and a closure
+// feedback cell array already exists.
+(function() {
+ function factory3() {
+ return function(a) {
+ return a;
+ }
+ }
+
+ var realm1 = Realm.createAllowCrossRealmAccess();
+ var realm2 = Realm.createAllowCrossRealmAccess();
+
+ // f1, f2 and f3 have the same code, so share a SharedFunctionInfo (i.e. share
+ // bytecode and baseline code).
+ let f1 = Realm.eval(realm1, "(" + factory3.toString() + ")")();
+ let realmFactory = Realm.eval(realm2, "(" + factory3.toString() + ")");
+ // f2 and f3 are in the same realm, so share a feedback vector cell.
+ let f2 = realmFactory();
+ let f3 = realmFactory();
+ %NeverOptimizeFunction(f1);
+ %NeverOptimizeFunction(f2);
+ %NeverOptimizeFunction(f3);
+
+ // Compile f1 to baseline, f2 to interpreter, f3 stays uncompiled.
+ assertEquals(0, f2(0));
+ %CompileBaseline(f1);
+ assertEquals(0, f1(0));
+ assertTrue(isBaseline(f1));
+ assertFalse(isBaseline(f2));
+ assertFalse(isBaseline(f3));
+
+ // Run f2, tiers up to baseline via interpreter entry and installs the
+ // feedback vector
+ assertEquals(0, f2(0));
+ assertTrue(isBaseline(f2));
+ assertFalse(isBaseline(f3));
+
+ // Compile f3, tiers up to baseline via lazy compile.
+ assertEquals(0, f3(0));
+ assertTrue(isBaseline(f3));
+})();
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
index 409465c210..a8b836dcbe 100644
--- a/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
+++ b/deps/v8/test/mjsunit/baseline/test-baseline-module.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --super-ic --sparkplug
+// Flags: --allow-natives-syntax --super-ic --sparkplug --no-always-sparkplug
export let exported = 17;
import imported from 'test-baseline-module-helper.mjs';
diff --git a/deps/v8/test/mjsunit/baseline/test-baseline.js b/deps/v8/test/mjsunit/baseline/test-baseline.js
index b35a7ffbff..7e427c0d54 100644
--- a/deps/v8/test/mjsunit/baseline/test-baseline.js
+++ b/deps/v8/test/mjsunit/baseline/test-baseline.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --super-ic --sparkplug
+// Flags: --allow-natives-syntax --super-ic --sparkplug --no-always-sparkplug
function run(f, ...args) {
try { f(...args); } catch (e) {}
@@ -81,11 +81,14 @@ assertEquals(run(()=>{ var x = 0; for(var i = 0; i < 10; ++i) x+=1; return x;}),
function testTypeOf(o, t) {
let types = ['number', 'string', 'symbol', 'boolean', 'bigint', 'undefined',
'function', 'object'];
- assertEquals(t, eval('run(()=>typeof ' + o + ')'));
- assertTrue(eval('run(()=>typeof ' + o + ' == "' + t + '")'));
+ assertEquals(t, eval('run(()=>typeof ' + o + ')'),
+ `(()=>typeof ${o})() == ${t}`);
+ assertTrue(eval('run(()=>typeof ' + o + ' == "' + t + '")'),
+ `typeof ${o} == ${t}`);
var other_types = types.filter((x) => x !== t);
for (var other of other_types) {
- assertFalse(eval('run(()=>typeof ' + o + ' == "' + other + '")'));
+ assertFalse(eval('run(()=>typeof ' + o + ' == "' + other + '")'),
+ `typeof ${o} != ${other}`);
}
}
@@ -100,15 +103,15 @@ testTypeOf('"42"', 'string');
testTypeOf('Symbol(42)', 'symbol');
testTypeOf('{}', 'object');
testTypeOf('[]', 'object');
-//testTypeOf('new Proxy({}, {})', 'object');
-//testTypeOf('new Proxy([], {})', 'object');
+testTypeOf('new Proxy({}, {})', 'object');
+testTypeOf('new Proxy([], {})', 'object');
testTypeOf('(_ => 42)', 'function');
testTypeOf('function() {}', 'function');
testTypeOf('function*() {}', 'function');
testTypeOf('async function() {}', 'function');
testTypeOf('async function*() {}', 'function');
-//testTypeOf('new Proxy(_ => 42, {})', 'function');
-//testTypeOf('class {}', 'function');
+testTypeOf('new Proxy(_ => 42, {})', 'function');
+testTypeOf('class {}', 'function');
testTypeOf('Object', 'function');
// Binop
@@ -265,6 +268,19 @@ for (let val of gen) {
}
assertEquals(4, i);
+// Generator with a lot of locals
+let gen_func_with_a_lot_of_locals = eval(`(function*() {
+ ${ Array(32*1024).fill().map((x,i)=>`let local_${i};`).join("\n") }
+ yield 1;
+ yield 2;
+ yield 3;
+})`);
+i = 1;
+for (let val of run(gen_func_with_a_lot_of_locals)) {
+ assertEquals(i++, val);
+}
+assertEquals(4, i);
+
// Async await
run(async function() {
await 1;
diff --git a/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js b/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js
new file mode 100644
index 0000000000..7376216ae1
--- /dev/null
+++ b/deps/v8/test/mjsunit/baseline/verify-bytecode-offsets.js
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-sparkplug --allow-natives-syntax
+
+// This test mainly exists to make ClusterFuzz aware of
+// d8.test.verifySourcePositions.
+
+globalValue = false;
+
+function foo(param1, ...param2) {
+ try {
+ for (let key in param1) { param2.push(key); }
+ for (let a of param1) { param2.push(a); }
+ let [a, b] = param2;
+ let copy = [{literal:1}, {}, [], [1], 1, ...param2];
+ return a + b + copy.length;
+ } catch (e) {
+ return e.toString().match(/[a-zA-Z]+/g);
+ } finally {
+ globalValue = new String(23);
+ }
+ return Math.min(Math.random(), 0.5);
+}
+
+var obj = [...Array(10).keys()];
+obj.foo = 'bar';
+foo(obj, obj);
+
+d8.test.verifySourcePositions(foo);
+
+// Make sure invalid calls throw.
+assertThrows(() => {d8.test.verifySourcePositions(0)});
+assertThrows(() => {d8.test.verifySourcePositions(obj)});
+assertThrows(() => {d8.test.verifySourcePositions(new Proxy(foo, {}))});
+assertThrows(() => {d8.test.verifySourcePositions(%GetUndetectable())});
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 50318b5639..ac0e9ef59e 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -63,6 +63,13 @@ assertUnoptimized(add_field, "no sync");
// Let concurrent recompilation proceed.
%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization that bailed out.
-assertUnoptimized(add_field, "sync");
+if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) Currently, we cannot inline property stores if there is a
+ // dictionary mode prototype on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get invalidated when the
+ // transition map changes.
+ assertUnoptimized(add_field, "sync");
+}
// Clear type info for stress runs.
%ClearFunctionFeedback(add_field);
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls.js b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
new file mode 100644
index 0000000000..c61a7a62a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
@@ -0,0 +1,148 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exercises basic fast API calls and enables fuzzing of this
+// functionality.
+
+// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// --always-opt is disabled because we rely on particular feedback for
+// optimizing to the fastest path.
+// Flags: --no-always-opt
+
+assertThrows(() => d8.test.fast_c_api());
+const fast_c_api = new d8.test.fast_c_api();
+
+// ----------- add_all -----------
+// `add_all` has the following signature:
+// double add_all(bool /*should_fallback*/, int32_t, uint32_t,
+// int64_t, uint64_t, float, double)
+
+const max_safe_float = 2**24 - 1;
+const add_all_result = -42 + 45 + Number.MIN_SAFE_INTEGER + Number.MAX_SAFE_INTEGER +
+ max_safe_float * 0.5 + Math.PI;
+
+function add_all(should_fallback = false) {
+ return fast_c_api.add_all(should_fallback,
+ -42, 45, Number.MIN_SAFE_INTEGER, Number.MAX_SAFE_INTEGER,
+ max_safe_float * 0.5, Math.PI);
+}
+
+%PrepareFunctionForOptimization(add_all);
+assertEquals(add_all_result, add_all());
+%OptimizeFunctionOnNextCall(add_all);
+
+if (fast_c_api.supports_fp_params) {
+ // Test that regular call hits the fast path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(0, fast_c_api.slow_call_count());
+
+ // Test fallback to slow path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all(true));
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+
+ // Test that no fallback hits the fast path again.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(1, fast_c_api.fast_call_count());
+ assertEquals(0, fast_c_api.slow_call_count());
+} else {
+ // Test that calling with unsupported types hits the slow path.
+ fast_c_api.reset_counts();
+ assertEquals(add_all_result, add_all());
+ assertEquals(0, fast_c_api.fast_call_count());
+ assertEquals(1, fast_c_api.slow_call_count());
+}
+
+// ----------- Test add_all signature mismatch -----------
+function add_all_mismatch() {
+ return fast_c_api.add_all(false /*should_fallback*/,
+ 45, -42, Number.MAX_SAFE_INTEGER, max_safe_float * 0.5,
+ Number.MIN_SAFE_INTEGER, Math.PI);
+}
+
+%PrepareFunctionForOptimization(add_all_mismatch);
+const add_all_mismatch_result = add_all_mismatch();
+%OptimizeFunctionOnNextCall(add_all_mismatch);
+
+fast_c_api.reset_counts();
+assertEquals(add_all_mismatch_result, add_all_mismatch());
+assertEquals(1, fast_c_api.slow_call_count());
+assertEquals(0, fast_c_api.fast_call_count());
+// If the function was ever optimized to the fast path, it should
+// have been deoptimized due to the argument types mismatch. If it
+// wasn't optimized due to lack of support for FP params, it will
+// stay optimized.
+if (fast_c_api.supports_fp_params) {
+ assertUnoptimized(add_all_mismatch);
+}
+
+// ----------- add_32bit_int -----------
+// `add_32bit_int` has the following signature:
+// int add_32bit_int(bool /*should_fallback*/, int32_t, uint32_t)
+
+const add_32bit_int_result = -42 + 45;
+
+function add_32bit_int(should_fallback = false) {
+ return fast_c_api.add_32bit_int(should_fallback, -42, 45);
+}
+
+%PrepareFunctionForOptimization(add_32bit_int);
+assertEquals(add_32bit_int_result, add_32bit_int());
+%OptimizeFunctionOnNextCall(add_32bit_int);
+
+// Test that regular call hits the fast path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int());
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(0, fast_c_api.slow_call_count());
+
+// Test fallback to slow path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int(true));
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(1, fast_c_api.slow_call_count());
+
+// Test that no fallback hits the fast path again.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int());
+assertEquals(1, fast_c_api.fast_call_count());
+assertEquals(0, fast_c_api.slow_call_count());
+
+// ----------- Test various signature mismatches -----------
+function add_32bit_int_mismatch(arg0, arg1, arg2, arg3) {
+ return fast_c_api.add_32bit_int(arg0, arg1, arg2, arg3);
+}
+
+%PrepareFunctionForOptimization(add_32bit_int_mismatch);
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
+%OptimizeFunctionOnNextCall(add_32bit_int_mismatch);
+
+// Test that passing extra argument stays on the fast path.
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45, -42));
+assertEquals(1, fast_c_api.fast_call_count());
+
+// Test that passing wrong argument types stays on the fast path.
+fast_c_api.reset_counts();
+assertEquals(Math.round(-42 + 3.14), add_32bit_int_mismatch(false, -42, 3.14));
+assertEquals(1, fast_c_api.fast_call_count());
+
+// Test that passing too few arguments falls down the slow path,
+// because it's an argument type mismatch (undefined vs. int).
+fast_c_api.reset_counts();
+assertEquals(-42, add_32bit_int_mismatch(false, -42));
+assertEquals(1, fast_c_api.slow_call_count());
+assertEquals(0, fast_c_api.fast_call_count());
+assertUnoptimized(add_32bit_int_mismatch);
+
+// Test that the function can be optimized again.
+%PrepareFunctionForOptimization(add_32bit_int_mismatch);
+%OptimizeFunctionOnNextCall(add_32bit_int_mismatch);
+fast_c_api.reset_counts();
+assertEquals(add_32bit_int_result, add_32bit_int_mismatch(false, -42, 45));
+assertEquals(1, fast_c_api.fast_call_count());
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
index 4d1425272a..fb227dd93d 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -125,8 +125,13 @@
let v1 = b.value;
maybe_sideeffect(b);
let v2 = b.value;
- %TurbofanStaticAssert(Object.is(v1, v2));
- %TurbofanStaticAssert(Object.is(v2, k));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // b has a dictionary mode prototype and the load elimination doesn't
+ // work, yet.
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
}
%EnsureFeedbackVectorForFunction(B);
@@ -151,8 +156,13 @@
let v1 = b.value;
maybe_sideeffect(b);
let v2 = b.value;
- %TurbofanStaticAssert(Object.is(v1, v2));
- %TurbofanStaticAssert(Object.is(v2, kk));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // b has a dictionary mode prototype and the load elimination doesn't
+ // work, yet.
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, kk));
+ }
}
%EnsureFeedbackVectorForFunction(B);
diff --git a/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
index 4150535462..9341f8e8c9 100644
--- a/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
+++ b/deps/v8/test/mjsunit/compiler/monomorphic-named-load-with-no-map.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax --expose-gc
+// Flags: --allow-natives-syntax --harmony-weak-refs --expose-gc
// Helper to convert setTimeout into an awaitable promise.
function asyncTimeout(timeout) {
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
index f01dcaffcd..ae7f92a33d 100644
--- a/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve-stable-maps.js
@@ -29,6 +29,13 @@
const b = makeObjectWithStableMap();
b.d = 1;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) In this mode we weren't able to inline the access, yet, so
+ // it stays optimized. See related TODO in
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ return;
+ }
+
// This should deoptimize foo.
assertUnoptimized(foo);
})();
@@ -58,6 +65,13 @@
const b = makeObjectWithStableMap();
b.z = 1;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) In this mode we weren't able to inline the access, yet, so
+ // it stays optimized. See related TODO in
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ return;
+ }
+
// This should deoptimize foo.
assertUnoptimized(foo);
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1215514.js b/deps/v8/test/mjsunit/compiler/regress-1215514.js
new file mode 100644
index 0000000000..a597b31049
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1215514.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-concurrent-recompilation
+
+new Array(4242).shift();
diff --git a/deps/v8/test/mjsunit/compiler/serializer-accessors.js b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
index da5b426c01..1281bed3df 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-accessors.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-accessors.js
@@ -11,7 +11,12 @@ class C {
return 42;
}
set prop(v) {
- assertEquals(expect_interpreted, %IsBeingInterpreted());
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then
+ // C.prototype is a dictionary mode object and we cannot inline the call
+ // to this setter, yet.
+ assertEquals(expect_interpreted, %IsBeingInterpreted());
+ }
%TurbofanStaticAssert(v === 43);
}
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
index b147530ba0..3367a08e3e 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-jump.js
@@ -5,7 +5,12 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
function f(x) {
- %TurbofanStaticAssert(x.foo === 42);
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |x| in |main| is a dictionary mode object, and we cannot
+ // inline the storing of x.foo, yet.
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ %TurbofanStaticAssert(x.foo === 42);
+ }
return %IsBeingInterpreted();
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
index 6e321d5c1d..3f24649f04 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-dead-after-return.js
@@ -5,7 +5,12 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
function f(x) {
- %TurbofanStaticAssert(x.foo === 42);
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |x| in |main| is a dictionary mode object, and we cannot
+ // inline the storing of x.foo, yet.
+ %TurbofanStaticAssert(x.foo === 42);
+ }
return %IsBeingInterpreted();
}
diff --git a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
index 6a6da6fa7e..ff7a1c5a2b 100644
--- a/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
+++ b/deps/v8/test/mjsunit/compiler/serializer-transition-propagation.js
@@ -9,7 +9,12 @@ var expect_interpreted = true;
function C() {
this.a = 1;
assertEquals(expect_interpreted, %IsBeingInterpreted());
- %TurbofanStaticAssert(this.x == 42);
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11457) If v8_dict_property_const_tracking is enabled, then the
+ // prototype of |this| in D() is a dictionary mode object, and we cannot
+ // inline the storing of this.x, yet.
+ %TurbofanStaticAssert(this.x == 42);
+ }
};
function D() {
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
index 752423443b..051239fb5f 100644
--- a/deps/v8/test/mjsunit/const-dict-tracking.js
+++ b/deps/v8/test/mjsunit/const-dict-tracking.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --no-stress-flush-bytecode
+// Flags: --block-concurrent-recompilation
//
// Tests tracking of constness of properties stored in dictionary
// mode prototypes.
@@ -260,3 +262,471 @@ function testbench(o, proto, update_proto, check_constness) {
testbench(o, proto, update_z, false);
})();
+
+//
+// Below: Testing TF optimization of accessing constants in dictionary mode
+// prototypes.
+//
+
+// Test inlining with fast mode receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+})();
+
+// Test inlining with dictionary mode receiver that is a prototype.
+
+(function() {
+
+ var proto1 = Object.create(null);
+ proto1.x = 1;
+ var proto2 = Object.create(null);
+ var o = Object.create(proto1);
+ Object.setPrototypeOf(proto1, proto2);
+
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(proto1));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(proto1));
+ assertOptimized(read_x);
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ // TODO(v8:11457) This test doesn't work yet, see TODO in
+ // AccessInfoFactory::TryLoadPropertyDetails. Currently, we can't inline
+ // accesses with dictionary mode receivers.
+ // if (%IsDictPropertyConstTrackingEnabled()) {
+ // assertTrue(%HasFastProperties(o));
+ // assertFalse(%HasFastProperties(proto1));
+ // assertFalse(%HasFastProperties(proto2));
+ // assertUnoptimized(read_x);
+ // }
+})();
+
+// The machinery we use for detecting the invalidation of constants held by
+// dictionary mode objects (related to the prototype validity cell mechanism) is
+// specific to prototypes. This means that for non-prototype dictionary mode
+// objects, we have no way of detecting changes invalidating folded
+// constants. Therefore, we must not fold constants held by non-prototype
+// dictionary mode objects. This is tested here.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(null);
+ Object.setPrototypeOf(o, proto);
+ assertFalse(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+    // We never inlined the access, so it's still optimized.
+ assertOptimized(read_x);
+ }
+})();
+
+// Test inlining of accessor.
+(function() {
+ var proto = Object.create(null);
+ proto.x_val = 1;
+ Object.defineProperty(proto, "x", {
+ get : function () {return this.x_val;}
+ });
+
+ var o = Object.create(proto);
+ assertFalse(%HasFastProperties(proto))
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ // Test that we inlined the access:
+ var dummy = {x : 123};
+ read_x(dummy);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+})();
+
+// Invalidation by adding same property to receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ o.x = 2;
+
+ assertEquals(2, read_x(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+})();
+
+// Invalidation by adding property to intermediate prototype.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var in_between = Object.create(null);
+ Object.setPrototypeOf(in_between, proto);
+
+ var o = Object.create(in_between);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ in_between.x = 2;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by changing prototype of receiver.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var other_proto = Object.create(null);
+ other_proto.x = 2;
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+
+ Object.setPrototypeOf(o, other_proto);
+ assertEquals(2, read_x(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasFastProperties(other_proto));
+ assertUnoptimized(read_x);
+ }
+})();
+
+// Invalidation by changing [[Prototype]] of a prototype on the chain from the
+// receiver to the holder.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var other_proto = Object.create(null);
+ other_proto.x = 2;
+ var in_between = Object.create(null);
+ Object.setPrototypeOf(in_between, proto);
+
+ var o = Object.create(in_between);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ Object.setPrototypeOf(in_between, other_proto);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(in_between));
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasFastProperties(other_proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by changing property on prototype itself.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ assertEquals(1, read_x(o));
+ %OptimizeFunctionOnNextCall(read_x);
+ assertEquals(1, read_x(o));
+ assertOptimized(read_x);
+
+ proto.x = 2;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(2, read_x(o));
+})();
+
+// Invalidation by deleting property on prototype.
+(function() {
+ var proto = Object.create(null);
+ proto.x = 1;
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_x(arg_o) {
+ return arg_o.x;
+ }
+
+ %PrepareFunctionForOptimization(read_x);
+ read_x(o);
+ %OptimizeFunctionOnNextCall(read_x);
+ read_x(o);
+
+ delete proto.x;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertUnoptimized(read_x);
+ }
+
+ assertEquals(undefined, read_x(o));
+})();
+
+// Storing the same value does not invalidate const-ness. Store done from
+// runtime/without feedback.
+(function() {
+ var proto = Object.create(null);
+ var some_object = {bla: 123};
+ proto.x = 1;
+ proto.y = some_object
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_xy(arg_o) {
+ return [arg_o.x, arg_o.y];
+ }
+
+ %PrepareFunctionForOptimization(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ %OptimizeFunctionOnNextCall(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ assertOptimized(read_xy);
+
+ // Build value 1 without re-using proto.x.
+ var x2 = 0;
+ for(var i = 0; i < 5; ++i) {
+ x2 += 0.2;
+ }
+
+ // Storing the same values for x and y again:
+ proto.x = x2;
+ proto.y = some_object;
+ assertEquals(x2, proto.x);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertTrue(%HasOwnConstDataProperty(proto, "x"));
+ assertOptimized(read_xy);
+ }
+
+ proto.x = 2;
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasOwnConstDataProperty(proto, "x"));
+ assertUnoptimized(read_xy);
+ }
+
+ assertEquals(2, read_xy(o)[0]);
+})();
+
+// Storing the same value does not invalidate const-ness. Store done by IC
+// handler.
+(function() {
+ var proto = Object.create(null);
+ var some_object = {bla: 123};
+ proto.x = 1;
+ proto.y = some_object
+
+ var o = Object.create(proto);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto));
+
+ function read_xy(arg_o) {
+ return [arg_o.x, arg_o.y];
+ }
+
+ %PrepareFunctionForOptimization(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ %OptimizeFunctionOnNextCall(read_xy);
+ assertEquals([1, some_object], read_xy(o));
+ assertOptimized(read_xy);
+
+ // Build value 1 without re-using proto.x.
+ var x2 = 0;
+ for(var i = 0; i < 5; ++i) {
+ x2 += 0.2;
+ }
+
+ function change_xy(obj, x, y) {
+ obj.x = x;
+ obj.y = y;
+ }
+
+ %PrepareFunctionForOptimization(change_xy);
+ // Storing the same values for x and y again:
+ change_xy(proto, 1, some_object);
+ change_xy(proto, 1, some_object);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertTrue(%HasOwnConstDataProperty(proto, "x"));
+ assertOptimized(read_xy);
+ }
+
+ change_xy(proto, 2, some_object);
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto));
+ assertFalse(%HasOwnConstDataProperty(proto, "x"));
+ assertUnoptimized(read_xy);
+ }
+
+ assertEquals(2, read_xy(o)[0]);
+})();
+
+// Invalidation by replacing a prototype. Just like the old prototype, the new
+// prototype owns the property as an accessor, but in the form of an
+// AccessorInfo rather than an AccessorPair.
+(function() {
+ var proto1 = Object.create(null);
+ Object.defineProperty(proto1, 'length', {get() {return 1}});
+ var proto2 = Object.create(proto1);
+ var o = Object.create(proto2);
+ assertTrue(%HasFastProperties(o));
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+
+ function read_length(arg_o) {
+ return arg_o.length;
+ }
+
+ %PrepareFunctionForOptimization(read_length);
+ assertEquals(1, read_length(o));
+ %OptimizeFunctionOnNextCall(read_length, "concurrent");
+ assertEquals(1, read_length(o));
+ assertUnoptimized(read_length, "no sync");
+
+ var other_proto1 = [];
+ Object.setPrototypeOf(proto2, other_proto1);
+ %UnblockConcurrentRecompilation();
+ assertUnoptimized(read_length, "sync");
+ assertEquals(0, read_length(o));
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ assertFalse(%HasFastProperties(proto1));
+ assertFalse(%HasFastProperties(proto2));
+ assertFalse(%HasFastProperties(other_proto1));
+ assertUnoptimized(read_length);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/const-field-tracking-2.js b/deps/v8/test/mjsunit/const-field-tracking-2.js
index c1da5cf0dc..86cbb51a3b 100644
--- a/deps/v8/test/mjsunit/const-field-tracking-2.js
+++ b/deps/v8/test/mjsunit/const-field-tracking-2.js
@@ -104,7 +104,8 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
function warmup() { return new O().v; }
%EnsureFeedbackVectorForFunction(warmup);
warmup(); warmup(); warmup();
- assertTrue(%HasFastProperties(O.prototype));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(O.prototype));
// The parameter object is not constant but all the values have the same
// map and therefore the compiler knows the prototype object and can
diff --git a/deps/v8/test/mjsunit/const-field-tracking.js b/deps/v8/test/mjsunit/const-field-tracking.js
index bc979b80b4..2474c2e315 100644
--- a/deps/v8/test/mjsunit/const-field-tracking.js
+++ b/deps/v8/test/mjsunit/const-field-tracking.js
@@ -101,7 +101,8 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
function warmup() { return new O().v; }
%EnsureFeedbackVectorForFunction(warmup);
warmup(); warmup(); warmup();
- assertTrue(%HasFastProperties(O.prototype));
+ if (!%IsDictPropertyConstTrackingEnabled())
+ assertTrue(%HasFastProperties(O.prototype));
// The parameter object is not constant but all the values have the same
// map and therefore the compiler knows the prototype object and can
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 9b3a625cbe..c855f792af 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --nostress-opt --opt
-// Flags: --no-stress-flush-bytecode
+// Flags: --no-stress-flush-bytecode --no-lazy-feedback-allocation
function test(f, iterations) {
%PrepareFunctionForOptimization(f);
diff --git a/deps/v8/test/mjsunit/ensure-growing-store-learns.js b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
index ba5e5ae5a9..430823702c 100644
--- a/deps/v8/test/mjsunit/ensure-growing-store-learns.js
+++ b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
@@ -32,7 +32,11 @@
foo(a, 3);
assertEquals(a[3], 5.3);
foo(a, 50000);
- assertUnoptimized(foo);
+ // TODO(v8:11457) We don't currently support inlining element stores if there
+ // is a dictionary mode prototypes on the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(), isOptimized(foo));
assertTrue(%HasDictionaryElements(a));
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
index 29b65dc358..bc6ed47142 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
var global;
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
index 0353be3205..5f4fa25e54 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
function TestSetWithCustomIterator(ctor) {
const k1 = {};
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
index 91b8767403..45e4528a53 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt
+// Flags: --allow-natives-syntax --opt --no-lazy-feedback-allocation
function TestSetPrototypeModified(ctor) {
const originalPrototypeAdd = ctor.prototype.add;
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
index 3c0204466b..631f315c06 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
@@ -48,7 +48,12 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertUnoptimized(C.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(C.prototype.foo));
})();
(function TestSuperpropertyAccessInlined() {
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt.js b/deps/v8/test/mjsunit/es6/super-ic-opt.js
index c360184a18..9b1b8d218a 100644
--- a/deps/v8/test/mjsunit/es6/super-ic-opt.js
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt.js
@@ -118,7 +118,12 @@
// Assert that the function was deoptimized (dependency to the constant
// value).
- assertUnoptimized(D.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(D.prototype.foo));
})();
(function TestPropertyIsNonConstantData() {
@@ -239,7 +244,12 @@
assertEquals("new value", r);
// Assert that the function was deoptimized (holder changed).
- assertUnoptimized(C.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(C.prototype.foo));
})();
(function TestUnexpectedHomeObjectPrototypeDeoptimizes() {
@@ -278,7 +288,13 @@
assertEquals("new value", r);
// Assert that the function was deoptimized.
- assertUnoptimized(D.prototype.foo);
+ // TODO(v8:11457) We don't support inlining JSLoadNamedFromSuper for
+ // dictionary mode prototypes, yet. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(D.prototype.foo));
+
})();
(function TestUnexpectedReceiverDoesNotDeoptimize() {
diff --git a/deps/v8/test/mjsunit/field-type-tracking.js b/deps/v8/test/mjsunit/field-type-tracking.js
index 1ff336a6b3..13f1c0236f 100644
--- a/deps/v8/test/mjsunit/field-type-tracking.js
+++ b/deps/v8/test/mjsunit/field-type-tracking.js
@@ -163,7 +163,12 @@
%OptimizeFunctionOnNextCall(baz);
baz(f2, {b: 9});
baz(f3, {a: -1});
- assertUnoptimized(baz);
+ // TODO(v8:11457) Currently, Turbofan/Turboprop can never inline any stores if
+  // there is a dictionary mode object in the prototype chain. Therefore, if
+ // v8_dict_property_const_tracking is enabled, the optimized code only
+ // contains a call to the IC handler and doesn't get deopted.
+ assertEquals(%IsDictPropertyConstTrackingEnabled(),
+ isOptimized(baz));
})();
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
index e39ce7ca4f..d94f6a7151 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
@@ -5,8 +5,8 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var error1, error2;
-import('modules-skip-11.mjs').catch(e => error1 = e);
-import('modules-skip-11.mjs').catch(e => error2 = e);
+import('modules-skip-11.mjs').catch(e => { error1 = e });
+import('modules-skip-11.mjs').catch(e => { error2 = e });
%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
index d7a590e442..ab9263e119 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
@@ -3,28 +3,9 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
-//
-// Note: This test fails with top level await due to test1, which tries to
-// import a module using top level await and expects it to fail.
var ran = false;
-async function test1() {
- try {
- let x = await import('modules-skip-8.mjs');
- %AbortJS('failure: should be unreachable');
- } catch(e) {
- assertEquals('Unexpected reserved word', e.message);
- ran = true;
- }
-}
-
-test1();
-%PerformMicrotaskCheckpoint();
-assertTrue(ran);
-
-ran = false;
-
async function test2() {
try {
let x = await import('modules-skip-9.mjs');
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index f879df9a2a..547a688c2a 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
(function TestConstructFinalizationRegistry() {
let fg = new FinalizationRegistry(() => {});
assertEquals(fg.toString(), "[object FinalizationRegistry]");
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
index 960ab89487..eac92486a0 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-from-different-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
index 4e760144e6..8b43618c71 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-is-not-a-microtask.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --allow-natives-syntax
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
// This test asserts that the cleanup function call, scheduled by GC, is a
// microtask and not a normal task.
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
index 11a9b3099d..3513c8f211 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanedUp = false;
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
index a824bd9d85..f2374efc88 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-proxy-from-different-realm.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let r = Realm.create();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
index 730312cba5..ef60d3f150 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = 0;
let holdings_list = [];
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
index 4cb2459172..7476f2bd4e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-optional.js
@@ -5,7 +5,7 @@
// Flags: --harmony-weak-refs
// FinalizationRegistry#cleanupSome is normative optional and has its own
-// flag. Test that it's not present.
+// flag. Test that it's not present with only --harmony-weak-refs.
assertEquals(undefined, Object.getOwnPropertyDescriptor(
FinalizationRegistry.prototype, "cleanupSome"));
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
index 375c7f6d13..6007f9c360 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/clearkeptobjects-on-quit.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-weak-refs
+
// A newly created WeakRef is kept alive until the end of the next microtask
// checkpoint. V8 asserts that the kept objects list is cleared at the end of
// microtask checkpoints when the microtask policy is auto. Test that d8, which
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
index a6cda82485..274e714994 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-and-weakref.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
index 4e0ab2af8e..72d2cae83e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
function cleanup(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
index 7a09273ca7..f63d17ed7f 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-keeps-holdings-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let holdings_list = [];
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
index 7db4d44a6a..3b3f3412a2 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
// Flags: --no-stress-flush-bytecode
let cleanup0_call_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
index 533c3cb631..21b9ff709b 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/multiple-dirty-finalization-groups.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
index 07e23f614f..235a34a592 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/reentrant-gc-from-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let call_count = 0;
let reentrant_gc = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
index 6cfc1a1aa7..c17e7aa969 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/two-weakrefs.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let o1 = {};
let o2 = {};
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
index a45426e3f6..56d9b562a1 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/undefined-holdings.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index b23f396f38..400385d193 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index aebcc6a746..efa4df5217 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index b3f425655e..ff48758c07 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
index 903fb33a37..e607a1ead5 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 7479996844..e04b9f1485 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index ac1e0e2c41..e11fd3b8e9 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index f9ff219d65..772078e107 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 05ba4f28d2..3b3e488a82 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_call_count = 0;
let cleanup = function(holdings) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
index 3c8af1995b..ee4b5ecb90 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weak-cell-basics.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let cleanup_called = false;
let cleanup = function(holdings_arg) {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
index 78e8865ac0..4c8641d8aa 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-creation-keeps-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let wr;
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
index f7c05e88b8..eb02290dfd 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/weakref-deref-keeps-alive.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
let wr;
let wr_control; // control WeakRef for testing what happens without deref
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 144579703a..eb27e5ba6e 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -193,6 +193,9 @@ var isInterpreted;
// Returns true if given function in baseline.
var isBaseline;
+// Returns true if given function in unoptimized (interpreted or baseline).
+var isUnoptimized;
+
// Returns true if given function is optimized.
var isOptimized;
@@ -681,8 +684,7 @@ var prettyPrinted;
return;
}
var is_optimized = (opt_status & V8OptimizationStatus.kOptimized) !== 0;
- var is_baseline = (opt_status & V8OptimizationStatus.kBaseline) !== 0;
- assertFalse(is_optimized && !is_baseline, name_opt);
+ assertFalse(is_optimized, name_opt);
}
assertOptimized = function assertOptimized(
@@ -745,6 +747,10 @@ var prettyPrinted;
(opt_status & V8OptimizationStatus.kBaseline) !== 0;
}
+ isUnoptimized = function isUnoptimized(fun) {
+ return isInterpreted(fun) || isBaseline(fun);
+ }
+
isOptimized = function isOptimized(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index f021b73a7f..b6dd59ec69 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -163,7 +163,8 @@
'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
# Test is only enabled on ASAN. Takes too long on many other bots.
- 'regress/regress-crbug-9161': [PASS, SLOW, ['asan == False', SKIP]],
+ # Also disabled on Mac ASAN for https://crbug.com/v8/11437.
+ 'regress/regress-crbug-9161': [PASS, SLOW, ['not asan or system == macos', SKIP]],
# OOM with too many isolates/memory objects (https://crbug.com/1010272)
# Predictable tests fail due to race between postMessage and GrowMemory
@@ -273,6 +274,7 @@
'unicode-test': [SKIP],
'whitespaces': [SKIP],
'baseline/*': [SKIP],
+ 'regress/regress-chromium-1194026': [SKIP],
# Unsuitable for GC stress because coverage information is lost on GC.
'code-coverage-ad-hoc': [SKIP],
@@ -331,20 +333,9 @@
'regress/asm/*': [SKIP],
'regress/wasm/*': [SKIP],
+ 'asm/*': [SKIP],
'wasm/*': [SKIP],
- 'asm/asm-heap': [SKIP],
- 'asm/asm-validation': [SKIP],
- 'asm/call-stdlib': [SKIP],
- 'asm/call-annotation': [SKIP],
- 'asm/global-imports': [SKIP],
- 'asm/regress-1027595': [SKIP],
- 'asm/regress-1069173': [SKIP],
- 'asm/regress-913822': [SKIP],
- 'asm/regress-937650': [SKIP],
- 'asm/regress-9531': [SKIP],
- 'asm/return-types': [SKIP],
-
# Tests tracing when generating wasm in TurboFan.
'tools/compiler-trace-flags-wasm': [SKIP],
}], # not has_webassembly or variant == jitless
@@ -360,6 +351,9 @@
'regexp-tier-up-multiple': [SKIP],
'regress/regress-996234': [SKIP],
+ # This test relies on TurboFan being enabled.
+ 'compiler/fast-api-calls': [SKIP],
+
# These tests check that we can trace the compiler.
'tools/compiler-trace-flags': [SKIP],
@@ -508,9 +502,6 @@
##############################################################################
['arch == arm64 and simulator_run', {
- 'compiler/osr-big': [PASS, SLOW],
- 'regress/regress-454725': [PASS, SLOW],
- 'json': [PASS, SLOW],
'try': [PASS, SLOW],
'non-extensible-array-reduce': [PASS, SLOW],
}], # 'arch == arm64 and simulator_run'
@@ -574,9 +565,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7102
# Flaky due to huge string allocation.
'regress/regress-748069': [SKIP],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'regress/regress-crbug-627935': [SKIP],
}], # 'msan == True'
##############################################################################
@@ -613,6 +601,9 @@
# BUG(v8:9506): times out.
'wasm/shared-memory-worker-explicit-gc-stress': [SKIP],
+
+ # https://crbug.com/v8/9337 - OOMs on TSAN
+ 'compiler/regress-9017': [SKIP],
}], # 'tsan == True'
##############################################################################
@@ -831,6 +822,32 @@
# https://github.com/v8-riscv/v8/issues/418
'regress/regress-1138075': [SKIP],
'regress/regress-1138611': [SKIP],
+
+ # SIMD is not yet implemented
+ 'regress/wasm/regress-1054466': [SKIP],
+ 'regress/wasm/regress-1065599': [SKIP],
+ 'regress/wasm/regress-1070078': [SKIP],
+ 'regress/wasm/regress-1081030': [SKIP],
+ 'regress/wasm/regress-10831': [SKIP],
+ 'regress/wasm/regress-10309': [SKIP],
+ 'regress/wasm/regress-1111522': [SKIP],
+ 'regress/wasm/regress-1116019': [SKIP],
+ 'regress/wasm/regress-1124885': [SKIP],
+ 'regress/wasm/regress-1165966': [SKIP],
+ 'regress/wasm/regress-1112124': [SKIP],
+ 'regress/wasm/regress-1132461': [SKIP],
+ 'regress/wasm/regress-1161555': [SKIP],
+ 'regress/wasm/regress-1161954': [SKIP],
+ 'regress/wasm/regress-1187831': [SKIP],
+ 'regress/regress-1172797': [SKIP],
+ 'regress/wasm/regress-1179025': [SKIP],
+ 'wasm/simd-errors': [SKIP],
+ 'wasm/simd-globals': [SKIP],
+ 'wasm/multi-value-simd': [SKIP],
+ 'wasm/simd-call': [SKIP],
+ 'wasm/liftoff-simd-params': [SKIP],
+ 'wasm/exceptions-simd': [SKIP],
+
}], # 'arch == riscv64'
['arch == riscv64 and variant == stress_incremental_marking', {
@@ -938,6 +955,7 @@
'deopt-recursive-lazy-once': [SKIP],
'deopt-recursive-soft-once': [SKIP],
'code-coverage-block-opt': [SKIP],
+ 'compiler/fast-api-calls': [SKIP],
'compiler/serializer-apply': [SKIP],
'compiler/serializer-call': [SKIP],
'compiler/serializer-dead-after-jump': [SKIP],
@@ -1303,6 +1321,13 @@
}], # variant == assert_types
##############################################################################
+['variant == stress_snapshot', {
+ # This test initializes an embedder object that never needs to be serialized
+ # to the snapshot, so we don't have a SerializeInternalFieldsCallback for it.
+ 'compiler/fast-api-calls': [SKIP],
+}], # variant == stress_snapshot
+
+##############################################################################
['variant == stress_snapshot and arch != x64', {
# Deserialization fails due to read-only snapshot checksum verification.
# https://crbug.com/v8/10491
@@ -1421,11 +1446,12 @@
'regress/wasm/regress-1161555': [SKIP],
'regress/wasm/regress-1161954': [SKIP],
'regress/wasm/regress-1165966': [SKIP],
+ 'regress/wasm/regress-1187831': [SKIP],
}], # no_simd_sse == True
##############################################################################
-# TODO(v8:11421): Port baseline compiler to ia32, Arm, MIPS, S390 and PPC
-['arch not in (x64, arm64)', {
+# TODO(v8:11421): Port baseline compiler to other architectures.
+['arch not in (x64, arm64, ia32, arm)', {
'baseline/*': [SKIP],
}],
@@ -1434,4 +1460,9 @@
'regress/regress-779407': [SKIP],
}], # variant == experimental_regexp
+##############################################################################
+['variant == concurrent_inlining', {
+ 'concurrent-initial-prototype-change-1': [SKIP],
+}], # variant == concurrent_inlining
+
]
diff --git a/deps/v8/test/mjsunit/promise-hooks.js b/deps/v8/test/mjsunit/promise-hooks.js
deleted file mode 100644
index f7c1558c1d..0000000000
--- a/deps/v8/test/mjsunit/promise-hooks.js
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-opt --deopt-every-n-times=0 --ignore-unhandled-promises
-
-let log = [];
-let asyncId = 0;
-
-function logEvent (type, args) {
- const promise = args[0];
- promise.asyncId = promise.asyncId || ++asyncId;
- log.push({
- type,
- promise,
- parent: args[1],
- argsLength: args.length
- })
-}
-function initHook(...args) {
- logEvent('init', args);
-}
-function resolveHook(...args) {
- logEvent('resolve', args);
-}
-function beforeHook(...args) {
- logEvent('before', args);
-}
-function afterHook(...args) {
- logEvent('after', args);
-}
-
-function printLog(message) {
- console.log(` --- ${message} --- `)
- for (const event of log) {
- console.log(JSON.stringify(event))
- }
-}
-
-function assertNextEvent(type, args) {
- const [ promiseOrId, parentOrId ] = args;
- const nextEvent = log.shift();
-
- assertEquals(type, nextEvent.type);
- assertEquals(type === 'init' ? 2 : 1, nextEvent.argsLength);
-
- assertTrue(nextEvent.promise instanceof Promise);
- if (promiseOrId instanceof Promise) {
- assertEquals(promiseOrId, nextEvent.promise);
- } else {
- assertTrue(typeof promiseOrId === 'number');
- assertEquals(promiseOrId, nextEvent.promise?.asyncId);
- }
-
- if (parentOrId instanceof Promise) {
- assertEquals(parentOrId, nextEvent.parent);
- assertTrue(nextEvent.parent instanceof Promise);
- } else if (typeof parentOrId === 'number') {
- assertEquals(parentOrId, nextEvent.parent?.asyncId);
- assertTrue(nextEvent.parent instanceof Promise);
- } else {
- assertEquals(undefined, parentOrId);
- assertEquals(undefined, nextEvent.parent);
- }
-}
-function assertEmptyLog() {
- assertEquals(0, log.length);
- asyncId = 0;
- log = [];
-}
-
-// Verify basic log structure of different promise behaviours
-function basicTest() {
- d8.promise.setHooks(initHook, beforeHook, afterHook, resolveHook);
-
- // `new Promise(...)` triggers init event with correct promise
- var done, p1 = new Promise(r => done = r);
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p1 ]);
- assertEmptyLog();
-
- // `promise.then(...)` triggers init event with correct promise and parent
- var p2 = p1.then(() => { });
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p2, p1 ]);
- assertEmptyLog();
-
- // `resolve(...)` triggers resolve event and any already attached continuations
- done();
- %PerformMicrotaskCheckpoint();
- assertNextEvent('resolve', [ p1 ]);
- assertNextEvent('before', [ p2 ]);
- assertNextEvent('resolve', [ p2 ]);
- assertNextEvent('after', [ p2 ]);
- assertEmptyLog();
-
- // `reject(...)` triggers the resolve event
- var done, p3 = new Promise((_, r) => done = r);
- done();
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p3 ]);
- assertNextEvent('resolve', [ p3 ]);
- assertEmptyLog();
-
- // `promise.catch(...)` triggers init event with correct promise and parent
- // When the promise is already completed, the continuation should also run
- // immediately at the next checkpoint.
- var p4 = p3.catch(() => { });
- %PerformMicrotaskCheckpoint();
- assertNextEvent('init', [ p4, p3 ]);
- assertNextEvent('before', [ p4 ]);
- assertNextEvent('resolve', [ p4 ]);
- assertNextEvent('after', [ p4 ]);
- assertEmptyLog();
-
- // Detach hooks
- d8.promise.setHooks();
-}
-
-// Exceptions thrown in hook handlers should not raise or reject
-function exceptions() {
- function thrower() {
- throw new Error('unexpected!');
- }
-
- // Init hook
- d8.promise.setHooks(thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Before hook
- d8.promise.setHooks(undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .then(() => {})
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // After hook
- d8.promise.setHooks(undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .then(() => {})
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Resolve hook
- d8.promise.setHooks(undefined, undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.resolve()
- .catch(assertUnreachable);
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-
- // Resolve hook for a reject
- d8.promise.setHooks(undefined, undefined, undefined, thrower);
- assertDoesNotThrow(() => {
- Promise.reject()
- .then(assertUnreachable)
- .catch();
- });
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-}
-
-// For now, expect the optimizer to bail out on async functions
-// when context promise hooks are attached.
-function optimizerBailout(test, verify) {
- // Warm up test method
- %PrepareFunctionForOptimization(test);
- assertUnoptimized(test);
- test();
- test();
- test();
- %PerformMicrotaskCheckpoint();
-
- // Prove transition to optimized code when no hooks are present
- assertUnoptimized(test);
- %OptimizeFunctionOnNextCall(test);
- test();
- assertOptimized(test);
- %PerformMicrotaskCheckpoint();
-
- // Verify that attaching hooks deopts the async function
- d8.promise.setHooks(initHook, beforeHook, afterHook, resolveHook);
- // assertUnoptimized(test);
-
- // Verify log structure of deoptimized call
- %PrepareFunctionForOptimization(test);
- test();
- %PerformMicrotaskCheckpoint();
-
- verify();
-
- // Optimize and verify log structure again
- %OptimizeFunctionOnNextCall(test);
- test();
- assertOptimized(test);
- %PerformMicrotaskCheckpoint();
-
- verify();
-
- d8.promise.setHooks();
-}
-
-optimizerBailout(async () => {
- await Promise.resolve();
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-optimizerBailout(async () => {
- await { then (cb) { cb() } };
-}, () => {
- assertNextEvent('init', [ 1 ]);
- assertNextEvent('init', [ 2, 1 ]);
- assertNextEvent('init', [ 3, 2 ]);
- assertNextEvent('before', [ 2 ]);
- assertNextEvent('resolve', [ 2 ]);
- assertNextEvent('after', [ 2 ]);
- assertNextEvent('before', [ 3 ]);
- assertNextEvent('resolve', [ 1 ]);
- assertNextEvent('resolve', [ 3 ]);
- assertNextEvent('after', [ 3 ]);
- assertEmptyLog();
-});
-basicTest();
-exceptions();
-
-(function regress1126309() {
- function __f_16(test) {
- test();
- d8.promise.setHooks(undefined, () => {});
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
- }
- __f_16(async () => { await Promise.resolve()});
-})();
-
-(function boundFunction() {
- function hook() {};
- const bound = hook.bind(this);
- d8.promise.setHooks(bound, bound, bound, bound);
- Promise.resolve();
- Promise.reject();
- %PerformMicrotaskCheckpoint();
- d8.promise.setHooks();
-})();
-
-
-(function promiseAll() {
- let initCount = 0;
- d8.promise.setHooks(() => { initCount++});
- Promise.all([Promise.resolve(1)]);
- %PerformMicrotaskCheckpoint();
- assertEquals(initCount, 3);
-
- d8.promise.setHooks();
-})();
diff --git a/deps/v8/test/mjsunit/proto-accessor-not-accessible.js b/deps/v8/test/mjsunit/proto-accessor-not-accessible.js
new file mode 100644
index 0000000000..4e86f4e4ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/proto-accessor-not-accessible.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Accessors for __proto__ are defined in Object.prototype (spec:
+// https://tc39.es/ecma262/#sec-object.prototype.__proto__ ). If
+// Object.prototype is not in the prototype chain of an object, the accessors
+// are not accessible. In particular, __proto__ is treated as a normal property
+// and the special meaning (that getting __proto__ would return the prototype
+// and setting __proto__ would change the prototype) is lost.
+
+function testObjectWithNullProto(object) {
+ assertNull(Object.getPrototypeOf(object));
+
+ // The __proto__ getter is not accessible.
+ assertEquals(undefined, object.__proto__);
+
+ // The __proto__ setter is not accessible. Setting __proto__ will create a
+ // normal property called __proto__ and not change the prototype.
+ object.__proto__ = {};
+ assertNull(Object.getPrototypeOf(object));
+
+ // Object.setPrototypeOf can still be used for really setting the prototype.
+ const proto1 = {};
+ Object.setPrototypeOf(object, proto1);
+
+ // Now the accessors are accessible again.
+ assertEquals(proto1, object.__proto__);
+
+ const proto2 = {};
+ object.__proto__ = proto2;
+ assertEquals(proto2, object.__proto__);
+}
+
+(function TestObjectCreatedWithObjectCreate() {
+ testObjectWithNullProto(Object.create(null));
+})();
+
+(function TestProtoSetToNullAfterCreation() {
+ let object_with_null_proto = {};
+ object_with_null_proto.__proto__ = null;
+ testObjectWithNullProto(object_with_null_proto);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-673297.js b/deps/v8/test/mjsunit/regress/asm/regress-673297.js
index 9a00a90835..9a00a90835 100644
--- a/deps/v8/test/mjsunit/regress/regress-673297.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-673297.js
diff --git a/deps/v8/test/mjsunit/regress/regress-743622.js b/deps/v8/test/mjsunit/regress/asm/regress-743622.js
index 60512585c2..60512585c2 100644
--- a/deps/v8/test/mjsunit/regress/regress-743622.js
+++ b/deps/v8/test/mjsunit/regress/asm/regress-743622.js
diff --git a/deps/v8/test/mjsunit/regress/regress-1067270.js b/deps/v8/test/mjsunit/regress/regress-1067270.js
index 1c6eddf505..eb505e5b2b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1067270.js
+++ b/deps/v8/test/mjsunit/regress/regress-1067270.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --stack-size=1200
const needle = Array(1802).join(" +") + Array(16884).join("A");
const string = "A";
diff --git a/deps/v8/test/mjsunit/regress/regress-1146880.js b/deps/v8/test/mjsunit/regress/regress-1146880.js
new file mode 100644
index 0000000000..c6bfddf84f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1146880.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --assert-types
+
+function f(a,b) {
+ let t = a >= b;
+ while (t != 0) {
+ a = a | (b - a);
+ let unused = a >= b;
+ t = a < b;
+ }
+}
+function test() {
+ f(Infinity,1);
+ f(undefined, undefined);
+}
+
+// Trigger TurboFan compilation
+%PrepareFunctionForOptimization(test);
+%PrepareFunctionForOptimization(f);
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-11491.js b/deps/v8/test/mjsunit/regress/regress-11491.js
new file mode 100644
index 0000000000..795480a15d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-11491.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function test() {
+ // Create a generator constructor with the maximum number of allowed parameters.
+ const args = new Array(65535);
+ function* gen() {}
+ const c = gen.constructor.apply(null, args);
+
+ // 'c' having 65535 parameters causes the parameters/registers fixed array
+ // attached to the generator object to be considered a large object.
+ // We call it twice so that it both covers the CreateJSGeneratorObject() C++
+ // runtime function as well as the CreateGeneratorObject() CSA builtin.
+ c();
+ c();
+}
+
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-11519.js b/deps/v8/test/mjsunit/regress/regress-11519.js
new file mode 100644
index 0000000000..ae4c83a0a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-11519.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=500 --stress-compaction
+
+function bar(a) {
+ return Object.defineProperty(a, 'x', {get() { return 1; }});
+}
+
+function foo() {
+ return {};
+}
+
+%NeverOptimizeFunction(bar);
+%PrepareFunctionForOptimization(foo);
+const o = foo(); // Keep a reference so the GC doesn't kill the map.
+%SimulateNewspaceFull();
+bar(o);
+const a = bar(foo());
+%SimulateNewspaceFull();
+%OptimizeFunctionOnNextCall(foo);
+const b = bar(foo());
+
+assertTrue(%HaveSameMap(a, b));
diff --git a/deps/v8/test/mjsunit/regress/regress-1181240.js b/deps/v8/test/mjsunit/regress/regress-1181240.js
new file mode 100644
index 0000000000..bf053a5310
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1181240.js
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ t();
+ } catch (e) {
+ f(true);
+ }
+ }
+ t();
+}
+
+var a = {x: 10};
+var b = {y: 10};
+function inner(should_deopt) {
+ if (should_deopt == true) {
+ a.x;
+ }
+ return b.y;
+}
+
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(inner);
+f(false);
+f(false);
+%OptimizeFunctionOnNextCall(f);
+f(false);
+
+function f(x) {
+ // Pass a large number of arguments so the stack check would fail.
+ inner(x,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ );
+}
+
+runNearStackLimit(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-1185072.js b/deps/v8/test/mjsunit/regress/regress-1185072.js
new file mode 100644
index 0000000000..7dd2802c99
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1185072.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __getProperties(obj) {
+ let properties = [];
+ for (let name of Object.getOwnPropertyNames(obj)) {
+ properties.push(name);
+ }
+ return properties;
+}
+function __getRandomProperty(obj, seed) {
+ let properties = __getProperties(obj);
+ return properties[seed % properties.length];
+}
+let __v_19 = [];
+class __c_0 extends Array {}
+Object.defineProperty(__v_19, 'constructor', {
+ get() {
+ return __c_0;
+ }
+});
+Object.defineProperty(__v_19, __getRandomProperty(__v_19, 776790), {
+ value: 4294967295
+});
+assertThrows(() => __v_19.concat([1])[9], RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1187170.js b/deps/v8/test/mjsunit/regress/regress-1187170.js
new file mode 100644
index 0000000000..58a6f7ef5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1187170.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+
+var bar = 0;
+function foo(outer_arg) {
+ var arr = [1];
+ var func = function (arg) {
+ bar += arg;
+ if (outer_arg) {}
+ };
+ try {
+ arr.filter(func);
+ } catch (e) {}
+};
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+bar = {};
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1193903.js b/deps/v8/test/mjsunit/regress/regress-1193903.js
new file mode 100644
index 0000000000..491ba1150d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1193903.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var no_sync_uninternalized = "no " + "sync";
+%InternalizeString(no_sync_uninternalized);
+
+// Make sure %GetOptimizationStatus works with a non-internalized string
+// parameter.
+%GetOptimizationStatus(function() {}, no_sync_uninternalized)
diff --git a/deps/v8/test/mjsunit/regress/regress-673241.js b/deps/v8/test/mjsunit/regress/regress-673241.js
deleted file mode 100644
index a4d6ffe96f..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-673241.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --validate-asm
-
-function generateAsmJs() {
- 'use asm';
- function fun() { fun(); }
- return fun;
-}
-
-assertThrows(generateAsmJs());
diff --git a/deps/v8/test/mjsunit/regress/regress-7115.js b/deps/v8/test/mjsunit/regress/regress-7115.js
index 8bbb1ded20..f17c2e6bb9 100644
--- a/deps/v8/test/mjsunit/regress/regress-7115.js
+++ b/deps/v8/test/mjsunit/regress/regress-7115.js
@@ -5,7 +5,15 @@
// Flags: --allow-natives-syntax
function TestBuiltinSubclassing(Builtin) {
- assertTrue(%HasFastProperties(Builtin));
+ if (!%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(v8:11248) In the current implementation of
+ // v8_dict_property_const_tracking, prototypes are converted to dictionary
+ // mode in many places, but we don't guarantee that they are *created* as
+ // dictionary mode objects, yet. This will be fixed in the future. Until
+ // then, if v8_dict_property_const_tracking is enabled, we cannot always
+ // know for sure if a builtin has been converted already or not.
+ assertTrue(%HasFastProperties(Builtin));
+ }
assertTrue(%HasFastProperties(Builtin.prototype));
assertEquals(!%IsDictPropertyConstTrackingEnabled(),
%HasFastProperties(Builtin.prototype.__proto__));
diff --git a/deps/v8/test/mjsunit/regress/regress-923723.js b/deps/v8/test/mjsunit/regress/regress-923723.js
index 5a838e558f..4bd0d43777 100644
--- a/deps/v8/test/mjsunit/regress/regress-923723.js
+++ b/deps/v8/test/mjsunit/regress/regress-923723.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=50
+// Flags: --stack-size=100
function __f_3() {
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-992389.js b/deps/v8/test/mjsunit/regress/regress-992389.js
index 66fa9696f6..2eb0f755f3 100644
--- a/deps/v8/test/mjsunit/regress/regress-992389.js
+++ b/deps/v8/test/mjsunit/regress/regress-992389.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --jitless --gc-interval=12 --stack-size=50
+// Flags: --jitless --gc-interval=12 --stack-size=100
__f_0();
function __f_0() {
diff --git a/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js b/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js
new file mode 100644
index 0000000000..2b5f5c6912
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-chromium-1194026.js
@@ -0,0 +1,69 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-sharedarraybuffer
+
+function workerCode1() {
+ onmessage = function(e) {
+ const a = new Int32Array(e.sab);
+ while(true) {
+ // This worker tries to switch the value from 1 to 2; if it succeeds, it
+ // also notifies.
+ const ret = Atomics.compareExchange(a, 0, 1, 2);
+ if (ret === 1) {
+ Atomics.notify(a, 0);
+ }
+ // Check if we're asked to terminate:
+ if (Atomics.load(a, 1) == 1) {
+ return;
+ }
+ }
+ }
+}
+
+function workerCode2() {
+ const MAX_ROUNDS = 40;
+ onmessage = function(e) {
+ const a = new Int32Array(e.sab);
+ let round = 0;
+ function nextRound() {
+ while (true) {
+ if (round == MAX_ROUNDS) {
+ // Tell worker1 to terminate.
+ Atomics.store(a, 1, 1);
+ postMessage('done');
+ return;
+ }
+
+ // This worker changes the value to 1, and waits for it to change to 2
+ // via Atomics.waitAsync.
+ Atomics.store(a, 0, 1);
+
+ const res = Atomics.waitAsync(a, 0, 1);
+ if (res.async) {
+ res.value.then(() => { ++round; nextRound();},
+ ()=> {});
+ return;
+ }
+ // Else: continue looping. (This happens when worker1 changed the value
+ // back to 2 before waitAsync started.)
+ }
+ }
+
+ nextRound();
+ }
+}
+
+let sab = new SharedArrayBuffer(8);
+
+let w1 = new Worker(workerCode1, {type: 'function'});
+w1.postMessage({sab: sab});
+
+let w2 = new Worker(workerCode2, {type: 'function'});
+w2.postMessage({sab: sab});
+
+// Wait for worker2.
+w2.getMessage();
+w1.terminate();
+w2.terminate();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
index ec61fee068..e06e63db65 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-2.js
@@ -10,6 +10,16 @@ function foo(first_run) {
Object.defineProperty(o, 'x', { get() { return 1; }, configurable: true, enumerable: true });
delete o.x;
o.x = 23;
+
+ if (%IsDictPropertyConstTrackingEnabled()) {
+ // TODO(11248, ishell) Adding a property always sets it to constant if
+ // V8_DICT_PROPERTY_CONST_TRACKING is enabled, even if the property was
+ // deleted before and is re-added. See
+ // LookupIterator::PrepareTransitionToDataProperty, specically the usage of
+ // PropertyDetails::kConstIfDictConstnessTracking in there.
+ return;
+ }
+
if (first_run) assertFalse(%HasOwnConstDataProperty(o, 'x'));
}
%PrepareFunctionForOptimization(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js
new file mode 100644
index 0000000000..e84d98a6ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1161847-3.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(x) { delete x.bla; x.bla = 23 }
+
+function foo() {
+ let obj = {bla: 0};
+ Object.defineProperty(obj, 'bla', {writable: false});
+ bar(obj);
+ return obj.bla;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(23, foo());
+assertEquals(23, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(23, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js b/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js
new file mode 100644
index 0000000000..87df25605f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1191886.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let arr = [];
+for (var i = 0; i < 1000000; i++) {
+ arr[i] = [];
+}
+assertEquals(1000000, i);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js b/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
index 1bced5623e..9f10604e76 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1195331.js
@@ -27,9 +27,9 @@ assertFalse(%HasOwnConstDataProperty(o3, "b"));
Object.defineProperty(o2, "a", {
value:2, enumerable: false, configurable: true, writable: true,
});
-assertFalse(%HasOwnConstDataProperty(o1, "a"));
+assertTrue(%HasOwnConstDataProperty(o1, "a"));
assertFalse(%HasOwnConstDataProperty(o1, "b"));
-assertFalse(%HasOwnConstDataProperty(o3, "a"));
+assertTrue(%HasOwnConstDataProperty(o3, "a"));
assertFalse(%HasOwnConstDataProperty(o3, "b"));
assertFalse(%HasOwnConstDataProperty(o2, "a"));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9534.js b/deps/v8/test/mjsunit/regress/regress-v8-9534.js
index 0eb0217e7f..7fc98a9d74 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-9534.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9534.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --stack-size=50 --ignore-unhandled-promises
+// Flags: --allow-natives-syntax --stack-size=100 --ignore-unhandled-promises
let i = 0;
function f() {
diff --git a/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js b/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js
new file mode 100644
index 0000000000..06d3dc64d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/condition-change-during-branch-elimination.js
@@ -0,0 +1,49 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// This test creates the situation where BranchElimination's VisitIf sees a
+// different condition than the preceding VisitBranch (because an interleaved
+// CommonOperatorReducer replaced the condition).
+
+(function foo() {
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", kSig_v_l)
+ .addLocals(kWasmI32, 2)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0x02,
+ kExprLocalTee, 0x01,
+ kExprIf, kWasmVoid,
+ kExprElse,
+ kExprLoop, kWasmVoid,
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0x01,
+ kExprIf, kWasmVoid,
+ kExprElse,
+ kExprLocalGet, 0x02,
+ kExprBrIf, 0x04,
+ kExprBr, 0x01,
+ kExprEnd,
+ kExprLocalGet, 0x00,
+ kExprCallFunction, 0x01,
+ kExprLocalTee, 0x02,
+ kExprBrIf, 0x00,
+ kExprEnd,
+ kExprLocalGet, 0x01,
+ kExprBrIf, 0x00,
+ kExprEnd,
+ kExprEnd,
+ kExprBr, 0x00,
+ kExprEnd])
+ .exportAs("main");
+
+ builder.addFunction("callee", kSig_i_l)
+ .addBody([kExprLocalGet, 0, kExprI32ConvertI64]);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js b/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
index b353b7a94a..1d0d1470ee 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1027410.js
@@ -23,7 +23,7 @@ kExprEnd, // @3
// signature: d_v
// body:
kExprBlock, kWasmF64, // @3 f64
- kExprBlock, kWasmStmt, // @5
+ kExprBlock, kWasmVoid, // @5
kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,
kExprLocalTee, 0x00,
kExprLocalTee, 0x01,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
index 99519d8ffe..99d3da9329 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1034394.js
@@ -10,7 +10,7 @@ const NUM_CASES = 3073;
let body = [];
// Add one block, so we can jump to this block or to the function end.
body.push(kExprBlock);
-body.push(kWasmStmt);
+body.push(kWasmVoid);
// Add the big BrTable.
body.push(kExprLocalGet, 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
index eec0a46432..f3e3b59b7a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1074586.js
@@ -19,15 +19,15 @@ builder.addFunction(undefined, sig)
kExprLocalGet, 0x1b, // local.get
kExprLocalSet, 0x1c, // local.set
kExprI32Const, 0x00, // i32.const
-kExprIf, kWasmStmt, // if @11
+kExprIf, kWasmVoid, // if @11
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x1e, // local.set
- kExprBlock, kWasmStmt, // block @19
+ kExprBlock, kWasmVoid, // block @19
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x21, // local.set
- kExprBlock, kWasmStmt, // block @25
- kExprBlock, kWasmStmt, // block @27
- kExprBlock, kWasmStmt, // block @29
+ kExprBlock, kWasmVoid, // block @25
+ kExprBlock, kWasmVoid, // block @27
+ kExprBlock, kWasmVoid, // block @29
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x0a, // local.set
kExprI32Const, 0x00, // i32.const
@@ -42,19 +42,19 @@ kExprIf, kWasmStmt, // if @11
kExprI32Const, 0x01, // i32.const
kExprLocalSet, 0x36, // local.set
kExprI32Const, 0x00, // i32.const
- kExprIf, kWasmStmt, // if @56
+ kExprIf, kWasmVoid, // if @56
kExprEnd, // end @59
kExprLocalGet, 0x00, // local.get
kExprLocalSet, 0x10, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x38, // local.set
- kExprBlock, kWasmStmt, // block @69
+ kExprBlock, kWasmVoid, // block @69
kExprI32Const, 0x7f, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x39, // local.set
kExprI32Const, 0x01, // i32.const
- kExprIf, kWasmStmt, // if @78
+ kExprIf, kWasmVoid, // if @78
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x11, // local.set
kExprI32Const, 0x00, // i32.const
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
index 413630d1b0..c0c2d0dcfc 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1075953.js
@@ -25,7 +25,7 @@ builder.addFunction(undefined, sig)
kExprElse, // else @45
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @48
- kExprIf, kWasmStmt, // if @49
+ kExprIf, kWasmVoid, // if @49
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kAtomicPrefix, kExprI32AtomicSub, 0x01, 0x04, // i32.atomic.sub
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10831.js b/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
index 58c6c4dec1..29334684ed 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10831.js
@@ -39,7 +39,7 @@ kExprI32Const, 0xfc, 0xf8, 0x01, // i32.const
kSimdPrefix, kExprI8x16Splat, // i8x16.splat
kSimdPrefix, kExprF64x2Max, 0x01, // f64x2.max
kSimdPrefix, kExprI16x8MaxS, 0x01, // i16x8.max_s
-kSimdPrefix, kExprV8x16AllTrue, // v8x16.all_true
+kSimdPrefix, kExprI8x16AllTrue, // i8x16.all_true
kExprEnd, // end @70
]);
builder.addExport('main', 0);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-10898.js b/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
index 61c8c72104..be366883d2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-10898.js
@@ -29,7 +29,7 @@ kExprLocalTee, 0x00, // local.tee
kExprI32Const, 0xff, 0x00, // i32.const
kAtomicPrefix, kExprAtomicNotify, 0x02, 0x03, // atomic.notify
kExprI32LoadMem16S, 0x00, 0x02, // i32.load16_s
-kExprIf, kWasmStmt, // if @28
+kExprIf, kWasmVoid, // if @28
kExprLocalGet, 0x00, // local.get
kExprReturn, // return
kExprElse, // else @33
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js b/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
index 36331d094a..aaf63724b4 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1101304.js
@@ -10,9 +10,9 @@ builder.addType(makeSig(
[]));
builder.addFunction(undefined, 0 /* sig */).addBody([
kExprI32Const, 0, // i32.const
- kExprIf, kWasmStmt, // if @3
+ kExprIf, kWasmVoid, // if @3
kExprI32Const, 1, // i32.const
- kExprIf, kWasmStmt, // if @7
+ kExprIf, kWasmVoid, // if @7
kExprNop, // nop
kExprElse, // else @10
kExprUnreachable, // unreachable
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js b/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
index aacaedc93f..407f5a2f87 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1145135.js
@@ -19,11 +19,11 @@ kExprI32Const, 0x10, // i32.const
kExprI32Sub, // i32.sub
kExprLocalTee, 0x02, // local.tee
kExprGlobalSet, 0x00, // global.set
-kExprBlock, kWasmStmt, // block @12
+kExprBlock, kWasmVoid, // block @12
kExprLocalGet, 0x00, // local.get
kExprI32LoadMem, 0x02, 0x00, // i32.load
kExprI32Eqz, // i32.eqz
- kExprIf, kWasmStmt, // if @20
+ kExprIf, kWasmVoid, // if @20
kExprLocalGet, 0x02, // local.get
kExprI32Const, 0x00, // i32.const
kExprI32StoreMem, 0x02, 0x0c, // i32.store
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
index d9d80e58cc..56c0b7d194 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
@@ -35,9 +35,9 @@ kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x01, // i32.const
kExprI32Sub, // i32.sub
kExprLocalSet, 0x07, // local.set
-kExprBlock, kWasmStmt, // block @45
+kExprBlock, kWasmVoid, // block @45
kExprI32Const, 0x00, // i32.const
- kExprIf, kWasmStmt, // if @49
+ kExprIf, kWasmVoid, // if @49
kExprLocalGet, 0x0a, // local.get
kExprLocalSet, 0x08, // local.set
kExprElse, // else @55
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
index 989da11a25..a86866429c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
@@ -21,7 +21,7 @@ kExprI32Const, 0x00, // i32.const
kExprLocalSet, 0x04, // local.set
kExprI32Const, 0x01, // i32.const
kExprLocalSet, 0x05, // local.set
-kExprBlock, kWasmStmt, // block @11
+kExprBlock, kWasmVoid, // block @11
kExprBr, 0x00, // br depth=0
kExprEnd, // end @15
kExprGlobalGet, 0x01, // global.get
@@ -35,7 +35,7 @@ kExprLocalSet, 0x01, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x07, // local.set
-kExprBlock, kWasmStmt, // block @36
+kExprBlock, kWasmVoid, // block @36
kExprBr, 0x00, // br depth=0
kExprEnd, // end @40
kExprGlobalGet, 0x01, // global.get
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
index 93f2c3b556..f942798927 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
@@ -34,7 +34,7 @@ kExprLocalGet, 0x01, // local.get
kExprLocalGet, 0x01, // local.get
kExprGlobalGet, 0x00, // global.get
kExprDrop, // drop
-kExprLoop, kWasmStmt, // loop @8
+kExprLoop, kWasmVoid, // loop @8
kExprLoop, 0x00, // loop @10
kExprI32Const, 0x01, // i32.const
kExprMemoryGrow, 0x00, // memory.grow
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
index 907cf563c9..0bcdd7bd44 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1179182.js
@@ -13,7 +13,7 @@ builder.addFunction(undefined, kSig_i_v)
.addBody([
kExprI64Const, 0x0, // i64.const
kExprI32Const, 0x0, // i32.const
-kExprIf, kWasmStmt, // if
+kExprIf, kWasmVoid, // if
kExprI32Const, 0x0, // i32.const
kExprI32LoadMem, 0x01, 0x23, // i32.load
kExprBrTable, 0x01, 0x00, 0x00, // br_table
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js b/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js
new file mode 100644
index 0000000000..2fe4fbb107
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1184964.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('foo', kSig_v_v).addBody([kExprDrop]);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
new file mode 100644
index 0000000000..3c6e998020
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1185464.js
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --liftoff --no-wasm-tier-up --wasm-tier-mask-for-testing=2
+// Flags: --experimental-wasm-reftypes
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+
+// Generate a Liftoff call with too many reference parameters to fit in
+// parameter registers, to force stack parameter slots.
+
+const kManyParams = 32;
+const kSigWithManyRefParams = makeSig(
+ new Array(kManyParams).fill(kWasmExternRef), []);
+const kPrepareManyParamsCallBody = Array.from(
+ {length: kManyParams * 2},
+ (item, index) => index % 2 == 0 ? kExprLocalGet : 0);
+
+
+builder.addFunction(undefined, kSigWithManyRefParams).addBody([
+]);
+
+builder.addFunction(undefined, kSigWithManyRefParams)
+.addBody([
+ ...kPrepareManyParamsCallBody,
+ kExprCallFunction, 0, // call 0
+]);
+
+builder.addFunction(undefined, kSigWithManyRefParams).addBody([
+ ...kPrepareManyParamsCallBody,
+ kExprCallFunction, 1, // call 1
+]).exportAs('manyRefs');
+
+const instance = builder.instantiate();
+instance.exports.manyRefs();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js b/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js
new file mode 100644
index 0000000000..84e7ed5429
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1187831.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
+builder.addType(makeSig([], []));
+builder.setTableBounds(1, 1);
+builder.addElementSegment(0, 0, false, [0]);
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: i_iii
+// body:
+kExprI32Const, 0x03, // i32.const
+kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+kExprI32Const, 0x00, // i32.const
+kSimdPrefix, kExprI8x16ReplaceLane, 0x00, // i8x16.replace_lane
+kSimdPrefix, kExprI32x4ExtAddPairwiseI16x8U, // i32x4.extadd_pairwise_i16x8_u
+kSimdPrefix, kExprI8x16ExtractLaneU, 0x00, // i8x16.extract_lane_u
+kExprEnd, // end @15
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertEquals(3, instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
new file mode 100644
index 0000000000..9a4cb1ecbd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1188825.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+load('test/mjsunit/wasm/wasm-module-builder.js')
+let obj = {};
+let proxy = new Proxy(obj, {});
+let builder = new WasmModuleBuilder();
+builder.addType(kSig_v_v);
+let imports = builder.addImport("m","f", kSig_v_v);
+let exception = builder.addException(kSig_v_v);
+builder.addFunction("foo", kSig_v_v)
+ .addBody([
+ kExprTry,
+ kWasmVoid,
+ kExprCallFunction, imports,
+ kExprCatch, exception,
+ kExprEnd]
+ ).exportFunc();
+let inst = builder.instantiate({
+ m: {
+ f: function () {
+ throw proxy;
+ }
+ }
+});
+assertThrows(inst.exports.foo);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js b/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js
new file mode 100644
index 0000000000..3d716cd2ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1188975.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function Regress1188975() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("f", kSig_v_v)
+ .addBody([
+ kExprUnreachable,
+ kExprTry, kWasmVoid,
+ kExprElse,
+ kExprCatchAll,
+ kExprEnd,
+ ]);
+ assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js b/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js
new file mode 100644
index 0000000000..9dd512f27e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1189454.js
@@ -0,0 +1,218 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc --experimental-wasm-threads
+
+// During Turbofan optimizations, when a TrapIf/Unless node is found to always
+// trap, its uses need to be marked as dead. However, in the case that one of
+// these uses is a Merge or Loop node, only the input of the Merge/Loop that
+// corresponds to the trap should be marked as dead.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+builder.addStruct([makeField(kWasmI32, true)]);
+
+builder.addFunction('test', makeSig([wasmOptRefType(0)], [kWasmI32]))
+ .addBody([
+ kExprLocalGet, 0,
+ kExprRefIsNull,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprStructGet, 0, 0,
+ kExprElse,
+ kExprI32Const, 42,
+ kExprEnd
+ ])
+ .exportFunc();
+builder.instantiate();
+
+// We include a clusterfuzz-generated testcase for this error verbatim.
+const module = new WebAssembly.Module(new Uint8Array([
+ 0, 97, 115, 109, 1, 0, 0, 0, 1, 51, 9, 96, 0, 0, 96,
+ 0, 1, 125, 96, 0, 1, 124, 96, 2, 124, 127, 1, 125, 96, 4,
+ 126, 126, 125, 127, 1, 127, 96, 1, 126, 1, 127, 96, 7, 127, 126,
+ 126, 125, 124, 127, 125, 1, 124, 96, 0, 1, 127, 96, 1, 124, 1,
+ 125, 3, 23, 22, 0, 4, 0, 5, 6, 0, 7, 0, 2, 0, 3,
+ 1, 0, 8, 0, 0, 0, 0, 0, 2, 2, 0, 4, 5, 1, 112,
+ 1, 9, 9, 5, 4, 1, 3, 1, 1, 6, 6, 1, 127, 1, 65,
+ 10, 11, 7, 213, 1, 14, 6, 102, 117, 110, 99, 95, 48, 0, 0,
+ 14, 102, 117, 110, 99, 95, 49, 95, 105, 110, 118, 111, 107, 101, 114,
+ 0, 2, 14, 102, 117, 110, 99, 95, 52, 95, 105, 110, 118, 111, 107,
+ 101, 114, 0, 5, 14, 102, 117, 110, 99, 95, 54, 95, 105, 110, 118,
+ 111, 107, 101, 114, 0, 7, 14, 102, 117, 110, 99, 95, 56, 95, 105,
+ 110, 118, 11, 107, 101, 114, 0, 9, 7, 102, 117, 110, 99, 95, 49,
+ 49, 0, 11, 15, 102, 117, 110, 99, 95, 49, 49, 95, 105, 110, 118,
+ 111, 107, 101, 114, 0, 12, 15, 102, 117, 110, 99, 95, 49, 51, 95,
+ 105, 110, 118, 111, 107, 101, 114, 0, 14, 7, 102, 117, 110, 99, 95,
+ 49, 53, 0, 15, 15, 102, 117, 110, 99, 95, 49, 53, 95, 105, 110,
+ 118, 111, 107, 101, 114, 0, 16, 15, 102, 117, 110, 99, 95, 49, 55,
+ 95, 105, 110, 118, 111, 107, 101, 114, 0, 18, 7, 102, 117, 110, 99,
+ 95, 49, 57, 0, 19, 7, 102, 117, 110, 99, 95, 50, 48, 0, 20,
+ 20, 104, 97, 110, 103, 76, 105, 109, 105, 116, 73, 110, 105, 116, 105,
+ 97, 108, 105, 122, 101, 114, 0, 21, 9, 15, 1, 0, 65, 0, 11,
+ 9, 4, 6, 6, 8, 10, 11, 11, 15, 15, 10, 220, 18, 22, 113,
+ 0, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 2, 127, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 65, 128, 128, 128, 4, 11, 4, 127, 65, 193, 255, 3, 5,
+ 2, 127, 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 12, 1, 11, 0, 65, 0, 13, 1, 0, 11, 0,
+ 11, 11, 26, 12, 0, 11, 0, 11, 131, 3, 1, 1, 125, 35, 0,
+ 69, 4, 64, 65, 128, 128, 128, 2, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 2, 127, 2, 64, 66, 157, 228, 193, 147, 127, 3, 126, 35,
+ 0, 69, 4, 64, 65, 224, 196, 126, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 35, 0, 69, 4, 64, 65, 129, 128, 124, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 32, 3, 65, 105, 13, 2, 13, 0, 66, 128,
+ 128, 128, 128, 192, 0, 11, 2, 125, 35, 0, 69, 4, 64, 32, 3,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 67, 0, 0, 80, 193, 32,
+ 2, 2, 127, 35, 0, 69, 4, 64, 65, 117, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 3, 11, 27, 34, 4, 67, 0, 0, 0, 0,
+ 32, 4, 32, 4, 91, 27, 11, 32, 3, 16, 1, 3, 127, 35, 0,
+ 69, 4, 64, 65, 168, 186, 126, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 35, 0, 69, 4, 64, 65, 128, 1, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 65, 255, 0, 32, 3, 69, 13, 2, 34, 3, 13, 0,
+ 32, 3, 11, 69, 13, 1, 32, 3, 69, 13, 1, 65, 220, 188, 126,
+ 13, 1, 34, 3, 4, 64, 2, 64, 2, 127, 35, 0, 69, 4, 64,
+ 65, 128, 128, 128, 128, 120, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 32, 3, 32, 3, 13, 0, 13, 3, 35, 0, 69, 4, 64, 32, 3,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 12, 1, 11, 26, 3, 127,
+ 35, 0, 69, 4, 64, 32, 3, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 32, 3, 13, 0, 65, 1, 11, 26, 12, 2, 11, 35, 0, 69,
+ 4, 64, 65, 167, 127, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35,
+ 0, 69, 4, 64, 65, 128, 192, 0, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 35, 0, 69, 4, 64, 32, 3, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 65, 147, 127, 12, 2, 5, 35, 0, 69, 4, 64, 65,
+ 129, 128, 128, 128, 120, 15, 11, 35, 0, 65, 1, 107, 36, 0, 11,
+ 11, 65, 255, 255, 125, 11, 11, 33, 0, 66, 252, 130, 221, 255, 15,
+ 66, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 67, 0, 0, 234,
+ 66, 65, 252, 224, 168, 179, 122, 16, 1, 26, 11, 178, 2, 1, 2,
+ 127, 35, 0, 69, 4, 64, 65, 120, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15,
+ 11, 35, 0, 65, 1, 107, 36, 0, 65, 128, 8, 11, 4, 127, 65,
+ 0, 5, 2, 127, 65, 0, 65, 129, 126, 69, 13, 2, 4, 64, 3,
+ 64, 35, 0, 69, 4, 64, 65, 159, 216, 137, 124, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 65, 0, 40, 2, 3, 26, 35, 0, 69, 4,
+ 64, 65, 222, 136, 126, 15, 11, 35, 0, 65, 1, 107, 36, 0, 3,
+ 64, 35, 0, 4, 64, 35, 0, 65, 1, 107, 36, 0, 12, 1, 5,
+ 65, 128, 8, 15, 11, 0, 11, 0, 11, 0, 5, 3, 64, 35, 0,
+ 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1, 107, 36, 0, 2,
+ 127, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 65, 0, 2, 127, 35, 0, 69, 4, 64, 65, 0, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 65,
+ 0, 15, 11, 35, 0, 65, 1, 107, 36, 0, 11, 65, 1, 254, 18,
+ 0, 22, 11, 69, 13, 0, 11, 13, 0, 35, 0, 69, 4, 64, 65,
+ 128, 124, 15, 11, 35, 0, 65, 1, 107, 36, 0, 3, 64, 35, 0,
+ 69, 4, 64, 65, 224, 216, 2, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 35, 0, 69, 4, 64, 65, 128, 128, 2, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 65, 190, 127, 12, 3, 11, 0, 11, 0, 11, 0,
+ 11, 11, 11, 11, 23, 0, 35, 0, 69, 4, 64, 32, 4, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 65, 0, 43, 3, 2, 11, 116, 0,
+ 65, 141, 176, 126, 66, 217, 236, 126, 66, 128, 1, 67, 0, 0, 0,
+ 79, 68, 0, 0, 0, 0, 0, 0, 80, 64, 65, 76, 67, 0, 0,
+ 128, 95, 16, 4, 26, 65, 32, 66, 129, 128, 128, 128, 120, 66, 230,
+ 212, 156, 252, 15, 67, 0, 0, 160, 64, 68, 0, 0, 0, 0, 0,
+ 0, 224, 67, 65, 127, 67, 0, 0, 128, 128, 16, 4, 26, 65, 255,
+ 166, 200, 177, 123, 66, 185, 127, 66, 128, 128, 128, 128, 8, 67, 0,
+ 0, 0, 93, 68, 0, 0, 0, 0, 0, 0, 96, 67, 65, 150, 224,
+ 126, 67, 0, 0, 0, 88, 16, 4, 26, 11, 111, 0, 35, 0, 69,
+ 4, 64, 65, 144, 194, 0, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 3, 64, 35, 0, 69, 4, 64, 65, 0, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 65, 124, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 35, 0, 69, 4, 64, 65, 111, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 3, 127, 35, 0, 69, 4, 64, 65,
+ 128, 128, 2, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 128, 128,
+ 126, 11, 69, 13, 0, 12, 1, 11, 0, 69, 0, 13, 0, 0, 11,
+ 0, 11, 14, 0, 16, 6, 26, 16, 6, 26, 16, 6, 26, 16, 6,
+ 26, 11, 34, 0, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0,
+ 0, 224, 67, 15, 11, 35, 0, 65, 1, 107, 36, 0, 68, 26, 192,
+ 255, 255, 255, 255, 255, 255, 11, 5, 0, 16, 8, 26, 11, 26, 0,
+ 35, 0, 69, 4, 64, 67, 0, 0, 0, 0, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 67, 0, 0, 128, 214, 11, 26, 0, 35, 0, 69,
+ 4, 64, 67, 0, 0, 0, 90, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 67, 0, 0, 44, 194, 11, 8, 0, 16, 11, 26, 16, 11, 26,
+ 11, 26, 0, 35, 0, 69, 4, 64, 67, 0, 0, 0, 197, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 67, 117, 227, 255, 255, 11, 38, 0,
+ 68, 129, 255, 255, 255, 255, 255, 255, 255, 16, 13, 26, 68, 0, 0,
+ 0, 0, 0, 0, 16, 65, 16, 13, 26, 68, 193, 255, 255, 255, 255,
+ 255, 255, 255, 16, 13, 26, 11, 30, 0, 35, 0, 69, 4, 64, 15,
+ 11, 35, 0, 65, 1, 107, 36, 0, 35, 0, 69, 4, 64, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 11, 6, 0, 16, 15, 16, 15, 11,
+ 16, 0, 35, 0, 69, 4, 64, 15, 11, 35, 0, 65, 1, 107, 36,
+ 0, 11, 8, 0, 16, 17, 16, 17, 16, 17, 11, 52, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 0, 0, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 3, 124, 35, 0, 4, 124, 35, 0, 65,
+ 1, 107, 36, 0, 12, 1, 5, 68, 0, 0, 0, 0, 0, 128, 109,
+ 64, 11, 11, 11, 218, 7, 3, 4, 127, 1, 126, 2, 125, 35, 0,
+ 69, 4, 64, 68, 255, 255, 255, 255, 255, 255, 239, 255, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 2, 124, 3, 64, 35, 0, 69, 4, 64,
+ 68, 0, 0, 0, 0, 0, 0, 42, 192, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 2, 64, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0,
+ 0, 0, 0, 0, 176, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 65, 128, 127, 34, 2, 4, 127, 32, 0, 5, 35, 0, 69, 4, 64,
+ 68, 0, 0, 192, 137, 207, 250, 239, 65, 15, 11, 35, 0, 65, 1,
+ 107, 36, 0, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0, 0, 245,
+ 255, 255, 239, 65, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 134,
+ 82, 34, 0, 33, 3, 32, 1, 69, 13, 0, 11, 35, 0, 69, 4,
+ 64, 68, 0, 0, 0, 0, 0, 0, 144, 192, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 1, 69, 13, 2, 32, 4, 16, 3, 13, 1,
+ 65, 116, 33, 0, 12, 3, 11, 33, 2, 3, 127, 35, 0, 69, 4,
+ 64, 68, 77, 69, 29, 145, 255, 255, 255, 255, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 1, 13, 0, 32, 2, 34, 0, 34, 1, 11,
+ 13, 0, 11, 3, 64, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0,
+ 0, 0, 48, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 160, 102, 64, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 32, 1, 33, 2, 65, 7, 17, 0, 0,
+ 3, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 240,
+ 63, 15, 11, 35, 0, 65, 1, 107, 36, 0, 2, 127, 35, 0, 69,
+ 4, 64, 68, 0, 0, 0, 0, 0, 128, 78, 192, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 66, 129, 128, 128, 128, 120, 66, 128, 128, 2,
+ 32, 0, 27, 33, 4, 65, 177, 152, 126, 11, 4, 64, 3, 64, 35,
+ 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 16, 195, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 16, 6, 65, 15, 113, 65, 130, 128,
+ 126, 254, 0, 2, 0, 4, 64, 32, 0, 32, 1, 32, 2, 27, 4,
+ 127, 65, 207, 230, 157, 153, 4, 34, 0, 5, 65, 140, 226, 132, 187,
+ 6, 11, 26, 5, 67, 151, 255, 255, 255, 33, 6, 11, 32, 2, 13,
+ 0, 66, 128, 128, 128, 128, 128, 1, 33, 4, 11, 11, 3, 64, 35,
+ 0, 69, 4, 64, 68, 0, 0, 0, 0, 32, 250, 239, 64, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 32, 6, 26, 3, 127, 35, 0, 69,
+ 4, 64, 68, 0, 0, 0, 0, 0, 0, 128, 67, 15, 11, 35, 0,
+ 65, 1, 107, 36, 0, 3, 127, 35, 0, 69, 4, 64, 68, 0, 0,
+ 0, 0, 0, 0, 77, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0,
+ 67, 80, 255, 55, 202, 33, 6, 32, 2, 69, 13, 0, 65, 110, 11,
+ 34, 3, 13, 4, 32, 2, 33, 0, 32, 3, 69, 13, 0, 65, 128,
+ 96, 11, 69, 13, 0, 32, 1, 4, 127, 2, 127, 35, 0, 69, 4,
+ 64, 68, 138, 255, 255, 255, 255, 255, 255, 255, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 35, 0, 69, 4, 64, 68, 215, 255, 255, 255, 255,
+ 255, 255, 255, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 185, 127,
+ 2, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 224,
+ 195, 15, 11, 35, 0, 65, 1, 107, 36, 0, 65, 0, 11, 13, 0,
+ 4, 64, 68, 0, 0, 0, 0, 0, 0, 240, 66, 32, 3, 65, 4,
+ 17, 3, 0, 26, 5, 32, 1, 69, 13, 3, 11, 32, 2, 34, 1,
+ 11, 5, 65, 129, 1, 34, 1, 34, 0, 11, 69, 13, 2, 11, 32,
+ 1, 65, 15, 113, 65, 128, 128, 32, 34, 1, 254, 0, 2, 0, 69,
+ 13, 0, 65, 128, 128, 32, 65, 129, 128, 124, 32, 0, 27, 11, 34,
+ 0, 13, 0, 65, 4, 66, 217, 208, 176, 127, 254, 24, 3, 0, 12,
+ 0, 11, 0, 11, 3, 127, 35, 0, 69, 4, 64, 68, 0, 0, 0,
+ 0, 0, 128, 84, 64, 15, 11, 35, 0, 65, 1, 107, 36, 0, 35,
+ 0, 69, 4, 64, 68, 177, 255, 255, 255, 255, 255, 255, 255, 15, 11,
+ 35, 0, 65, 1, 107, 36, 0, 32, 2, 13, 0, 35, 0, 69, 4,
+ 64, 68, 0, 0, 0, 0, 0, 0, 64, 195, 15, 11, 35, 0, 65,
+ 1, 107, 36, 0, 32, 0, 69, 13, 0, 35, 0, 69, 4, 64, 68,
+ 0, 0, 0, 0, 0, 0, 96, 64, 15, 11, 35, 0, 65, 1, 107,
+ 36, 0, 3, 124, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0,
+ 0, 16, 184, 15, 11, 35, 0, 65, 1, 107, 36, 0, 32, 3, 13,
+ 0, 68, 0, 0, 0, 0, 0, 0, 224, 195, 11, 32, 0, 13, 2,
+ 26, 35, 0, 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 192, 66,
+ 15, 11, 35, 0, 65, 1, 107, 36, 0, 32, 1, 13, 0, 35, 0,
+ 69, 4, 64, 68, 0, 0, 0, 0, 0, 0, 240, 191, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 65, 128, 126, 11, 13, 0, 11, 35, 0,
+ 69, 4, 64, 68, 136, 255, 255, 255, 255, 255, 255, 255, 15, 11, 35,
+ 0, 65, 1, 107, 36, 0, 68, 0, 0, 0, 0, 0, 0, 0, 192,
+ 11, 11, 6, 0, 65, 10, 36, 0, 11, 11, 15, 1, 0, 65, 0,
+ 11, 9, 109, 0, 0, 0, 0, 0, 0, 0, 38
+]));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js b/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js
new file mode 100644
index 0000000000..364de33c80
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1197393.js
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addType(makeSig([kWasmI32, kWasmI64, kWasmF64, kWasmI64], []));
+builder.addType(makeSig([kWasmF64], [kWasmF64]));
+// Generate function 1 (out of 2).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: v_ildl
+// body:
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprLocalGet, 0x00, // local.get
+kExprI32Const, 0x82, 0x7f, // i32.const
+kExprI32DivS, // i32.div_s
+kExprSelect, // select
+kExprCallFunction, 0x01, // call function #1: d_d
+kExprDrop, // drop
+kExprEnd, // end @29
+]);
+// Generate function 2 (out of 2).
+builder.addFunction(undefined, 1 /* sig */)
+ .addBodyWithEnd([
+// signature: d_d
+// body:
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprEnd, // end @10
+]);
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js b/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js
new file mode 100644
index 0000000000..82910df155
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1201340.js
@@ -0,0 +1,13 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+builder = new WasmModuleBuilder();
+builder.addImportedMemory();
+let leb = [0x80, 0x80, 0x80, 0x80, 0x0c];
+builder.addFunction('store', makeSig([kWasmI32, kWasmI32], []))
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, ...leb])
+ .exportFunc();
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
index 77c436119c..75605e3612 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-5800.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI64Const, 0,
// 0x80 ... 0x10 is the LEB encoding of 0x100000000. This is chosen so
// that the 64-bit constant has a non-zero top half. In this bug, the
@@ -34,7 +34,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI64Const, 0,
// 0x80 ... 0x10 is the LEB encoding of 0x100000000. This is chosen so
// that the 64-bit constant has a non-zero top half. In this bug, the
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
index 671da730fb..9bda7fcc70 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -16,7 +16,7 @@ builder.addFunction('main', kSig_i_i).addBody([
...wasmI32Const(0x41),
kExprLocalSet, 0,
// Enter loop, such that values are spilled to the stack.
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprEnd,
// Reload value. This must be loaded as 32 bit value.
kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
index b5e4e2e2b6..92579bc37b 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
@@ -18,7 +18,7 @@ builder.addFunction(undefined, kSig_i_iii)
kExprLocalSet, 1, // set_local 1
...wasmI32Const(16), // i32.const 0x1
kExprLocalSet, 2, // set_local 2
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprEnd, // end
kExprLocalGet, 0, // get_local 0
kExprLocalGet, 1, // get_local 1
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
index 008ab16159..776ca522c2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-782280.js
@@ -21,7 +21,7 @@ builder.addFunction('test', kSig_i_iii)
kExprI32Const, 0, // 0, 0
kExprI32Const, 1, // 0, 0, 1
kExprI32Add, // 0, 0 + 1 -> 1
- kExprBlock, kWasmStmt, // 0, 1
+ kExprBlock, kWasmVoid, // 0, 1
kExprBr, 0, // 0, 1
kExprEnd, // 0, 1
kExprI32Add, // 0 + 1 -> 1
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
index 3daeff9e15..74f11ca81e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-791810.js
@@ -8,10 +8,10 @@ const builder = new WasmModuleBuilder();
builder.addFunction('test', kSig_i_i)
.addBody([
kExprLocalGet, 0x00, // get_local 0
- kExprBlock, kWasmStmt, // block
+ kExprBlock, kWasmVoid, // block
kExprBr, 0x00, // br depth=0
kExprEnd, // end
- kExprBlock, kWasmStmt, // block
+ kExprBlock, kWasmVoid, // block
kExprBr, 0x00, // br depth=0
kExprEnd, // end
kExprBr, 0x00, // br depth=0
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
index ac2b34019e..db93c83fde 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-793551.js
@@ -10,7 +10,7 @@ builder.addFunction('test', kSig_i_i)
// body:
kExprLocalGet, 0, // get_local 0
kExprLocalGet, 0, // get_local 0
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprBr, 0, // br depth=0
kExprEnd, // end
kExprUnreachable, // unreachable
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
index d54507cc59..8445b48906 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-842501.js
@@ -23,7 +23,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
kExprF32Eq,
kExprI32LoadMem, 0x01, 0xef, 0xec, 0x95, 0x93, 0x07,
kExprI32Add,
- kExprIf, kWasmStmt, // @30
+ kExprIf, kWasmVoid, // @30
kExprEnd, // @32
kExprI32Const, 0xc9, 0x93, 0xdf, 0xcc, 0x7c,
kExprEnd, // @39
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
index a39d7e8836..db027a226d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8533.js
@@ -21,7 +21,7 @@ const sync_address = 12;
// Calling the imported function sets the thread-in-wasm flag of the
// main thread.
kExprCallFunction, import_id, // --
- kExprLoop, kWasmStmt, // --
+ kExprLoop, kWasmVoid, // --
kExprI32Const, sync_address, // --
kExprI32LoadMem, 0, 0, // --
kExprI32Eqz,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
index bf170c5d63..040c0e36c8 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-854050.js
@@ -10,7 +10,7 @@ builder.addFunction(undefined, makeSig([kWasmI32, kWasmF32], []))
.addBody([
kExprLocalGet, 0, // get_local
kExprI32Const, 0, // i32.const 0
- kExprIf, kWasmStmt, // if
+ kExprIf, kWasmVoid, // if
kExprUnreachable, // unreachable
kExprEnd, // end if
kExprLocalGet, 4, // get_local
@@ -21,7 +21,7 @@ builder.addFunction(undefined, makeSig([kWasmI32, kWasmF32], []))
kExprLocalTee, 2, // tee_local
kExprLocalTee, 8, // tee_local
kExprDrop, // drop
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
kExprEnd, // end loop
]);
builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
index 21f32180bd..00f7825a24 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-905815.js
@@ -15,7 +15,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
builder.addFunction(undefined, 1 /* sig */)
.addLocals(kWasmI32, 65)
.addBodyWithEnd([
- kExprLoop, kWasmStmt, // @3
+ kExprLoop, kWasmVoid, // @3
kSimdPrefix,
kExprF32x4Min,
kExprI64UConvertI32,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
index e9d4026308..630929a0bd 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-913804.js
@@ -6,9 +6,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_v_v).addBody([
- kExprLoop, kWasmStmt, // loop
+ kExprLoop, kWasmVoid, // loop
/**/ kExprBr, 0x01, // br depth=1
- /**/ kExprBlock, kWasmStmt, // block
+ /**/ kExprBlock, kWasmVoid, // block
/**/ /**/ kExprBr, 0x02, // br depth=2
/**/ /**/ kExprEnd, // end [block]
/**/ kExprEnd // end [loop]
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
index 4b9528ccf6..74fb87133c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917412.js
@@ -16,7 +16,7 @@ kExprElse,
kExprEnd,
kExprLocalTee, 0,
kExprLocalGet, 0,
-kExprLoop, kWasmStmt,
+kExprLoop, kWasmVoid,
kExprI64Const, 0x80, 0x80, 0x80, 0x70,
kExprLocalSet, 0x01,
kExprI32Const, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
index 1e5c1a4488..1fab062b85 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-917588b.js
@@ -16,8 +16,8 @@ builder.addFunction(undefined, sig1)
// signature: f_lilfl
kExprBlock, kWasmF32, // @1 f32
kExprI32Const, 0x00,
- kExprIf, kWasmStmt, // @5
- kExprLoop, kWasmStmt, // @7
+ kExprIf, kWasmVoid, // @5
+ kExprLoop, kWasmVoid, // @7
kExprBlock, kWasmI32, // @9 i32
kExprF32Const, 0x00, 0x00, 0x80, 0xc1,
kExprF32Const, 0x00, 0x00, 0x80, 0x45,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
index 1cc4b675c2..ab13941b20 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-919533.js
@@ -12,7 +12,7 @@ builder.addFunction(undefined, kSig_i_i)
kExprLocalGet, 0,
// Stack now contains two copies of the first param register.
// Start a loop to create a merge point (values still in registers).
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
// The call spills all values.
kExprCallFunction, 0,
// Break to the loop. Now the spilled values need to be loaded back *into
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
index aabe001392..7df7fb47d2 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-922933.js
@@ -9,21 +9,21 @@ const sig = builder.addType(makeSig([kWasmI64], [kWasmI64]));
builder.addFunction(undefined, sig)
.addLocals(kWasmI32, 14).addLocals(kWasmI64, 17).addLocals(kWasmF32, 14)
.addBody([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprBr, 0x00,
kExprEnd,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprI32Const, 0x00,
kExprLocalSet, 0x09,
kExprI32Const, 0x00,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprI32Const, 0x00,
kExprLocalSet, 0x0a,
kExprBr, 0x00,
kExprEnd,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0x00,
kExprLocalSet, 0x12,
kExprBr, 0x00,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
index c77845af76..c4c8b30987 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-924843.js
@@ -9,8 +9,8 @@ const sig = builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]))
builder.addFunction(undefined, sig)
.addBody([
kExprLocalGet, 2,
- kExprIf, kWasmStmt,
- kExprBlock, kWasmStmt
+ kExprIf, kWasmVoid,
+ kExprBlock, kWasmVoid
]);
builder.addExport('main', 0);
assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
index 07081087fa..fce3727cd3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
@@ -27,12 +27,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(12, 12, false);
builder.addFunction("foo", kSig_v_iii)
.addBody([].concat([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0x2,
kExprI32Const, 0x01,
kExprI32And,
// Generate a test branch (which has 32k limited reach).
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 0x0,
kExprI32Const, 0x01,
kExprI32And,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9759.js b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
index 05bb26f7ff..ca0604eebf 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9759.js
@@ -15,7 +15,7 @@ const NUM_CASES = 0xfffd;
let cases = new Array(NUM_CASES).fill(0);
builder.addFunction('main', kSig_v_i)
.addBody([].concat([
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable], wasmSignedLeb(NUM_CASES),
cases, [0,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9832.js b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
index 05b63b0984..891139f50d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9832.js
@@ -16,7 +16,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
]).exportFunc();
builder.addFunction("main", kSig_i_i)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprLocalGet, 0,
kExprCallFunction, f.index,
kExprCallFunction, f.index,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js
new file mode 100644
index 0000000000..f40ead82d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-crbug-1168612.js
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inline-js-wasm-calls
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function getMain() {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_v_v)
+ .addBody([kExprUnreachable])
+ .exportAs("main");
+ return builder.instantiate().exports.main;
+}
+let foo = getMain();
+
+function loop() {
+ for (let i = 0; i < 2; i++) {
+ try {
+ foo();
+ } catch (e) {
+ if (i) {
+ throw e;
+ }
+ }
+ }
+}
+%PrepareFunctionForOptimization(loop);
+assertThrows(loop, WebAssembly.RuntimeError, "unreachable");
+%OptimizeFunctionOnNextCall(loop);
+assertThrows(loop, WebAssembly.RuntimeError, "unreachable");
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress1192313.js b/deps/v8/test/mjsunit/regress/wasm/regress1192313.js
new file mode 100644
index 0000000000..40307a3fa4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress1192313.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-eh --experimental-wasm-threads
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function Regress1192313() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(16, 32);
+ builder.addFunction('f', kSig_i_i)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kExprCallFunction, 0,
+ kAtomicPrefix, kExprI32AtomicAnd8U,
+ 0x00, 0xba, 0xe2, 0x81, 0xd6, 0x0b,
+ kExprCatchAll,
+ kExprTry, kWasmI32,
+ kExprI32Const, 0,
+ kExprI32Const, 0,
+ kAtomicPrefix, kExprI32AtomicAnd8U,
+ 0x00, 0x85, 0x97, 0xc4, 0x5f,
+ kExprDelegate, 1,
+ kExprEnd]).exportFunc();
+ let instance = builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index de53699570..e186dbb7f5 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -25,7 +25,7 @@ assertFalse(isNeverOptimize());
if (i == 1) {
// f must be interpreted code.
- assertTrue(isInterpreted(f));
+ assertTrue(isUnoptimized(f));
// Run twice (i = 0, 1), then tier-up.
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/tools/foozzie.js b/deps/v8/test/mjsunit/tools/foozzie.js
index 147f637060..759df0e983 100644
--- a/deps/v8/test/mjsunit/tools/foozzie.js
+++ b/deps/v8/test/mjsunit/tools/foozzie.js
@@ -119,3 +119,12 @@ assertEquals(unoptimized, callPow(6996));
let then_called = false;
Atomics.waitAsync().value.then(() => {then_called = true;});
assertEquals(true, then_called);
+
+// Test .caller access is neutered.
+function callee() {
+ assertEquals(null, callee.caller);
+}
+function caller() {
+ callee();
+}
+caller();
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index 19a9a0ccfb..e006ecdf0f 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -264,7 +264,7 @@ function generateFunctionBodyForSequence(sequence) {
kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
+ kExprLoop, kWasmVoid, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 6d37ba5548..3df938af7f 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -399,7 +399,7 @@ function TestStore(func, buffer, value, size) {
builder.addImportedMemory("m", "imported_mem", 16, 128, "shared");
builder.addFunction("main", kSig_i_v)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprI32Const, 16,
kExprI32Const, 20,
kAtomicPrefix,
@@ -442,7 +442,7 @@ function CmpExchgLoop(opcode, alignment) {
builder.addFunction("main", makeSig([kWasmI32], []))
.addLocals(kWasmI64, 2)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprLocalGet, 1,
kExprLocalGet, 2,
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
index 99e9016f1a..472bba81c0 100644
--- a/deps/v8/test/mjsunit/wasm/atomics64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -302,7 +302,7 @@ function generateFunctionBodyForSequence(sequence) {
kExprLocalGet, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
0,
// Spin until zero.
- kExprLoop, kWasmStmt, kExprLocalGet, 2, kAtomicPrefix,
+ kExprLoop, kWasmVoid, kExprLocalGet, 2, kAtomicPrefix,
kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
kExprEnd);
}
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
index 97ed71b9e1..7d979b65ec 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange-stress.js
@@ -43,20 +43,20 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprI32Mul,
kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump for return.
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Set counter to 0.
kExprI32Const, 0,
kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Find the next value to wait for.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Check end of sequence.
kExprLocalGet, kLocalCurrentOffset,
kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Load next value.
kExprLocalGet, kArgSequencePtr,
kExprLocalGet, kLocalCurrentOffset,
@@ -95,7 +95,7 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
loadMemOpcode, 0, 0,
kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Load address.
kExprLocalGet, kArgMemoryCell,
// Load expected value.
diff --git a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
index be219f3a07..ae266d1139 100644
--- a/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/compare-exchange64-stress.js
@@ -46,20 +46,20 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
kExprI32Mul,
kExprLocalSet, kArgSeqenceLength,
// Outer block so we have something to jump for return.
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Set counter to 0.
kExprI32Const, 0,
kExprLocalSet, kLocalCurrentOffset,
// Outer loop until maxcount.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Find the next value to wait for.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Check end of sequence.
kExprLocalGet, kLocalCurrentOffset,
kExprLocalGet, kArgSeqenceLength,
kExprI32Eq,
kExprBrIf, 2, // return
- ...[kExprBlock, kWasmStmt,
+ ...[kExprBlock, kWasmVoid,
// Load next value.
kExprLocalGet, kArgSequencePtr,
kExprLocalGet, kLocalCurrentOffset,
@@ -100,7 +100,7 @@ function makeWorkerCodeForOpcode(compareExchangeOpcode, size, functionName,
loadMemOpcode, 0, 0,
kExprLocalSet, kLocalNextValue,
// Hammer on memory until value found.
- ...[kExprLoop, kWasmStmt,
+ ...[kExprLoop, kWasmVoid,
// Load address.
kExprLocalGet, kArgMemoryCell,
// Load expected value.
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
index 59c1a9ed3a..1d30b4e5cf 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
@@ -74,9 +74,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
{mod: {pow: Math.pow}})
.then(assertUnreachable,
- error => assertEquals("WebAssembly.instantiateStreaming(): call[1] " +
+ error => assertEquals("WebAssembly.instantiateStreaming(): call[0] " +
"expected type f32, found local.get of type " +
- "i32 @+94",
+ "i32 @+92",
error.message)));
})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 63568fe657..d527a72ca6 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -359,13 +359,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_i)
- .addBody([kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ .addBody([kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable, 6, 0, 1, 2, 3, 4, 5, 6,
kExprEnd,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
index ec689791ca..e7dc9e4f0b 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-rethrow.js
@@ -14,7 +14,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprCatch, except,
kExprRethrow, 0,
@@ -27,7 +27,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprCatch, except,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprI32Const, 23,
@@ -47,7 +47,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("rethrow0", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprCatchAll,
kExprRethrow, 0,
@@ -60,7 +60,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprCatchAll,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprI32Const, 23,
@@ -91,13 +91,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 1,
kExprEnd,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprI32Const, 23,
@@ -125,7 +125,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprI32Const, 42,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-shared.js b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
index 8b3defb9af..d2d595dc73 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-shared.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-shared.js
@@ -30,7 +30,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
@@ -62,7 +62,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
@@ -97,7 +97,7 @@ function NewExportedException() {
]).exportFunc();
builder.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except1,
kExprEnd,
@@ -136,7 +136,7 @@ function NewExportedException() {
let except = builder2.addImportedException("m", "ex", kSig_v_v);
builder2.addFunction("catch", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, fun,
kExprCatch, except,
kExprEnd,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-simd.js b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
index 9082a7f49e..ae2d8ee40c 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-simd.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-simd.js
@@ -40,7 +40,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprEnd,
kExprLocalGet, 0,
kSimdPrefix, kExprI32x4Eq,
- kSimdPrefix, kExprV8x16AllTrue,
+ kSimdPrefix, kExprI8x16AllTrue,
])
.exportFunc();
var instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index d7539119ab..88e92fa3bb 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -17,7 +17,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd,
kExprI32Const, 1
@@ -36,7 +36,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction("catch_empty_try", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCatch, except,
kExprEnd,
]).exportFunc();
@@ -55,7 +55,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd,
kExprI32Const, 42,
@@ -74,14 +74,14 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let builder = new WasmModuleBuilder();
builder.addFunction('unreachable_in_try', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprUnreachable,
kExprCatchAll,
kExprEnd
]).exportFunc();
builder.addFunction('unreachable_in_try_unwind', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprUnreachable,
kExprUnwind,
kExprEnd
@@ -229,13 +229,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction('test', kSig_v_v)
.addBody([
// Calling "throw" directly should produce the expected exception.
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, throw_fn.index,
kExprCatch, except,
kExprEnd,
// Calling through JS produces a wrapped exceptions which does not match
// the catch.
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, imp,
kExprCatch, except,
kExprEnd
@@ -277,13 +277,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except2,
kExprElse,
kExprThrow, except3,
@@ -317,13 +317,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprTry, kWasmI32,
kExprLocalGet, 0,
kExprI32Eqz,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except2,
kExprElse,
kExprThrow, except3,
@@ -440,6 +440,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_l);
builder.addFunction("throw_catch_param", kSig_i_i)
+ .addLocals(kWasmI64, 1)
.addBody([
kExprLocalGet, 0,
kExprI64UConvertI32,
@@ -457,7 +458,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Const, 0,
kExprEnd,
kExprEnd,
- ]).addLocals(kWasmI64, 1).exportFunc();
+ ]).exportFunc();
let instance = builder.instantiate();
assertEquals(1, instance.exports.throw_catch_param(5));
@@ -608,7 +609,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Ne,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 0,
kExprThrow, except,
kExprUnreachable,
@@ -663,6 +664,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
// p == 2 -> path == 298
// p == 3 -> path == 338
// else -> path == 146
+ .addLocals(kWasmI32, 1)
.addBody([
kExprTry, kWasmI32,
kExprTry, kWasmI32,
@@ -670,7 +672,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprI32Const, 1,
kExprThrow, except,
kExprUnreachable,
@@ -686,7 +688,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprI32Const, 8,
kExprI32Ior,
@@ -705,7 +707,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 3,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprI32Const, /*64=*/ 192, 0,
kExprI32Ior,
@@ -719,7 +721,6 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprI32Ior,
kExprEnd,
])
- .addLocals(kWasmI32, 1)
.exportFunc();
// Scenario 2: Catches an exception raised from the direct callee.
@@ -765,7 +766,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("string_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowString,
kExprCatch, except,
kExprUnreachable,
@@ -775,7 +776,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("fp_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowFP,
kExprCatch, except,
kExprUnreachable,
@@ -785,7 +786,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("large_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowLarge,
kExprCatch, except,
kExprUnreachable,
@@ -795,7 +796,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("undefined_from_js", kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprCallFunction, kJSThrowUndefined,
kExprCatch, except,
kExprUnreachable,
@@ -865,7 +866,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let throw_if = builder.addFunction('throw', kSig_v_i)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except,
kExprEnd]).exportFunc();
builder.addFunction('test', kSig_i_i)
@@ -923,9 +924,9 @@ load("test/mjsunit/wasm/exceptions-utils.js");
// 2 -> throw except2
let throw_fn = builder.addFunction('throw', kSig_v_i)
.addBody([
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
- kExprBlock, kWasmStmt,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
+ kExprBlock, kWasmVoid,
kExprLocalGet, 0,
kExprBrTable, 2, 0, 1, 2,
kExprEnd,
@@ -992,7 +993,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction('test', kSig_i_v)
.addBody([
kExprTry, kWasmI32,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
kExprThrow, except1,
kExprDelegate, 0,
kExprI32Const, 1,
@@ -1012,8 +1013,8 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except = builder.addException(kSig_v_v);
builder.addFunction('test', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprDelegate, 1,
kExprCatchAll,
@@ -1021,8 +1022,8 @@ load("test/mjsunit/wasm/exceptions-utils.js");
]).exportFunc();
builder.addFunction('test_unwind', kSig_v_v)
.addBody([
- kExprTry, kWasmStmt,
- kExprTry, kWasmStmt,
+ kExprTry, kWasmVoid,
+ kExprTry, kWasmVoid,
kExprThrow, except,
kExprDelegate, 1,
kExprUnwind,
@@ -1032,3 +1033,61 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertTraps(WebAssembly.RuntimeError, () => instance.exports.test());
assertTraps(WebAssembly.RuntimeError, () => instance.exports.test_unwind());
})();
+
+(function TestThrowBeforeUnreachable() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_before_unreachable', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprUnreachable,
+ kExprCatchAll,
+ kExprI32Const, 42,
+ kExprEnd,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ assertEquals(42, instance.exports.throw_before_unreachable());
+})();
+
+(function TestUnreachableInCatchAll() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_before_unreachable', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprCatchAll,
+ kExprUnreachable,
+ kExprI32Const, 42,
+ kExprEnd,
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+})();
+
+(function TestThrowWithLocal() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_v);
+ builder.addFunction('throw_with_local', kSig_i_v)
+ .addLocals(kWasmI32, 4)
+ .addBody([
+ kExprI32Const, 42,
+ kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0,
+ kExprTry, kWasmF32,
+ kExprThrow, except,
+ kExprCatchAll,
+ kExprF32Const, 0, 0, 0, 0,
+ kExprEnd,
+ kExprDrop, // Drop the f32.
+ kExprDrop, // Drop the f64.
+ // Leave the '42' on the stack.
+ ]).exportFunc();
+
+ let instance = builder.instantiate();
+ assertEquals(42, instance.exports.throw_with_local());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/externref.js b/deps/v8/test/mjsunit/wasm/externref.js
index a954f273ae..ed63ab5886 100644
--- a/deps/v8/test/mjsunit/wasm/externref.js
+++ b/deps/v8/test/mjsunit/wasm/externref.js
@@ -333,3 +333,28 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
instance.exports.main({hello: 4}, 5, {world: 6}, null, {bar: 7});
})();
+
+(function MultiReturnRefTest() {
+ print("MultiReturnTest");
+ let builder = new WasmModuleBuilder();
+ let sig = makeSig([kWasmExternRef],
+ [kWasmExternRef, kWasmExternRef, kWasmExternRef, kWasmExternRef]);
+
+ builder.addFunction("callee", sig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ kExprLocalGet, 0,
+ ]);
+ builder.addFunction("main", sig)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprCallFunction, 0
+ ])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(null), [null, null, null, null]);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
index ba7bef301c..8a9bb2517e 100644
--- a/deps/v8/test/mjsunit/wasm/globals.js
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -183,7 +183,7 @@ TestGlobalIndexSpace(kWasmF64, 12345.678);
builder.addFunction("set", kSig_v_ii)
.addBody([
kExprLocalGet, 0,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprLocalGet, 1,
kExprGlobalSet, g.index,
kExprElse,
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
index 8babc66b75..9c9e881fab 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-branch.js
@@ -33,7 +33,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -60,7 +60,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -95,7 +95,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, index, // put index on stack
kExprI32Const, newValue, // put the value on stack
kExprI32StoreMem, 0, 0, // store
@@ -128,7 +128,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -160,7 +160,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -192,7 +192,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPages, // put deltaPages on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
@@ -227,7 +227,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprLocalGet, 1, // get index parameter
kExprI32Const, value, // put the value on stack
kExprI32StoreMem, 0, 0, // store
@@ -264,7 +264,7 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
kExprLocalGet, 0, // get condition parameter
- kExprIf, kWasmStmt, // if it's 1 then enter if
+ kExprIf, kWasmVoid, // if it's 1 then enter if
kExprI32Const, deltaPagesIf, // put deltaPagesIf on stack
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
index 660ec08e90..29ece1cba2 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-call.js
@@ -124,9 +124,9 @@ print('=== grow_memory in direct calls ===');
builder.addFunction('main', kSig_i_ii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
@@ -174,9 +174,9 @@ print('=== grow_memory in direct calls ===');
builder.addFunction('main', kSig_i_iii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprLocalGet, 1, // get number of new pages
kExprCallFunction, kGrowFunction, // call the grow function
@@ -338,9 +338,9 @@ print('\n=== grow_memory in indirect calls ===');
builder.addFunction('main', kSig_i_iii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 1, // -
- kExprIf, kWasmStmt, // if <param1> != 0
+ kExprIf, kWasmVoid, // if <param1> != 0
// Grow memory.
kExprLocalGet, 2, // get number of new pages
kExprLocalGet, 0, // get index of the function
@@ -393,9 +393,9 @@ print('\n=== grow_memory in indirect calls ===');
'main', makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32], [kWasmI32]))
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 1, // -
- kExprIf, kWasmStmt, // if <param1> != 0
+ kExprIf, kWasmVoid, // if <param1> != 0
// Grow memory.
kExprLocalGet, 2, // get number of new pages
kExprLocalGet, 0, // get index of the function
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
index 143b555b17..eb99902c14 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory-in-loop.js
@@ -30,9 +30,9 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_i)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -82,9 +82,9 @@ function generateBuilder() {
kExprI32Const, deltaPagesOut, // -
kExprMemoryGrow, kMemoryZero, // grow memory
kExprDrop, // drop the result of grow
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -131,9 +131,9 @@ function generateBuilder() {
builder.addFunction('main', kSig_i_ii)
.addBody([
// clang-format off
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPages, // -
kExprMemoryGrow, kMemoryZero, // grow memory
@@ -202,9 +202,9 @@ function generateBuilder() {
kExprI32Add, // increase value on stack
kExprI32StoreMem, 0, 0, // store new value
// Start loop.
- kExprLoop, kWasmStmt, // while
+ kExprLoop, kWasmVoid, // while
kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
+ kExprIf, kWasmVoid, // if <param0> != 0
// Grow memory.
kExprI32Const, deltaPagesIn, // -
kExprMemoryGrow, kMemoryZero, // grow memory
diff --git a/deps/v8/test/mjsunit/wasm/loop-rotation.js b/deps/v8/test/mjsunit/wasm/loop-rotation.js
index 7805f5ccf5..538bdb0bd0 100644
--- a/deps/v8/test/mjsunit/wasm/loop-rotation.js
+++ b/deps/v8/test/mjsunit/wasm/loop-rotation.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
@@ -32,7 +32,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
@@ -55,7 +55,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(1, undefined, false);
builder.addFunction("main", kSig_v_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Sub,
diff --git a/deps/v8/test/mjsunit/wasm/loop-unrolling.js b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
index b0e125413f..43852dec26 100644
--- a/deps/v8/test/mjsunit/wasm/loop-unrolling.js
+++ b/deps/v8/test/mjsunit/wasm/loop-unrolling.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-typed-funcref --experimental-wasm-eh
-// Flags: --wasm-loop-unrolling
+// Flags: --wasm-loop-unrolling --experimental-wasm-return-call
// Needed for exceptions-utils.js.
// Flags: --allow-natives-syntax
@@ -17,10 +17,10 @@ load("test/mjsunit/wasm/exceptions-utils.js");
builder.addFunction("main", kSig_i_i)
.addBody([
...wasmI32Const(1),
- kExprLet, kWasmStmt, 1, 1, kWasmI32,
- kExprLoop, kWasmStmt,
+ kExprLet, kWasmVoid, 1, 1, kWasmI32,
+ kExprLoop, kWasmVoid,
...wasmI32Const(10),
- kExprLet, kWasmStmt, 1, 1, kWasmI32,
+ kExprLet, kWasmVoid, 1, 1, kWasmI32,
kExprLocalGet, 0,
kExprLocalGet, 1,
kExprI32Sub,
@@ -38,6 +38,33 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertEquals(instance.exports.main(100), 109);
})();
+// Test the interaction between tail calls and loop unrolling.
+(function TailCallTest() {
+ let builder = new WasmModuleBuilder();
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprIf, kWasmVoid,
+ kExprLocalGet, 0,
+ kExprReturnCall, callee.index,
+ kExprElse,
+ kExprBr, 1,
+ kExprEnd,
+ kExprEnd,
+ kExprUnreachable
+ ])
+ .exportAs("main");
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ let instance = new WebAssembly.Instance(module);
+ assertEquals(instance.exports.main(1), 1);
+})();
+
// Test the interaction between the eh proposal and loop unrolling.
(function TestRethrowNested() {
@@ -59,16 +86,16 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 0,
kExprI32Eq,
- kExprIf, kWasmStmt,
- kExprLoop, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprLoop, kWasmVoid,
kExprRethrow, 2,
kExprEnd,
kExprEnd,
kExprLocalGet, 0,
kExprI32Const, 1,
kExprI32Eq,
- kExprIf, kWasmStmt,
- kExprLoop, kWasmStmt,
+ kExprIf, kWasmVoid,
+ kExprLoop, kWasmVoid,
kExprRethrow, 3,
kExprEnd,
kExprEnd,
@@ -92,11 +119,11 @@ load("test/mjsunit/wasm/exceptions-utils.js");
let except1 = builder.addException(kSig_v_v);
builder.addFunction("throw", kSig_i_i)
.addBody([
- kExprLoop, kWasmStmt,
+ kExprLoop, kWasmVoid,
kExprLocalGet, 0,
kExprI32Const, 10,
kExprI32GtS,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
@@ -125,7 +152,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
kExprLocalGet, 0,
kExprI32Const, 10,
kExprI32GtS,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprThrow, except1,
kExprElse,
kExprLocalGet, 0,
diff --git a/deps/v8/test/mjsunit/wasm/memory64.js b/deps/v8/test/mjsunit/wasm/memory64.js
index e764635846..5376ba87db 100644
--- a/deps/v8/test/mjsunit/wasm/memory64.js
+++ b/deps/v8/test/mjsunit/wasm/memory64.js
@@ -81,3 +81,28 @@ function BasicMemory64Tests(num_pages) {
// let num_pages = 5 * 1024 * 1024 * 1024 / kPageSize;
// BasicMemory64Tests(num_pages);
//})();
+
+(function TestGrow64() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addMemory64(1, 10, false);
+
+ builder.addFunction('grow', makeSig([kWasmI64], [kWasmI64]))
+ .addBody([
+ kExprLocalGet, 0, // local.get 0
+ kExprMemoryGrow, 0, // memory.grow 0
+ ])
+ .exportFunc();
+
+ let instance = builder.instantiate();
+
+ assertEquals(1n, instance.exports.grow(2n));
+ assertEquals(3n, instance.exports.grow(1n));
+ assertEquals(-1n, instance.exports.grow(-1n));
+ assertEquals(-1n, instance.exports.grow(1n << 31n));
+ assertEquals(-1n, instance.exports.grow(1n << 32n));
+ assertEquals(-1n, instance.exports.grow(1n << 33n));
+ assertEquals(-1n, instance.exports.grow(1n << 63n));
+  assertEquals(-1n, instance.exports.grow(7n)); // Above the maximum of 10.
+ assertEquals(4n, instance.exports.grow(6n)); // Just at the maximum of 10.
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index d5a4e7119f..5d5e81c6d3 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -17,12 +17,12 @@ function genModule(memory) {
.addBody([
// main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
// TODO(titzer): this manual bytecode has a copy of test-run-wasm.cc
- /**/ kExprLoop, kWasmStmt, // --
+ /**/ kExprLoop, kWasmVoid, // --
/* */ kExprLocalGet, 0, // --
- /* */ kExprIf, kWasmStmt, // --
+ /* */ kExprIf, kWasmVoid, // --
/* */ kExprLocalGet, 0, // --
/* */ kExprI32LoadMem, 0, 0, // --
- /* */ kExprIf, kWasmStmt, // --
+ /* */ kExprIf, kWasmVoid, // --
/* */ kExprI32Const, 127, // --
/* */ kExprReturn, // --
/* */ kExprEnd, // --
diff --git a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
index 9e786bed23..342b3c682d 100644
--- a/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
+++ b/deps/v8/test/mjsunit/wasm/multiple-code-spaces.js
@@ -32,7 +32,7 @@ while (true) {
// Each function f<n> with argument {i} then calls f<n/10> with argument
// {i + 1} and returns whatever that function returns.
const body_template = [
- kExprLocalGet, 0, kExprI32Eqz, kExprIf, kWasmStmt, // if (i == 0)
+ kExprLocalGet, 0, kExprI32Eqz, kExprIf, kWasmVoid, // if (i == 0)
kExprLocalGet, 0 // get i
];
for (let i = 0; i < 1000; ++i) body_template.push(kExprI32LoadMem, 0, 0);
diff --git a/deps/v8/test/mjsunit/wasm/reference-tables.js b/deps/v8/test/mjsunit/wasm/reference-tables.js
index 756ec04d44..cfbe238e6b 100644
--- a/deps/v8/test/mjsunit/wasm/reference-tables.js
+++ b/deps/v8/test/mjsunit/wasm/reference-tables.js
@@ -4,74 +4,81 @@
// Flags: --experimental-wasm-typed-funcref
-load("test/mjsunit/wasm/wasm-module-builder.js");
+load('test/mjsunit/wasm/wasm-module-builder.js');
-(function Test1() {
- var exporting_instance = (function () {
+(function TestTables() {
+ var exporting_instance = (function() {
var builder = new WasmModuleBuilder();
var binary_type = builder.addType(kSig_i_ii);
- builder.addFunction("addition", kSig_i_ii)
- .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
- .exportFunc();
+ builder.addFunction('addition', kSig_i_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
+ .exportFunc();
- builder.addFunction("id", kSig_i_i)
- .addBody([kExprLocalGet, 0])
- .exportFunc();
+ builder.addFunction('succ', kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add])
+ .exportFunc();
- builder.addTable(wasmOptRefType(binary_type), 1, 100).exportAs("table");
+ builder.addTable(wasmOptRefType(binary_type), 1, 100).exportAs('table');
return builder.instantiate({});
})();
// Wrong type for imported table.
- assertThrows(
- () => {
- var builder = new WasmModuleBuilder();
- var unary_type = builder.addType(kSig_i_i);
- builder.addImportedTable("imports", "table", 1, 100,
- wasmOptRefType(unary_type));
- builder.instantiate({imports: {table: exporting_instance.exports.table}})
- },
- WebAssembly.LinkError,
- /imported table does not match the expected type/
- )
+ assertThrows(() => {
+ var builder = new WasmModuleBuilder();
+ var unary_type = builder.addType(kSig_i_i);
+ builder.addImportedTable(
+ 'imports', 'table', 1, 100, wasmOptRefType(unary_type));
+ builder.instantiate({imports: {table: exporting_instance.exports.table}})
+ }, WebAssembly.LinkError, /imported table does not match the expected type/)
// Type for imported table must match exactly.
- assertThrows(
- () => {
- var builder = new WasmModuleBuilder();
- builder.addImportedTable("imports", "table", 1, 100, kWasmFuncRef);
- builder.instantiate({imports: {table: exporting_instance.exports.table}})
- },
- WebAssembly.LinkError,
- /imported table does not match the expected type/
- )
-
- var instance = (function () {
+ assertThrows(() => {
+ var builder = new WasmModuleBuilder();
+ builder.addImportedTable('imports', 'table', 1, 100, kWasmFuncRef);
+ builder.instantiate({imports: {table: exporting_instance.exports.table}})
+ }, WebAssembly.LinkError, /imported table does not match the expected type/)
+
+ var instance = (function() {
var builder = new WasmModuleBuilder();
var unary_type = builder.addType(kSig_i_i);
var binary_type = builder.addType(kSig_i_ii);
- builder.addImportedTable("imports", "table", 1, 100,
- wasmOptRefType(binary_type));
-
- var table = builder.addTable(wasmOptRefType(unary_type), 1)
- .exportAs("table");
- builder.addTable(kWasmFuncRef, 1).exportAs("generic_table");
-
- builder.addFunction("table_test", makeSig([wasmRefType(unary_type)],
- [kWasmI32]))
- // Set table[0] to input function, then retrieve it and call it.
- .addBody([kExprI32Const, 0, kExprLocalGet, 0, kExprTableSet, table.index,
- kExprI32Const, 42, kExprI32Const, 0, kExprTableGet, table.index,
- kExprCallRef])
- .exportFunc();
+ builder.addImportedTable(
+ 'imports', 'table', 1, 100, wasmOptRefType(binary_type));
+
+ var table =
+ builder.addTable(wasmOptRefType(unary_type), 10).exportAs('table');
+ builder.addTable(kWasmFuncRef, 1).exportAs('generic_table');
+
+ builder
+ .addFunction(
+ 'table_test', makeSig([wasmRefType(unary_type)], [kWasmI32]))
+ // Set table[0] to input function, then retrieve it and call it.
+ .addBody([
+ kExprI32Const, 0, kExprLocalGet, 0, kExprTableSet, table.index,
+ kExprI32Const, 42, kExprI32Const, 0, kExprTableGet, table.index,
+ kExprCallRef
+ ])
+ .exportFunc();
+
+ // Same, but with table[1] and call_indirect
+ builder
+ .addFunction(
+ 'table_indirect_test',
+ makeSig([wasmRefType(unary_type)], [kWasmI32]))
+ .addBody([
+ kExprI32Const, 1, kExprLocalGet, 0, kExprTableSet, table.index,
+ kExprI32Const, 42, kExprI32Const, 0, kExprCallIndirect, unary_type,
+ table.index
+ ])
+ .exportFunc();
// Instantiate with a table of the correct type.
return builder.instantiate(
- {imports: {table: exporting_instance.exports.table}});
+ {imports: {table: exporting_instance.exports.table}});
})();
// This module is valid.
@@ -79,13 +86,57 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// The correct function reference is preserved when setting it to and getting
// it back from a table.
- assertEquals(42, instance.exports.table_test(exporting_instance.exports.id));
+ assertEquals(
+ 43, instance.exports.table_test(exporting_instance.exports.succ));
+ // Same for call indirect (the indirect call tables are also set correctly).
+ assertEquals(
+ 43,
+ instance.exports.table_indirect_test(exporting_instance.exports.succ));
// Setting from JS API respects types.
- instance.exports.generic_table.set(0, exporting_instance.exports.id);
- instance.exports.table.set(0, exporting_instance.exports.id);
+ instance.exports.generic_table.set(0, exporting_instance.exports.succ);
+ instance.exports.table.set(0, exporting_instance.exports.succ);
assertThrows(
- () => instance.exports.table.set(0, exporting_instance.exports.addition),
- TypeError,
- /Argument 1 must be null or a WebAssembly function of type compatible to 'this'/);
+ () => instance.exports.table.set(0, exporting_instance.exports.addition),
+ TypeError,
+ /Argument 1 must be null or a WebAssembly function of type compatible to/);
+})();
+
+(function TestNonNullableTables() {
+ var builder = new WasmModuleBuilder();
+
+ var binary_type = builder.addType(kSig_i_ii);
+
+ var addition = builder.addFunction('addition', kSig_i_ii).addBody([
+ kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add
+ ]);
+ var subtraction =
+ builder.addFunction('subtraction', kSig_i_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Sub])
+ .exportFunc();
+
+ var table = builder.addTable(wasmRefType(binary_type), 3, 3, addition.index);
+
+ builder.addFunction('init', kSig_v_v)
+ .addBody([
+ kExprI32Const, 1, kExprRefFunc, subtraction.index, kExprTableSet,
+ table.index
+ ])
+ .exportFunc();
+
+ // (index, arg1, arg2) -> table[index](arg1, arg2)
+ builder.addFunction('table_test', kSig_i_iii)
+ .addBody([
+ kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 0, kExprCallIndirect,
+ binary_type, table.index
+ ])
+ .exportFunc();
+
+ var instance = builder.instantiate({});
+
+ assertTrue(!!instance);
+
+ instance.exports.init();
+ assertEquals(44, instance.exports.table_test(0, 33, 11));
+ assertEquals(22, instance.exports.table_test(1, 33, 11));
})();
diff --git a/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js b/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js
new file mode 100644
index 0000000000..e50feb2e54
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/simd-i64x2-mul.js
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-enable-avx
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Carefully hand-crafted test case to exercise a codegen bug in Liftoff. In
+// i64x2.mul, non-AVX case, we will overwrite rhs if dst == rhs. The intention
+// is to do dst = lhs * rhs, but if dst == rhs && dst != lhs, we will overwrite
+// dst (and hence rhs) with lhs, effectively doing lhs^2.
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction(undefined, kSig_l_v)
+.addBody([
+ kExprI64Const, 0,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprI64Const, 1,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprI64Const, 2,
+ kSimdPrefix, kExprI64x2Splat,
+ kExprCallFunction, 1,
+]);
+
+let sig = makeSig([kWasmS128, kWasmS128, kWasmS128], [kWasmI64]);
+builder.addFunction(undefined, sig)
+.addLocals(kWasmS128, 10)
+.addBody([
+ kExprLocalGet, 2, // This is 2 (lhs).
+ kExprI64Const, 4, // This is 4 (rhs).
+ kSimdPrefix, kExprI64x2Splat,
+ kSimdPrefix, kExprI64x2Mul, 0x01, // The bug will write 2 to rhs.
+ kSimdPrefix, kExprI64x2ExtractLane, 0,
+]);
+builder.addExport('main', 0);
+const module = builder.instantiate();
+// Should be 2 * 4, the buggy codegen will give 2 * 2 instead.
+assertEquals(8n, module.exports.main());
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 4f91f58fc7..be9d54a064 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -88,11 +88,11 @@ Error.prepareStackTrace = function(error, frames) {
module.exports.main();
verifyStack(stack, [
- // isWasm function line pos file offset funcIndex
- [ false, "STACK", 38, 0, "stack.js"],
- [ true, "main", 1, 0x86, null, '0x86', 1],
- [ false, "testStackFrames", 88, 0, "stack.js"],
- [ false, null, 97, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ false, "STACK", 38, 0, "stack.js"],
+ [ true, "main", 1, 0x86, "wasm://wasm/7168ab72", '0x86', 1],
+ [ false, "testStackFrames", 88, 0, "stack.js"],
+ [ false, null, 97, 0, "stack.js"]
]);
})();
@@ -103,10 +103,10 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("unreachable", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, "exec_unreachable", 1, 0x8b, null, '0x8b', 2],
- [ false, "testWasmUnreachable", 101, 0, "stack.js"],
- [ false, null, 112, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ true, "exec_unreachable", 1, 0x8b, "wasm://wasm/7168ab72", '0x8b', 2],
+ [ false, "testWasmUnreachable", 101, 0, "stack.js"],
+ [ false, null, 112, 0, "stack.js"]
]);
}
})();
@@ -118,11 +118,11 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, "mem_out_of_bounds", 1, 0x91, null, '0x91', 3],
- [ true, "call_mem_out_of_bounds", 1, 0x97, null, '0x97', 4],
- [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
- [ false, null, 128, 0, "stack.js"]
+ // isWasm function line pos file offset funcIndex
+ [ true, "mem_out_of_bounds", 1, 0x91, "wasm://wasm/7168ab72", '0x91', 3],
+ [ true, "call_mem_out_of_bounds", 1, 0x97, "wasm://wasm/7168ab72", '0x97', 4],
+ [ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
+ [ false, null, 128, 0, "stack.js"]
]);
}
})();
@@ -147,11 +147,11 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals("Maximum call stack size exceeded", e.message, "trap reason");
assertTrue(e.stack.length >= 4, "expected at least 4 stack entries");
verifyStack(e.stack.splice(0, 4), [
- // isWasm function line pos file offset funcIndex
- [ true, "recursion", 1, 0x34, null, '0x34', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0],
- [ true, "recursion", 1, 0x37, null, '0x37', 0]
+ // isWasm function line pos file offset funcIndex
+ [ true, "recursion", 1, 0x34, "wasm://wasm/80a35e5a", '0x34', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0],
+ [ true, "recursion", 1, 0x37, "wasm://wasm/80a35e5a", '0x37', 0]
]);
}
})();
@@ -175,10 +175,10 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals('unreachable', e.message, 'trap reason');
let hexOffset = '0x' + (unreachable_pos + 0x25).toString(16);
verifyStack(e.stack, [
- // isWasm function line pos file offset funcIndex
- [ true, 'main', 1, unreachable_pos + 0x25, null, hexOffset, 0],
- [ false, 'testBigOffset', 172, 0, 'stack.js'],
- [ false, null, 184, 0, 'stack.js']
+ // isWasm function line pos file offset funcIndex
+ [ true, 'main', 1, unreachable_pos + 0x25, 'wasm://wasm/000600e6', hexOffset, 0],
+ [ false, 'testBigOffset', 172, 0, 'stack.js'],
+ [ false, null, 184, 0, 'stack.js']
]);
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/streaming-error-position.js b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
index 50f795f770..6984f22ea2 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-error-position.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-error-position.js
@@ -390,7 +390,7 @@ function testErrorPosition(bytes, pos, message) {
1, // number of types
kWasmFunctionTypeForm, // type
1, // number of parameter
- 0x7b, // invalid type
+ kWasmVoid, // invalid type
0 // number of returns
]);
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index bde5793acc..1f070d01f9 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -147,7 +147,6 @@ const dummy_func = exports.set_table_func1;
kExprTableSet, table_index, // --
kExprI32Const, index, // entry index
kExprCallIndirect, sig_index, table_index // --
-
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index a34162ab8c..719af7e7b7 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -57,7 +57,7 @@ builder.addFunction("main", kSig_i_i)
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32LtU,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
// offset 9
kExprI32Const, 0x7e /* -2 */,
kExprLocalGet, 0,
@@ -70,7 +70,7 @@ builder.addFunction("main", kSig_i_i)
kExprLocalGet, 0,
kExprI32Const, 2,
kExprI32Eq,
- kExprIf, kWasmStmt,
+ kExprIf, kWasmVoid,
kExprUnreachable,
kExprEnd,
// offset 30
diff --git a/deps/v8/test/mjsunit/wasm/unreachable-validation.js b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
index 70768ff7d4..a9165639ab 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable-validation.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable-validation.js
@@ -45,13 +45,13 @@ let brt1 = [kExprBrTable, 0, 1];
let brt01 = [kExprBrTable, 1, 0, 1];
let f32 = [kExprF32Const, 0, 0, 0, 0];
let zero = [kExprI32Const, 0];
-let if_else_empty = [kExprIf, kWasmStmt, kExprElse, kExprEnd];
-let if_unr = [kExprIf, kWasmStmt, kExprUnreachable, kExprEnd];
-let if_else_unr = [kExprIf, kWasmStmt, kExprUnreachable, kExprElse, kExprUnreachable, kExprEnd];
-let block_unr = [kExprBlock, kWasmStmt, kExprUnreachable, kExprEnd];
-let loop_unr = [kExprLoop, kWasmStmt, kExprUnreachable, kExprEnd];
-let block_block_unr = [kExprBlock, kWasmStmt, kExprBlock, kWasmStmt, kExprUnreachable, kExprEnd, kExprEnd];
-let block = [kExprBlock, kWasmStmt]
+let if_else_empty = [kExprIf, kWasmVoid, kExprElse, kExprEnd];
+let if_unr = [kExprIf, kWasmVoid, kExprUnreachable, kExprEnd];
+let if_else_unr = [kExprIf, kWasmVoid, kExprUnreachable, kExprElse, kExprUnreachable, kExprEnd];
+let block_unr = [kExprBlock, kWasmVoid, kExprUnreachable, kExprEnd];
+let loop_unr = [kExprLoop, kWasmVoid, kExprUnreachable, kExprEnd];
+let block_block_unr = [kExprBlock, kWasmVoid, kExprBlock, kWasmVoid, kExprUnreachable, kExprEnd, kExprEnd];
+let block = [kExprBlock, kWasmVoid]
let iblock = [kExprBlock, kWasmI32]
let fblock = [kExprBlock, kWasmF32]
let end = kExprEnd;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
new file mode 100644
index 0000000000..13c19813ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
@@ -0,0 +1,149 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let instance = (() => {
+ let builder = new WasmModuleBuilder();
+ let struct = builder.addStruct([makeField(kWasmI32, true)]);
+ let array = builder.addArray(kWasmF64, true);
+ let sig = builder.addType(makeSig([kWasmI32], [kWasmI32]));
+
+ let func = builder.addFunction('inc', sig)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add])
+ .exportAs('inc');
+
+ builder.addFunction('struct_producer', makeSig([], [kWasmDataRef]))
+ .addBody([
+ kGCPrefix, kExprRttCanon, struct, kGCPrefix, kExprStructNewDefault,
+ struct
+ ])
+ .exportFunc();
+
+ builder.addFunction('array_producer', makeSig([], [kWasmDataRef]))
+ .addBody([
+ kExprI32Const, 10, kGCPrefix, kExprRttCanon, array, kGCPrefix,
+ kExprArrayNewDefault, array
+ ])
+ .exportFunc();
+
+ builder.addFunction('i31_producer', makeSig([], [kWasmI31Ref]))
+ .addBody([kExprI32Const, 5, kGCPrefix, kExprI31New])
+ .exportFunc();
+
+ builder.addFunction('func_producer', makeSig([], [wasmRefType(sig)]))
+ .addBody([kExprRefFunc, func.index])
+ .exportFunc();
+
+ let test_types = {
+ i31: kWasmI31Ref,
+ struct: kWasmDataRef,
+ array: kWasmDataRef,
+ raw_struct: struct,
+ raw_array: array,
+ typed_func: sig,
+ data: kWasmDataRef,
+ eq: kWasmEqRef,
+ func: kWasmFuncRef,
+ any: kWasmAnyRef,
+ };
+
+ for (key in test_types) {
+ let type = wasmOptRefType(test_types[key]);
+ builder.addFunction(key + '_id', makeSig([type], [type]))
+ .addBody([kExprLocalGet, 0])
+ .exportFunc();
+ builder.addFunction(key + '_null', makeSig([], [type]))
+ .addBody([kExprRefNull, test_types[key]])
+ .exportFunc();
+ }
+
+ return builder.instantiate({});
+})();
+
+// Wasm-exposed null is the same as JS null.
+assertEquals(instance.exports.struct_null(), null);
+
+// We can roundtrip an i31.
+instance.exports.i31_id(instance.exports.i31_producer());
+// We can roundtrip any null as i31.
+instance.exports.i31_id(instance.exports.i31_null());
+instance.exports.i31_id(instance.exports.struct_null());
+// We cannot roundtrip a struct as i31.
+assertThrows(
+ () => instance.exports.i31_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as dataref.
+instance.exports.data_id(instance.exports.struct_producer());
+// We can roundtrip an array as dataref.
+instance.exports.data_id(instance.exports.array_producer());
+// We can roundtrip any null as dataref.
+instance.exports.data_id(instance.exports.data_null());
+instance.exports.data_id(instance.exports.i31_null());
+// We cannot roundtrip an i31 as dataref.
+assertThrows(
+ () => instance.exports.data_id(instance.exports.i31_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as eqref.
+instance.exports.eq_id(instance.exports.struct_producer());
+// We can roundtrip an array as eqref.
+instance.exports.eq_id(instance.exports.array_producer());
+// We can roundtrip an i31 as eqref.
+instance.exports.eq_id(instance.exports.i31_producer());
+// We can roundtrip any null as eqref.
+instance.exports.eq_id(instance.exports.data_null());
+instance.exports.eq_id(instance.exports.i31_null());
+instance.exports.eq_id(instance.exports.func_null());
+// We cannot roundtrip a func as eqref.
+assertThrows(
+ () => instance.exports.eq_id(instance.exports.func_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a struct as anyref.
+instance.exports.any_id(instance.exports.struct_producer());
+// We can roundtrip an array as anyref.
+instance.exports.any_id(instance.exports.array_producer());
+// We can roundtrip an i31 as anyref.
+instance.exports.any_id(instance.exports.i31_producer());
+// We can roundtrip a func as anyref.
+instance.exports.any_id(instance.exports.func_producer());
+// We can roundtrip any null as anyref.
+instance.exports.any_id(instance.exports.data_null());
+instance.exports.any_id(instance.exports.i31_null());
+instance.exports.any_id(instance.exports.func_null());
+// We can roundtrip a JS object as anyref.
+instance.exports.any_id(instance);
+
+// We can roundtrip a typed function.
+instance.exports.typed_func_id(instance.exports.func_producer());
+// We can roundtrip any null as typed funcion.
+instance.exports.typed_func_id(instance.exports.i31_null());
+instance.exports.typed_func_id(instance.exports.struct_null());
+// We cannot roundtrip a struct as typed funcion.
+assertThrows(
+ () => instance.exports.typed_func_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+
+// We can roundtrip a func.
+instance.exports.func_id(instance.exports.func_producer());
+// We can roundtrip any null as func.
+instance.exports.func_id(instance.exports.i31_null());
+instance.exports.func_id(instance.exports.struct_null());
+// We cannot roundtrip an i31 as func.
+assertThrows(
+ () => instance.exports.func_id(instance.exports.i31_producer()), TypeError,
+ 'type incompatibility when transforming from/to JS');
+
+// We cannot directly roundtrip structs or arrays.
+// TODO(7748): Switch these tests once we can.
+assertThrows(
+ () => instance.exports.raw_struct_id(instance.exports.struct_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
+assertThrows(
+ () => instance.exports.raw_array_id(instance.exports.array_producer()),
+ TypeError, 'type incompatibility when transforming from/to JS');
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index b5021a313c..d3ae3a9b4a 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -92,13 +92,13 @@ let kPassiveWithElements = 5;
let kDeclarativeWithElements = 7;
// Function declaration flags
-let kDeclFunctionName = 0x01;
+let kDeclFunctionName = 0x01;
let kDeclFunctionImport = 0x02;
let kDeclFunctionLocals = 0x04;
let kDeclFunctionExport = 0x08;
// Value types and related
-let kWasmStmt = 0x40;
+let kWasmVoid = 0x40;
let kWasmI32 = 0x7f;
let kWasmI64 = 0x7e;
let kWasmF32 = 0x7d;
@@ -107,19 +107,28 @@ let kWasmS128 = 0x7b;
let kWasmI8 = 0x7a;
let kWasmI16 = 0x79;
let kWasmFuncRef = 0x70;
-let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
+let kWasmAnyFunc = kWasmFuncRef; // Alias named as in the JS API spec
let kWasmExternRef = 0x6f;
let kWasmAnyRef = 0x6e;
let kWasmEqRef = 0x6d;
let kWasmOptRef = 0x6c;
let kWasmRef = 0x6b;
-function wasmOptRefType(index) { return {opcode: kWasmOptRef, index: index}; }
-function wasmRefType(index) { return {opcode: kWasmRef, index: index}; }
+function wasmOptRefType(index) {
+ return {opcode: kWasmOptRef, index: index};
+}
+function wasmRefType(index) {
+ return {opcode: kWasmRef, index: index};
+}
let kWasmI31Ref = 0x6a;
-let kWasmRtt = 0x69;
+let kWasmRttWithDepth = 0x69;
function wasmRtt(index, depth) {
- return {opcode: kWasmRtt, index: index, depth: depth};
+ return { opcode: kWasmRttWithDepth, index: index, depth: depth };
}
+let kWasmRtt = 0x68;
+function wasmRttNoDepth(index) {
+ return { opcode: kWasmRtt, index: index };
+}
+let kWasmDataRef = 0x67;
let kExternalFunction = 0;
let kExternalTable = 1;
@@ -400,7 +409,10 @@ const kWasmOpcodes = {
'I64SExtendI32': 0xc4,
'RefNull': 0xd0,
'RefIsNull': 0xd1,
- 'RefFunc': 0xd2
+ 'RefFunc': 0xd2,
+ 'RefAsNonNull': 0xd3,
+ 'BrOnNull': 0xd4,
+ 'RefEq': 0xd5,
};
function defineWasmOpcode(name, value) {
@@ -451,6 +463,15 @@ let kExprRttSub = 0x31;
let kExprRefTest = 0x40;
let kExprRefCast = 0x41;
let kExprBrOnCast = 0x42;
+let kExprRefIsFunc = 0x50;
+let kExprRefIsData = 0x51;
+let kExprRefIsI31 = 0x52;
+let kExprRefAsFunc = 0x58;
+let kExprRefAsData = 0x59;
+let kExprRefAsI31 = 0x5a;
+let kExprBrOnFunc = 0x60;
+let kExprBrOnData = 0x61;
+let kExprBrOnI31 = 0x62;
// Numeric opcodes.
let kExprI32SConvertSatF32 = 0x00;
@@ -554,23 +575,27 @@ let kExprS128Load16Splat = 0x08;
let kExprS128Load32Splat = 0x09;
let kExprS128Load64Splat = 0x0a;
let kExprS128StoreMem = 0x0b;
-
let kExprS128Const = 0x0c;
let kExprI8x16Shuffle = 0x0d;
-
let kExprI8x16Swizzle = 0x0e;
+
let kExprI8x16Splat = 0x0f;
let kExprI16x8Splat = 0x10;
let kExprI32x4Splat = 0x11;
let kExprI64x2Splat = 0x12;
let kExprF32x4Splat = 0x13;
let kExprF64x2Splat = 0x14;
+let kExprI8x16ExtractLaneS = 0x15;
+let kExprI8x16ExtractLaneU = 0x16;
let kExprI8x16ReplaceLane = 0x17;
let kExprI16x8ExtractLaneS = 0x18;
+let kExprI16x8ExtractLaneU = 0x19;
let kExprI16x8ReplaceLane = 0x1a;
let kExprI32x4ExtractLane = 0x1b;
let kExprI32x4ReplaceLane = 0x1c;
+let kExprI64x2ExtractLane = 0x1d;
let kExprI64x2ReplaceLane = 0x1e;
+let kExprF32x4ExtractLane = 0x1f;
let kExprF32x4ReplaceLane = 0x20;
let kExprF64x2ExtractLane = 0x21;
let kExprF64x2ReplaceLane = 0x22;
@@ -622,12 +647,30 @@ let kExprS128AndNot = 0x4f;
let kExprS128Or = 0x50;
let kExprS128Xor = 0x51;
let kExprS128Select = 0x52;
+let kExprV128AnyTrue = 0x53;
+let kExprS128Load8Lane = 0x54;
+let kExprS128Load16Lane = 0x55;
+let kExprS128Load32Lane = 0x56;
+let kExprS128Load64Lane = 0x57;
+let kExprS128Store8Lane = 0x58;
+let kExprS128Store16Lane = 0x59;
+let kExprS128Store32Lane = 0x5a;
+let kExprS128Store64Lane = 0x5b;
+let kExprS128Load32Zero = 0x5c;
+let kExprS128Load64Zero = 0x5d;
+let kExprF32x4DemoteF64x2Zero = 0x5e;
+let kExprF64x2PromoteLowF32x4 = 0x5f;
let kExprI8x16Abs = 0x60;
let kExprI8x16Neg = 0x61;
-let kExprV128AnyTrue = 0x62;
-let kExprV8x16AllTrue = 0x63;
+let kExprI8x16Popcnt = 0x62;
+let kExprI8x16AllTrue = 0x63;
+let kExprI8x16BitMask = 0x64;
let kExprI8x16SConvertI16x8 = 0x65;
let kExprI8x16UConvertI16x8 = 0x66;
+let kExprF32x4Ceil = 0x67;
+let kExprF32x4Floor = 0x68;
+let kExprF32x4Trunc = 0x69;
+let kExprF32x4NearestInt = 0x6a;
let kExprI8x16Shl = 0x6b;
let kExprI8x16ShrS = 0x6c;
let kExprI8x16ShrU = 0x6d;
@@ -637,14 +680,23 @@ let kExprI8x16AddSatU = 0x70;
let kExprI8x16Sub = 0x71;
let kExprI8x16SubSatS = 0x72;
let kExprI8x16SubSatU = 0x73;
+let kExprF64x2Ceil = 0x74;
+let kExprF64x2Floor = 0x75;
let kExprI8x16MinS = 0x76;
let kExprI8x16MinU = 0x77;
let kExprI8x16MaxS = 0x78;
let kExprI8x16MaxU = 0x79;
+let kExprF64x2Trunc = 0x7a;
let kExprI8x16RoundingAverageU = 0x7b;
+let kExprI16x8ExtAddPairwiseI8x16S = 0x7c;
+let kExprI16x8ExtAddPairwiseI8x16U = 0x7d;
+let kExprI32x4ExtAddPairwiseI16x8S = 0x7e;
+let kExprI32x4ExtAddPairwiseI16x8U = 0x7f;
let kExprI16x8Abs = 0x80;
let kExprI16x8Neg = 0x81;
-let kExprV16x8AllTrue = 0x83;
+let kExprI16x8Q15MulRSatS = 0x82;
+let kExprI16x8AllTrue = 0x83;
+let kExprI16x8BitMask = 0x84;
let kExprI16x8SConvertI32x4 = 0x85;
let kExprI16x8UConvertI32x4 = 0x86;
let kExprI16x8SConvertI8x16Low = 0x87;
@@ -660,15 +712,21 @@ let kExprI16x8AddSatU = 0x90;
let kExprI16x8Sub = 0x91;
let kExprI16x8SubSatS = 0x92;
let kExprI16x8SubSatU = 0x93;
+let kExprF64x2NearestInt = 0x94;
let kExprI16x8Mul = 0x95;
let kExprI16x8MinS = 0x96;
let kExprI16x8MinU = 0x97;
let kExprI16x8MaxS = 0x98;
let kExprI16x8MaxU = 0x99;
let kExprI16x8RoundingAverageU = 0x9b;
+let kExprI16x8ExtMulLowI8x16S = 0x9c;
+let kExprI16x8ExtMulHighI8x16S = 0x9d;
+let kExprI16x8ExtMulLowI8x16U = 0x9e;
+let kExprI16x8ExtMulHighI8x16U = 0x9f;
let kExprI32x4Abs = 0xa0;
let kExprI32x4Neg = 0xa1;
-let kExprV32x4AllTrue = 0xa3;
+let kExprI32x4AllTrue = 0xa3;
+let kExprI32x4BitMask = 0xa4;
let kExprI32x4SConvertI16x8Low = 0xa7;
let kExprI32x4SConvertI16x8High = 0xa8;
let kExprI32x4UConvertI16x8Low = 0xa9;
@@ -683,14 +741,35 @@ let kExprI32x4MinS = 0xb6;
let kExprI32x4MinU = 0xb7;
let kExprI32x4MaxS = 0xb8;
let kExprI32x4MaxU = 0xb9;
+let kExprI32x4DotI16x8S = 0xba;
+let kExprI32x4ExtMulLowI16x8S = 0xbc;
+let kExprI32x4ExtMulHighI16x8S = 0xbd;
+let kExprI32x4ExtMulLowI16x8U = 0xbe;
+let kExprI32x4ExtMulHighI16x8U = 0xbf;
+let kExprI64x2Abs = 0xc0;
let kExprI64x2Neg = 0xc1;
+let kExprI64x2AllTrue = 0xc3;
+let kExprI64x2BitMask = 0xc4;
+let kExprI64x2SConvertI32x4Low = 0xc7;
+let kExprI64x2SConvertI32x4High = 0xc8;
+let kExprI64x2UConvertI32x4Low = 0xc9;
+let kExprI64x2UConvertI32x4High = 0xca;
let kExprI64x2Shl = 0xcb;
let kExprI64x2ShrS = 0xcc;
let kExprI64x2ShrU = 0xcd;
let kExprI64x2Add = 0xce;
let kExprI64x2Sub = 0xd1;
let kExprI64x2Mul = 0xd5;
-let kExprI64x2ExtMulHighI32x4U = 0xd7;
+let kExprI64x2Eq = 0xd6;
+let kExprI64x2Ne = 0xd7;
+let kExprI64x2LtS = 0xd8;
+let kExprI64x2GtS = 0xd9;
+let kExprI64x2LeS = 0xda;
+let kExprI64x2GeS = 0xdb;
+let kExprI64x2ExtMulLowI32x4S = 0xdc;
+let kExprI64x2ExtMulHighI32x4S = 0xdd;
+let kExprI64x2ExtMulLowI32x4U = 0xde;
+let kExprI64x2ExtMulHighI32x4U = 0xdf;
let kExprF32x4Abs = 0xe0;
let kExprF32x4Neg = 0xe1;
let kExprF32x4Sqrt = 0xe3;
@@ -700,6 +779,8 @@ let kExprF32x4Mul = 0xe6;
let kExprF32x4Div = 0xe7;
let kExprF32x4Min = 0xe8;
let kExprF32x4Max = 0xe9;
+let kExprF32x4Pmin = 0xea;
+let kExprF32x4Pmax = 0xeb;
let kExprF64x2Abs = 0xec;
let kExprF64x2Neg = 0xed;
let kExprF64x2Sqrt = 0xef;
@@ -709,10 +790,16 @@ let kExprF64x2Mul = 0xf2;
let kExprF64x2Div = 0xf3;
let kExprF64x2Min = 0xf4;
let kExprF64x2Max = 0xf5;
+let kExprF64x2Pmin = 0xf6;
+let kExprF64x2Pmax = 0xf7;
let kExprI32x4SConvertF32x4 = 0xf8;
let kExprI32x4UConvertF32x4 = 0xf9;
let kExprF32x4SConvertI32x4 = 0xfa;
let kExprF32x4UConvertI32x4 = 0xfb;
+let kExprI32x4TruncSatF64x2SZero = 0xfc;
+let kExprI32x4TruncSatF64x2UZero = 0xfd;
+let kExprF64x2ConvertLowI32x4S = 0xfe;
+let kExprF64x2ConvertLowI32x4U = 0xff;
// Compilation hint constants.
let kCompilationHintStrategyDefault = 0x00;
@@ -723,32 +810,32 @@ let kCompilationHintTierDefault = 0x00;
let kCompilationHintTierBaseline = 0x01;
let kCompilationHintTierOptimized = 0x02;
-let kTrapUnreachable = 0;
-let kTrapMemOutOfBounds = 1;
-let kTrapDivByZero = 2;
-let kTrapDivUnrepresentable = 3;
-let kTrapRemByZero = 4;
+let kTrapUnreachable = 0;
+let kTrapMemOutOfBounds = 1;
+let kTrapDivByZero = 2;
+let kTrapDivUnrepresentable = 3;
+let kTrapRemByZero = 4;
let kTrapFloatUnrepresentable = 5;
-let kTrapTableOutOfBounds = 6;
-let kTrapFuncSigMismatch = 7;
-let kTrapUnalignedAccess = 8;
-let kTrapDataSegmentDropped = 9;
-let kTrapElemSegmentDropped = 10;
-let kTrapRethrowNull = 11;
+let kTrapTableOutOfBounds = 6;
+let kTrapFuncSigMismatch = 7;
+let kTrapUnalignedAccess = 8;
+let kTrapDataSegmentDropped = 9;
+let kTrapElemSegmentDropped = 10;
+let kTrapRethrowNull = 11;
let kTrapMsgs = [
- "unreachable",
- "memory access out of bounds",
- "divide by zero",
- "divide result unrepresentable",
- "remainder by zero",
- "float unrepresentable in integer range",
- "table index is out of bounds",
- "function signature mismatch",
- "operation does not support unaligned accesses",
- "data segment has been dropped",
- "element segment has been dropped",
- "rethrowing null value"
+ 'unreachable',
+ 'memory access out of bounds',
+ 'divide by zero',
+ 'divide result unrepresentable',
+ 'remainder by zero',
+ 'float unrepresentable in integer range',
+ 'table index is out of bounds',
+ 'function signature mismatch',
+ 'operation does not support unaligned accesses',
+ 'data segment has been dropped',
+ 'element segment has been dropped',
+ 'rethrowing null value'
];
function assertTraps(trap, code) {
@@ -808,7 +895,7 @@ class Binary {
}
this.buffer[this.length++] = v | 0x80;
}
- throw new Error("Leb value exceeds maximum length of " + max_len);
+ throw new Error('Leb value exceeds maximum length of ' + max_len);
}
emit_u32v(val) {
@@ -843,8 +930,9 @@ class Binary {
}
emit_type(type) {
- if ((typeof type) == "number") this.emit_u8(type);
- else {
+ if ((typeof type) == 'number') {
+ this.emit_u8(type);
+ } else {
this.emit_u8(type.opcode);
if ('depth' in type) this.emit_u8(type.depth);
this.emit_u32v(type.index);
@@ -887,7 +975,7 @@ class WasmFunctionBuilder {
numLocalNames() {
let num_local_names = 0;
for (let loc_name of this.local_names) {
- if (typeof loc_name == "string") ++num_local_names;
+ if (typeof loc_name == 'string') ++num_local_names;
}
return num_local_names;
}
@@ -909,8 +997,10 @@ class WasmFunctionBuilder {
addBody(body) {
for (let b of body) {
- if (typeof b !== 'number' || (b & (~0xFF)) !== 0 )
- throw new Error('invalid body (entries must be 8 bit numbers): ' + body);
+ if (typeof b !== 'number' || (b & (~0xFF)) !== 0) {
+ throw new Error(
+ 'invalid body (entries must be 8 bit numbers): ' + body);
+ }
}
this.body = body.slice();
// Automatically add the end for the function block to the body.
@@ -954,37 +1044,39 @@ class WasmGlobalBuilder {
}
exportAs(name) {
- this.module.exports.push({name: name, kind: kExternalGlobal,
- index: this.index});
+ this.module.exports.push(
+ {name: name, kind: kExternalGlobal, index: this.index});
return this;
}
}
class WasmTableBuilder {
- constructor(module, type, initial_size, max_size) {
+ constructor(module, type, initial_size, max_size, init_func_index) {
this.module = module;
this.type = type;
this.initial_size = initial_size;
this.has_max = max_size != undefined;
this.max_size = max_size;
+ this.init_func_index = init_func_index;
+ this.has_init = init_func_index != undefined;
}
exportAs(name) {
- this.module.exports.push({name: name, kind: kExternalTable,
- index: this.index});
+ this.module.exports.push(
+ {name: name, kind: kExternalTable, index: this.index});
return this;
}
}
function makeField(type, mutability) {
- assertEquals("boolean", typeof mutability,
- "field mutability must be boolean");
+ assertEquals(
+ 'boolean', typeof mutability, 'field mutability must be boolean');
return {type: type, mutability: mutability};
}
class WasmStruct {
constructor(fields) {
- assertTrue(Array.isArray(fields), "struct fields must be an array");
+ assertTrue(Array.isArray(fields), 'struct fields must be an array');
this.fields = fields;
}
}
@@ -1072,8 +1164,8 @@ class WasmModuleBuilder {
addType(type) {
this.types.push(type);
- var pl = type.params.length; // should have params
- var rl = type.results.length; // should have results
+ var pl = type.params.length; // should have params
+ var rl = type.results.length; // should have results
return this.types.length - 1;
}
@@ -1094,19 +1186,21 @@ class WasmModuleBuilder {
return glob;
}
- addTable(type, initial_size, max_size = undefined) {
+ addTable(
+ type, initial_size, max_size = undefined, init_func_index = undefined) {
if (type == kWasmI32 || type == kWasmI64 || type == kWasmF32 ||
- type == kWasmF64 || type == kWasmS128 || type == kWasmStmt) {
+ type == kWasmF64 || type == kWasmS128 || type == kWasmVoid) {
throw new Error('Tables must be of a reference type');
}
- let table = new WasmTableBuilder(this, type, initial_size, max_size);
+ let table = new WasmTableBuilder(
+ this, type, initial_size, max_size, init_func_index);
table.index = this.tables.length + this.num_imported_tables;
this.tables.push(table);
return table;
}
addException(type) {
- let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
let except_index = this.exceptions.length + this.num_imported_exceptions;
this.exceptions.push(type_index);
return except_index;
@@ -1114,10 +1208,12 @@ class WasmModuleBuilder {
addFunction(name, type, arg_names) {
arg_names = arg_names || [];
- let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
let num_args = this.types[type_index].params.length;
- if (num_args < arg_names.length) throw new Error("too many arg names provided");
- if (num_args > arg_names.length) arg_names.push(num_args - arg_names.length);
+ if (num_args < arg_names.length)
+ throw new Error('too many arg names provided');
+ if (num_args > arg_names.length)
+ arg_names.push(num_args - arg_names.length);
let func = new WasmFunctionBuilder(this, name, type_index, arg_names);
func.index = this.functions.length + this.num_imported_funcs;
this.functions.push(func);
@@ -1128,9 +1224,13 @@ class WasmModuleBuilder {
if (this.functions.length != 0) {
throw new Error('Imported functions must be declared before local ones');
}
- let type_index = (typeof type) == "number" ? type : this.addType(type);
- this.imports.push({module: module, name: name, kind: kExternalFunction,
- type_index: type_index});
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
+ this.imports.push({
+ module: module,
+ name: name,
+ kind: kExternalFunction,
+ type_index: type_index
+ });
return this.num_imported_funcs++;
}
@@ -1138,15 +1238,26 @@ class WasmModuleBuilder {
if (this.globals.length != 0) {
throw new Error('Imported globals must be declared before local ones');
}
- let o = {module: module, name: name, kind: kExternalGlobal, type: type,
- mutable: mutable};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalGlobal,
+ type: type,
+ mutable: mutable
+ };
this.imports.push(o);
return this.num_imported_globals++;
}
addImportedMemory(module, name, initial = 0, maximum, shared) {
- let o = {module: module, name: name, kind: kExternalMemory,
- initial: initial, maximum: maximum, shared: shared};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalMemory,
+ initial: initial,
+ maximum: maximum,
+ shared: shared
+ };
this.imports.push(o);
return this;
}
@@ -1155,8 +1266,14 @@ class WasmModuleBuilder {
if (this.tables.length != 0) {
throw new Error('Imported tables must be declared before local ones');
}
- let o = {module: module, name: name, kind: kExternalTable, initial: initial,
- maximum: maximum, type: type || kWasmFuncRef};
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalTable,
+ initial: initial,
+ maximum: maximum,
+ type: type || kWasmFuncRef
+ };
this.imports.push(o);
return this.num_imported_tables++;
}
@@ -1165,9 +1282,13 @@ class WasmModuleBuilder {
if (this.exceptions.length != 0) {
throw new Error('Imported exceptions must be declared before local ones');
}
- let type_index = (typeof type) == "number" ? type : this.addType(type);
- let o = {module: module, name: name, kind: kExternalException,
- type_index: type_index};
+ let type_index = (typeof type) == 'number' ? type : this.addType(type);
+ let o = {
+ module: module,
+ name: name,
+ kind: kExternalException,
+ type_index: type_index
+ };
this.imports.push(o);
return this.num_imported_exceptions++;
}
@@ -1181,7 +1302,7 @@ class WasmModuleBuilder {
if (index == undefined && kind != kExternalTable &&
kind != kExternalMemory) {
throw new Error(
- 'Index for exports other than tables/memories must be provided');
+ 'Index for exports other than tables/memories must be provided');
}
if (index !== undefined && (typeof index) != 'number') {
throw new Error('Index for exports must be a number')
@@ -1191,8 +1312,11 @@ class WasmModuleBuilder {
}
setCompilationHint(strategy, baselineTier, topTier, index) {
- this.compilation_hints[index] = {strategy: strategy, baselineTier:
- baselineTier, topTier: topTier};
+ this.compilation_hints[index] = {
+ strategy: strategy,
+ baselineTier: baselineTier,
+ topTier: topTier
+ };
return this;
}
@@ -1256,7 +1380,7 @@ class WasmModuleBuilder {
setTableBounds(min, max = undefined) {
if (this.tables.length != 0) {
- throw new Error("The table bounds of table '0' have already been set.");
+ throw new Error('The table bounds of table \'0\' have already been set.');
}
this.addTable(kWasmAnyFunc, min, max);
return this;
@@ -1276,7 +1400,7 @@ class WasmModuleBuilder {
// Add type section
if (wasm.types.length > 0) {
- if (debug) print("emitting types @ " + binary.length);
+ if (debug) print('emitting types @ ' + binary.length);
binary.emit_section(kTypeSectionCode, section => {
section.emit_u32v(wasm.types.length);
for (let type of wasm.types) {
@@ -1290,7 +1414,7 @@ class WasmModuleBuilder {
} else if (type instanceof WasmArray) {
section.emit_u8(kWasmArrayTypeForm);
section.emit_type(type.type);
- section.emit_u8(1); // Only mutable arrays supported currently.
+ section.emit_u8(1); // Only mutable arrays supported currently.
} else {
section.emit_u8(kWasmFunctionTypeForm);
section.emit_u32v(type.params.length);
@@ -1308,7 +1432,7 @@ class WasmModuleBuilder {
// Add imports section
if (wasm.imports.length > 0) {
- if (debug) print("emitting imports @ " + binary.length);
+ if (debug) print('emitting imports @ ' + binary.length);
binary.emit_section(kImportSectionCode, section => {
section.emit_u32v(wasm.imports.length);
for (let imp of wasm.imports) {
@@ -1321,26 +1445,26 @@ class WasmModuleBuilder {
section.emit_type(imp.type);
section.emit_u8(imp.mutable);
} else if (imp.kind == kExternalMemory) {
- var has_max = (typeof imp.maximum) != "undefined";
- var is_shared = (typeof imp.shared) != "undefined";
+ var has_max = (typeof imp.maximum) != 'undefined';
+ var is_shared = (typeof imp.shared) != 'undefined';
if (is_shared) {
- section.emit_u8(has_max ? 3 : 2); // flags
+ section.emit_u8(has_max ? 3 : 2); // flags
} else {
- section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u8(has_max ? 1 : 0); // flags
}
- section.emit_u32v(imp.initial); // initial
- if (has_max) section.emit_u32v(imp.maximum); // maximum
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalTable) {
section.emit_type(imp.type);
- var has_max = (typeof imp.maximum) != "undefined";
- section.emit_u8(has_max ? 1 : 0); // flags
- section.emit_u32v(imp.initial); // initial
- if (has_max) section.emit_u32v(imp.maximum); // maximum
+ var has_max = (typeof imp.maximum) != 'undefined';
+ section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalException) {
section.emit_u32v(kExceptionAttribute);
section.emit_u32v(imp.type_index);
} else {
- throw new Error("unknown/unsupported import kind " + imp.kind);
+ throw new Error('unknown/unsupported import kind ' + imp.kind);
}
}
});
@@ -1348,7 +1472,7 @@ class WasmModuleBuilder {
// Add functions declarations
if (wasm.functions.length > 0) {
- if (debug) print("emitting function decls @ " + binary.length);
+ if (debug) print('emitting function decls @ ' + binary.length);
binary.emit_section(kFunctionSectionCode, section => {
section.emit_u32v(wasm.functions.length);
for (let func of wasm.functions) {
@@ -1359,7 +1483,7 @@ class WasmModuleBuilder {
// Add table section
if (wasm.tables.length > 0) {
- if (debug) print ("emitting tables @ " + binary.length);
+ if (debug) print('emitting tables @ ' + binary.length);
binary.emit_section(kTableSectionCode, section => {
section.emit_u32v(wasm.tables.length);
for (let table of wasm.tables) {
@@ -1367,13 +1491,18 @@ class WasmModuleBuilder {
section.emit_u8(table.has_max);
section.emit_u32v(table.initial_size);
if (table.has_max) section.emit_u32v(table.max_size);
+ if (table.has_init) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(table.init_func_index);
+ section.emit_u8(kExprEnd);
+ }
}
});
}
// Add memory section
if (wasm.memory !== undefined) {
- if (debug) print("emitting memory @ " + binary.length);
+ if (debug) print('emitting memory @ ' + binary.length);
binary.emit_section(kMemorySectionCode, section => {
section.emit_u8(1); // one memory entry
const has_max = wasm.memory.max !== undefined;
@@ -1398,7 +1527,7 @@ class WasmModuleBuilder {
// Add event section.
if (wasm.exceptions.length > 0) {
- if (debug) print("emitting events @ " + binary.length);
+ if (debug) print('emitting events @ ' + binary.length);
binary.emit_section(kExceptionSectionCode, section => {
section.emit_u32v(wasm.exceptions.length);
for (let type_index of wasm.exceptions) {
@@ -1410,55 +1539,55 @@ class WasmModuleBuilder {
// Add global section.
if (wasm.globals.length > 0) {
- if (debug) print ("emitting globals @ " + binary.length);
+ if (debug) print('emitting globals @ ' + binary.length);
binary.emit_section(kGlobalSectionCode, section => {
section.emit_u32v(wasm.globals.length);
for (let global of wasm.globals) {
section.emit_type(global.type);
section.emit_u8(global.mutable);
- if ((typeof global.init_index) == "undefined") {
+ if ((typeof global.init_index) == 'undefined') {
// Emit a constant initializer.
switch (global.type) {
- case kWasmI32:
- section.emit_u8(kExprI32Const);
- section.emit_u32v(global.init);
- break;
- case kWasmI64:
- section.emit_u8(kExprI64Const);
- section.emit_u64v(global.init);
- break;
- case kWasmF32:
- section.emit_bytes(wasmF32Const(global.init));
- break;
- case kWasmF64:
- section.emit_bytes(wasmF64Const(global.init));
- break;
- case kWasmS128:
- section.emit_bytes(wasmS128Const(global.init));
- break;
- case kWasmExternRef:
- section.emit_u8(kExprRefNull);
- section.emit_u8(kWasmExternRef);
- assertEquals(global.function_index, undefined);
- break;
- case kWasmAnyFunc:
- if (global.function_index !== undefined) {
- section.emit_u8(kExprRefFunc);
- section.emit_u32v(global.function_index);
- } else {
- section.emit_u8(kExprRefNull);
- section.emit_u8(kWasmAnyFunc);
- }
- break;
- default:
- if (global.function_index !== undefined) {
- section.emit_u8(kExprRefFunc);
- section.emit_u32v(global.function_index);
- } else {
+ case kWasmI32:
+ section.emit_u8(kExprI32Const);
+ section.emit_u32v(global.init);
+ break;
+ case kWasmI64:
+ section.emit_u8(kExprI64Const);
+ section.emit_u64v(global.init);
+ break;
+ case kWasmF32:
+ section.emit_bytes(wasmF32Const(global.init));
+ break;
+ case kWasmF64:
+ section.emit_bytes(wasmF64Const(global.init));
+ break;
+ case kWasmS128:
+ section.emit_bytes(wasmS128Const(global.init));
+ break;
+ case kWasmExternRef:
section.emit_u8(kExprRefNull);
- section.emit_u32v(global.type.index);
- }
- break;
+ section.emit_u8(kWasmExternRef);
+ assertEquals(global.function_index, undefined);
+ break;
+ case kWasmAnyFunc:
+ if (global.function_index !== undefined) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(global.function_index);
+ } else {
+ section.emit_u8(kExprRefNull);
+ section.emit_u8(kWasmAnyFunc);
+ }
+ break;
+ default:
+ if (global.function_index !== undefined) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(global.function_index);
+ } else {
+ section.emit_u8(kExprRefNull);
+ section.emit_u32v(global.type.index);
+ }
+ break;
}
} else {
// Emit a global-index initializer.
@@ -1474,7 +1603,7 @@ class WasmModuleBuilder {
var mem_export = (wasm.memory !== undefined && wasm.memory.exported);
var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
if (exports_count > 0) {
- if (debug) print("emitting exports @ " + binary.length);
+ if (debug) print('emitting exports @ ' + binary.length);
binary.emit_section(kExportSectionCode, section => {
section.emit_u32v(exports_count);
for (let exp of wasm.exports) {
@@ -1483,7 +1612,7 @@ class WasmModuleBuilder {
section.emit_u32v(exp.index);
}
if (mem_export) {
- section.emit_string("memory");
+ section.emit_string('memory');
section.emit_u8(kExternalMemory);
section.emit_u8(0);
}
@@ -1492,7 +1621,7 @@ class WasmModuleBuilder {
// Add start function section.
if (wasm.start_index !== undefined) {
- if (debug) print("emitting start function @ " + binary.length);
+ if (debug) print('emitting start function @ ' + binary.length);
binary.emit_section(kStartSectionCode, section => {
section.emit_u32v(wasm.start_index);
});
@@ -1500,7 +1629,7 @@ class WasmModuleBuilder {
// Add element segments
if (wasm.element_segments.length > 0) {
- if (debug) print("emitting element segments @ " + binary.length);
+ if (debug) print('emitting element segments @ ' + binary.length);
binary.emit_section(kElementSectionCode, section => {
var inits = wasm.element_segments;
section.emit_u32v(inits.length);
@@ -1570,7 +1699,7 @@ class WasmModuleBuilder {
// If there are compilation hints add a custom section 'compilationHints'
// after the function section and before the code section.
if (wasm.compilation_hints.length > 0) {
- if (debug) print("emitting compilation hints @ " + binary.length);
+ if (debug) print('emitting compilation hints @ ' + binary.length);
// Build custom section payload.
let payloadBinary = new Binary();
let implicit_compilation_hints_count = wasm.functions.length;
@@ -1585,18 +1714,18 @@ class WasmModuleBuilder {
for (let i = 0; i < implicit_compilation_hints_count; i++) {
let index = wasm.num_imported_funcs + i;
var hintByte;
- if(index in wasm.compilation_hints) {
+ if (index in wasm.compilation_hints) {
let hint = wasm.compilation_hints[index];
- hintByte = hint.strategy | (hint.baselineTier << 2) |
- (hint.topTier << 4);
- } else{
+ hintByte =
+ hint.strategy | (hint.baselineTier << 2) | (hint.topTier << 4);
+ } else {
hintByte = defaultHintByte;
}
payloadBinary.emit_u8(hintByte);
}
// Finalize as custom section.
- let name = "compilationHints";
+ let name = 'compilationHints';
let bytes = this.createCustomSection(name, payloadBinary.trunc_buffer());
binary.emit_bytes(bytes);
}
@@ -1604,7 +1733,7 @@ class WasmModuleBuilder {
// Add function bodies.
if (wasm.functions.length > 0) {
// emit function bodies
- if (debug) print("emitting code @ " + binary.length);
+ if (debug) print('emitting code @ ' + binary.length);
let section_length = 0;
binary.emit_section(kCodeSectionCode, section => {
section.emit_u32v(wasm.functions.length);
@@ -1633,7 +1762,7 @@ class WasmModuleBuilder {
// Add data segments.
if (wasm.data_segments.length > 0) {
- if (debug) print("emitting data segments @ " + binary.length);
+ if (debug) print('emitting data segments @ ' + binary.length);
binary.emit_section(kDataSectionCode, section => {
section.emit_u32v(wasm.data_segments.length);
for (let seg of wasm.data_segments) {
@@ -1660,7 +1789,7 @@ class WasmModuleBuilder {
// Add any explicitly added sections
for (let exp of wasm.explicit) {
- if (debug) print("emitting explicit @ " + binary.length);
+ if (debug) print('emitting explicit @ ' + binary.length);
binary.emit_bytes(exp);
}
@@ -1703,7 +1832,7 @@ class WasmModuleBuilder {
name_section.emit_u32v(func.numLocalNames());
let name_index = 0;
for (let i = 0; i < func.local_names.length; ++i) {
- if (typeof func.local_names[i] == "string") {
+ if (typeof func.local_names[i] == 'string') {
name_section.emit_u32v(name_index);
name_section.emit_string(func.local_names[i]);
name_index++;
@@ -1772,8 +1901,8 @@ function wasmF64Const(f) {
// Write in little-endian order at offset 0.
data_view.setFloat64(0, f, true);
return [
- kExprF64Const, byte_view[0], byte_view[1], byte_view[2],
- byte_view[3], byte_view[4], byte_view[5], byte_view[6], byte_view[7]
+ kExprF64Const, byte_view[0], byte_view[1], byte_view[2], byte_view[3],
+ byte_view[4], byte_view[5], byte_view[6], byte_view[7]
];
}
diff --git a/deps/v8/test/mkgrokdump/BUILD.gn b/deps/v8/test/mkgrokdump/BUILD.gn
index 67f8f98e7a..1b06b87f97 100644
--- a/deps/v8/test/mkgrokdump/BUILD.gn
+++ b/deps/v8/test/mkgrokdump/BUILD.gn
@@ -19,7 +19,6 @@ v8_executable("mkgrokdump") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "../..:v8_wrappers",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index f31091f3d0..8cae693b58 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -578,12 +578,17 @@
'built-ins/String/prototype/at/*': [FAIL],
'built-ins/TypedArray/prototype/at/*': [FAIL],
- # Temporarily disabled until upstream tests are changed to use /d
- 'built-ins/RegExp/match-indices/*': [FAIL],
+ # http://crbug/v8/11530
+ 'built-ins/Function/internals/Call/class-ctor-realm': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=11411
- 'intl402/DateTimeFormat/prototype/formatRange/date-same-returns-single-date': [FAIL],
- 'intl402/DateTimeFormat/prototype/formatRangeToParts/date-same-returns-single-date': [FAIL],
+ # http://crbug/v8/11531
+ 'built-ins/RegExp/prototype/flags/get-order': [FAIL],
+
+ # http://crbug/v8/11532
+ 'language/expressions/object/dstr/object-rest-proxy-gopd-not-called-on-excluded-keys': [FAIL],
+
+ # http://crbug/v8/11533
+ 'language/statements/class/subclass/default-constructor-spread-override': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -734,8 +739,6 @@
'built-ins/ArrayBuffer/length-is-too-large-throws': [SKIP],
'built-ins/SharedArrayBuffer/allocation-limit': [SKIP],
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=11438
- 'intl402/DateTimeFormat/timezone-invalid' : [SKIP],
}], # asan == True or msan == True or tsan == True
['system == android', {
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 24e02607d6..42aeac4723 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -45,7 +45,6 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
- 'String.prototype.replaceAll': '--harmony_string_replaceall',
'Symbol.prototype.description': '--harmony-symbol-description',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
@@ -55,8 +54,6 @@ FEATURE_FLAGS = {
'regexp-match-indices': '--harmony-regexp-match-indices',
# https://github.com/tc39/test262/pull/2395
'regexp-named-groups': '--harmony-regexp-match-indices',
- 'logical-assignment-operators': '--harmony-logical-assignment',
- 'Atomics.waitAsync': '--harmony-atomics-waitasync',
}
SKIPPED_FEATURES = set([])
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 1940dfa77e..43858603dc 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -10,7 +10,7 @@ if (is_fuchsia) {
cr_fuchsia_package("v8_unittests_pkg") {
testonly = true
binary = ":unittests"
- manifest = "//build/config/fuchsia/tests-with-exec.cmx"
+ manifest = "../../gni/v8.cmx"
package_name_override = "v8_unittests"
}
@@ -92,6 +92,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/cross-thread-persistent-unittest.cc",
"heap/cppgc/custom-spaces-unittest.cc",
"heap/cppgc/ephemeron-pair-unittest.cc",
+ "heap/cppgc/explicit-management-unittest.cc",
"heap/cppgc/finalizer-trait-unittest.cc",
"heap/cppgc/free-list-unittest.cc",
"heap/cppgc/garbage-collected-unittest.cc",
@@ -193,7 +194,6 @@ v8_source_set("unittests_sources") {
sources = [
"../../test/common/assembler-tester.h",
- "../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"api/access-check-unittest.cc",
@@ -226,6 +226,8 @@ v8_source_set("unittests_sources") {
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
"base/vlq-base64-unittest.cc",
+ "base/vlq-unittest.cc",
+ "codegen/aligned-slot-allocator-unittest.cc",
"codegen/code-stub-assembler-unittest.cc",
"codegen/code-stub-assembler-unittest.h",
"codegen/register-configuration-unittest.cc",
@@ -246,16 +248,17 @@ v8_source_set("unittests_sources") {
"compiler/constant-folding-reducer-unittest.cc",
"compiler/control-equivalence-unittest.cc",
"compiler/control-flow-optimizer-unittest.cc",
+ "compiler/csa-load-elimination-unittest.cc",
"compiler/dead-code-elimination-unittest.cc",
"compiler/decompression-optimizer-unittest.cc",
"compiler/diamond-unittest.cc",
"compiler/effect-control-linearizer-unittest.cc",
+ "compiler/frame-unittest.cc",
"compiler/graph-reducer-unittest.cc",
"compiler/graph-reducer-unittest.h",
"compiler/graph-trimmer-unittest.cc",
"compiler/graph-unittest.cc",
"compiler/graph-unittest.h",
- "compiler/int64-lowering-unittest.cc",
"compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
"compiler/js-intrinsic-lowering-unittest.cc",
@@ -308,7 +311,6 @@ v8_source_set("unittests_sources") {
"heap/heap-utils.cc",
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
- "heap/item-parallel-job-unittest.cc",
"heap/list-unittest.cc",
"heap/local-factory-unittest.cc",
"heap/local-heap-unittest.cc",
@@ -350,7 +352,6 @@ v8_source_set("unittests_sources") {
"logging/counters-unittest.cc",
"numbers/bigint-unittest.cc",
"numbers/conversions-unittest.cc",
- "objects/backing-store-unittest.cc",
"objects/object-unittest.cc",
"objects/osr-optimized-code-cache-unittest.cc",
"objects/value-serializer-unittest.cc",
@@ -381,21 +382,6 @@ v8_source_set("unittests_sources") {
"utils/locked-queue-unittest.cc",
"utils/utils-unittest.cc",
"utils/vector-unittest.cc",
- "wasm/control-transfer-unittest.cc",
- "wasm/decoder-unittest.cc",
- "wasm/function-body-decoder-unittest.cc",
- "wasm/leb-helper-unittest.cc",
- "wasm/loop-assignment-analysis-unittest.cc",
- "wasm/module-decoder-memory64-unittest.cc",
- "wasm/module-decoder-unittest.cc",
- "wasm/simd-shuffle-unittest.cc",
- "wasm/streaming-decoder-unittest.cc",
- "wasm/subtyping-unittest.cc",
- "wasm/wasm-code-manager-unittest.cc",
- "wasm/wasm-compiler-unittest.cc",
- "wasm/wasm-macro-gen-unittest.cc",
- "wasm/wasm-module-builder-unittest.cc",
- "wasm/wasm-module-sourcemap-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
@@ -403,8 +389,27 @@ v8_source_set("unittests_sources") {
if (v8_enable_webassembly) {
sources += [
+ "../../test/common/wasm/wasm-macro-gen.h",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
+ "compiler/int64-lowering-unittest.cc",
+ "objects/wasm-backing-store-unittest.cc",
+ "wasm/control-transfer-unittest.cc",
+ "wasm/decoder-unittest.cc",
+ "wasm/function-body-decoder-unittest.cc",
+ "wasm/leb-helper-unittest.cc",
+ "wasm/liftoff-register-unittests.cc",
+ "wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/module-decoder-memory64-unittest.cc",
+ "wasm/module-decoder-unittest.cc",
+ "wasm/simd-shuffle-unittest.cc",
+ "wasm/streaming-decoder-unittest.cc",
+ "wasm/subtyping-unittest.cc",
+ "wasm/wasm-code-manager-unittest.cc",
+ "wasm/wasm-compiler-unittest.cc",
+ "wasm/wasm-macro-gen-unittest.cc",
+ "wasm/wasm-module-builder-unittest.cc",
+ "wasm/wasm-module-sourcemap-unittest.cc",
]
}
@@ -450,8 +455,10 @@ v8_source_set("unittests_sources") {
sources += [
"assembler/turbo-assembler-x64-unittest.cc",
"compiler/x64/instruction-selector-x64-unittest.cc",
- "wasm/trap-handler-x64-unittest.cc",
]
+ if (v8_enable_webassembly) {
+ sources += [ "wasm/trap-handler-x64-unittest.cc" ]
+ }
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
"assembler/turbo-assembler-ppc-unittest.cc",
@@ -464,11 +471,11 @@ v8_source_set("unittests_sources") {
]
}
- if (is_posix) {
+ if (is_posix && v8_enable_webassembly) {
sources += [ "wasm/trap-handler-posix-unittest.cc" ]
}
- if (is_win) {
+ if (is_win && v8_enable_webassembly) {
sources += [ "wasm/trap-handler-win-unittest.cc" ]
}
@@ -484,14 +491,16 @@ v8_source_set("unittests_sources") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../..:v8_shared_internal_headers",
- "../..:v8_wrappers",
- "../..:wasm_test_common",
"../../third_party/inspector_protocol:crdtp_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
]
+ if (v8_enable_webassembly) {
+ deps += [ "../..:wasm_test_common" ]
+ }
+
if (is_win) {
# This warning is benignly triggered by the U16 and U32 macros in
# bytecode-utils.h.
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index cdcce68efd..1216100b23 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "include/v8.h"
+#include "src/debug/debug.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index 1146bd5d83..bbfcfd984b 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -230,11 +230,26 @@ void operator<<(std::ostream& str, TestEnum6 val) {
TEST(LoggingDeathTest, OutputEnumWithOutputOperator) {
ASSERT_DEATH_IF_SUPPORTED(
([&] { CHECK_EQ(TEST_A, TEST_B); })(),
- FailureMessage("Check failed: TEST_A == TEST_B", "A", "B"));
+ FailureMessage("Check failed: TEST_A == TEST_B", "A (0)", "B (1)"));
ASSERT_DEATH_IF_SUPPORTED(
([&] { CHECK_GE(TestEnum6::TEST_C, TestEnum6::TEST_D); })(),
FailureMessage("Check failed: TestEnum6::TEST_C >= TestEnum6::TEST_D",
- "C", "D"));
+ "C (0)", "D (1)"));
+}
+
+enum TestEnum7 : uint8_t { A = 2, B = 7 };
+enum class TestEnum8 : int8_t { A, B };
+
+TEST(LoggingDeathTest, OutputSingleCharEnum) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ(TestEnum7::A, TestEnum7::B); })(),
+ FailureMessage("Check failed: TestEnum7::A == TestEnum7::B", "2", "7"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GT(TestEnum7::A, TestEnum7::B); })(),
+ FailureMessage("Check failed: TestEnum7::A > TestEnum7::B", "2", "7"));
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_GE(TestEnum8::A, TestEnum8::B); })(),
+ FailureMessage("Check failed: TestEnum8::A >= TestEnum8::B", "0", "1"));
}
TEST(LoggingDeathTest, OutputLongValues) {
@@ -328,6 +343,12 @@ TEST(LoggingTest, LogFunctionPointers) {
}
#endif // defined(DEBUG)
+TEST(LoggingDeathTest, CheckChars) {
+ ASSERT_DEATH_IF_SUPPORTED(
+ ([&] { CHECK_EQ('a', 'b'); })(),
+ FailureMessage("Check failed: 'a' == 'b'", "'97'", "'98'"));
+}
+
} // namespace logging_unittest
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/vlq-unittest.cc b/deps/v8/test/unittests/base/vlq-unittest.cc
new file mode 100644
index 0000000000..647873410a
--- /dev/null
+++ b/deps/v8/test/unittests/base/vlq-unittest.cc
@@ -0,0 +1,123 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/vlq.h"
+
+#include <cmath>
+#include <limits>
+
+#include "src/base/memory.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+int ExpectedBytesUsed(int64_t value, bool is_signed) {
+ uint64_t bits = value;
+ if (is_signed) {
+ bits = (value < 0 ? -value : value) << 1;
+ }
+ int num_bits = 0;
+ while (bits != 0) {
+ num_bits++;
+ bits >>= 1;
+ }
+ return std::max(1, static_cast<int>(ceil(static_cast<float>(num_bits) / 7)));
+}
+
+void TestVLQUnsignedEquals(uint32_t value) {
+ std::vector<byte> buffer;
+ VLQEncodeUnsigned(&buffer, value);
+ byte* data_start = buffer.data();
+ int index = 0;
+ int expected_bytes_used = ExpectedBytesUsed(value, false);
+ EXPECT_EQ(buffer.size(), static_cast<size_t>(expected_bytes_used));
+ EXPECT_EQ(value, VLQDecodeUnsigned(data_start, &index));
+ EXPECT_EQ(index, expected_bytes_used);
+}
+
+void TestVLQEquals(int32_t value) {
+ std::vector<byte> buffer;
+ VLQEncode(&buffer, value);
+ byte* data_start = buffer.data();
+ int index = 0;
+ int expected_bytes_used = ExpectedBytesUsed(value, true);
+ EXPECT_EQ(buffer.size(), static_cast<size_t>(expected_bytes_used));
+ EXPECT_EQ(value, VLQDecode(data_start, &index));
+ EXPECT_EQ(index, expected_bytes_used);
+}
+
+TEST(VLQ, Unsigned) {
+ TestVLQUnsignedEquals(0);
+ TestVLQUnsignedEquals(1);
+ TestVLQUnsignedEquals(63);
+ TestVLQUnsignedEquals(64);
+ TestVLQUnsignedEquals(127);
+ TestVLQUnsignedEquals(255);
+ TestVLQUnsignedEquals(256);
+}
+
+TEST(VLQ, Positive) {
+ TestVLQEquals(0);
+ TestVLQEquals(1);
+ TestVLQEquals(63);
+ TestVLQEquals(64);
+ TestVLQEquals(127);
+ TestVLQEquals(255);
+ TestVLQEquals(256);
+}
+
+TEST(VLQ, Negative) {
+ TestVLQEquals(-1);
+ TestVLQEquals(-63);
+ TestVLQEquals(-64);
+ TestVLQEquals(-127);
+ TestVLQEquals(-255);
+ TestVLQEquals(-256);
+}
+
+TEST(VLQ, LimitsUnsigned) {
+ TestVLQEquals(std::numeric_limits<uint8_t>::max());
+ TestVLQEquals(std::numeric_limits<uint8_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<uint8_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<uint16_t>::max());
+ TestVLQEquals(std::numeric_limits<uint16_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<uint16_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<uint32_t>::max());
+ TestVLQEquals(std::numeric_limits<uint32_t>::max() - 1);
+}
+
+TEST(VLQ, LimitsSigned) {
+ TestVLQEquals(std::numeric_limits<int8_t>::max());
+ TestVLQEquals(std::numeric_limits<int8_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::max());
+ TestVLQEquals(std::numeric_limits<int16_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::max() + 1);
+ TestVLQEquals(std::numeric_limits<int32_t>::max());
+ TestVLQEquals(std::numeric_limits<int32_t>::max() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::min());
+ TestVLQEquals(std::numeric_limits<int8_t>::min() - 1);
+ TestVLQEquals(std::numeric_limits<int8_t>::min() + 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::min());
+ TestVLQEquals(std::numeric_limits<int16_t>::min() - 1);
+ TestVLQEquals(std::numeric_limits<int16_t>::min() + 1);
+ // int32_t::min() is not supported.
+ TestVLQEquals(std::numeric_limits<int32_t>::min() + 1);
+}
+
+TEST(VLQ, Random) {
+ static constexpr int RANDOM_RUNS = 50;
+
+ base::RandomNumberGenerator rng(::testing::FLAGS_gtest_random_seed);
+ for (int i = 0; i < RANDOM_RUNS; ++i) {
+ TestVLQUnsignedEquals(rng.NextInt(std::numeric_limits<int32_t>::max()));
+ }
+ for (int i = 0; i < RANDOM_RUNS; ++i) {
+ TestVLQEquals(rng.NextInt());
+ }
+}
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc b/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc
new file mode 100644
index 0000000000..3b04f7888a
--- /dev/null
+++ b/deps/v8/test/unittests/codegen/aligned-slot-allocator-unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/aligned-slot-allocator.h"
+
+#include "src/base/bits.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+class AlignedSlotAllocatorUnitTest : public ::testing::Test {
+ public:
+ AlignedSlotAllocatorUnitTest() = default;
+ ~AlignedSlotAllocatorUnitTest() override = default;
+
+ // Helper method to test AlignedSlotAllocator::Allocate.
+ void Allocate(int size, int expected) {
+ int next = allocator_.NextSlot(size);
+ int result = allocator_.Allocate(size);
+ EXPECT_EQ(next, result); // NextSlot/Allocate are consistent.
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(0, result & (size - 1)); // result is aligned to size.
+ int slot_end = result + static_cast<int>(base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(size)));
+ EXPECT_LE(slot_end, allocator_.Size()); // allocator Size is beyond slot.
+ }
+
+ // Helper method to test AlignedSlotAllocator::AllocateUnaligned.
+ void AllocateUnaligned(int size, int expected, int expected1, int expected2,
+ int expected4) {
+ int size_before = allocator_.Size();
+ int result = allocator_.AllocateUnaligned(size);
+ EXPECT_EQ(size_before, result); // AllocateUnaligned/Size are consistent.
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(result + size, allocator_.Size());
+ EXPECT_EQ(expected1, allocator_.NextSlot(1));
+ EXPECT_EQ(expected2, allocator_.NextSlot(2));
+ EXPECT_EQ(expected4, allocator_.NextSlot(4));
+ }
+
+ AlignedSlotAllocator allocator_;
+};
+
+TEST_F(AlignedSlotAllocatorUnitTest, NumSlotsForWidth) {
+ constexpr int kSlotBytes = AlignedSlotAllocator::kSlotSize;
+ for (int slot_size = 1; slot_size <= 4 * kSlotBytes; ++slot_size) {
+ EXPECT_EQ(AlignedSlotAllocator::NumSlotsForWidth(slot_size),
+ (slot_size + kSlotBytes - 1) / kSlotBytes);
+ }
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate1) {
+ Allocate(1, 0);
+ EXPECT_EQ(2, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 1);
+ EXPECT_EQ(2, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 2);
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(1, 3);
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ // Make sure we use 1-fragments.
+ Allocate(1, 4);
+ Allocate(2, 6);
+ Allocate(1, 5);
+
+ // Make sure we use 2-fragments.
+ Allocate(2, 8);
+ Allocate(1, 10);
+ Allocate(1, 11);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate2) {
+ Allocate(2, 0);
+ EXPECT_EQ(2, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ Allocate(2, 2);
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+
+ // Make sure we use 2-fragments.
+ Allocate(1, 4);
+ Allocate(2, 6);
+ Allocate(2, 8);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Allocate4) {
+ Allocate(4, 0);
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+
+ Allocate(1, 4);
+ Allocate(4, 8);
+
+ Allocate(2, 6);
+ Allocate(4, 12);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, AllocateUnaligned) {
+ AllocateUnaligned(1, 0, 1, 2, 4);
+ AllocateUnaligned(1, 1, 2, 2, 4);
+
+ Allocate(1, 2);
+
+ AllocateUnaligned(2, 3, 5, 6, 8);
+
+ // Advance to leave 1- and 2- fragments below Size.
+ Allocate(4, 8);
+
+ // AllocateUnaligned should allocate at the end, and clear fragments.
+ AllocateUnaligned(0, 12, 12, 12, 12);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, LargeAllocateUnaligned) {
+ AllocateUnaligned(11, 0, 11, 12, 12);
+ AllocateUnaligned(11, 11, 22, 22, 24);
+ AllocateUnaligned(13, 22, 35, 36, 36);
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Size) {
+ allocator_.Allocate(1);
+ EXPECT_EQ(1, allocator_.Size());
+ // Allocate 2, leaving a fragment at 1. Size should be at 4.
+ allocator_.Allocate(2);
+ EXPECT_EQ(4, allocator_.Size());
+ // Allocate should consume fragment.
+ EXPECT_EQ(1, allocator_.Allocate(1));
+ // Size should still be 4.
+ EXPECT_EQ(4, allocator_.Size());
+}
+
+TEST_F(AlignedSlotAllocatorUnitTest, Align) {
+ EXPECT_EQ(0, allocator_.Align(1));
+ EXPECT_EQ(0, allocator_.Size());
+
+ // Allocate 1 to become misaligned.
+ Allocate(1, 0);
+
+ // 4-align.
+ EXPECT_EQ(3, allocator_.Align(4));
+ EXPECT_EQ(4, allocator_.NextSlot(1));
+ EXPECT_EQ(4, allocator_.NextSlot(2));
+ EXPECT_EQ(4, allocator_.NextSlot(4));
+ EXPECT_EQ(4, allocator_.Size());
+
+ // Allocate 2 to become misaligned.
+ Allocate(2, 4);
+
+ // 4-align.
+ EXPECT_EQ(2, allocator_.Align(4));
+ EXPECT_EQ(8, allocator_.NextSlot(1));
+ EXPECT_EQ(8, allocator_.NextSlot(2));
+ EXPECT_EQ(8, allocator_.NextSlot(4));
+ EXPECT_EQ(8, allocator_.Size());
+
+ // No change when we're already aligned.
+ EXPECT_EQ(0, allocator_.Align(2));
+ EXPECT_EQ(8, allocator_.NextSlot(1));
+ EXPECT_EQ(8, allocator_.NextSlot(2));
+ EXPECT_EQ(8, allocator_.NextSlot(4));
+ EXPECT_EQ(8, allocator_.Size());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 4aa4aaba2b..6137b3425a 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -13,7 +13,6 @@
#include "test/unittests/compiler/node-test-utils.h"
using ::testing::_;
-using v8::internal::compiler::Node;
namespace c = v8::internal::compiler;
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 6c9c6321cc..7a865c6d7f 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/globals.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
@@ -2165,10 +2166,7 @@ static const SIMDMulDPInst kSIMDMulDPInstructions[] = {
kArm64I32x4Mla, kArm64I32x4Mls, MachineType::Simd128()},
{"I16x8Mul", &MachineOperatorBuilder::I16x8Mul,
&MachineOperatorBuilder::I16x8Add, &MachineOperatorBuilder::I16x8Sub,
- kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()},
- {"I8x16Mul", &MachineOperatorBuilder::I8x16Mul,
- &MachineOperatorBuilder::I8x16Add, &MachineOperatorBuilder::I8x16Sub,
- kArm64I8x16Mla, kArm64I8x16Mls, MachineType::Simd128()}};
+ kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()}};
using InstructionSelectorSIMDDPWithSIMDMulTest =
InstructionSelectorTestWithParam<SIMDMulDPInst>;
@@ -2220,6 +2218,204 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorSIMDDPWithSIMDMulTest,
::testing::ValuesIn(kSIMDMulDPInstructions));
+struct SIMDMulDupInst {
+ const uint8_t shuffle[16];
+ int32_t lane;
+ int shuffle_input_index;
+};
+
+const SIMDMulDupInst kSIMDF32x4MulDuplInstructions[] = {
+ {
+ {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
+ 0,
+ 0,
+ },
+ {
+ {4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7},
+ 1,
+ 0,
+ },
+ {
+ {8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11},
+ 2,
+ 0,
+ },
+ {
+ {12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15},
+ 3,
+ 0,
+ },
+ {
+ {16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19, 16, 17, 18, 19},
+ 0,
+ 1,
+ },
+ {
+ {20, 21, 22, 23, 20, 21, 22, 23, 20, 21, 22, 23, 20, 21, 22, 23},
+ 1,
+ 1,
+ },
+ {
+ {24, 25, 26, 27, 24, 25, 26, 27, 24, 25, 26, 27, 24, 25, 26, 27},
+ 2,
+ 1,
+ },
+ {
+ {28, 29, 30, 31, 28, 29, 30, 31, 28, 29, 30, 31, 28, 29, 30, 31},
+ 3,
+ 1,
+ },
+};
+
+using InstructionSelectorSimdF32x4MulWithDupTest =
+ InstructionSelectorTestWithParam<SIMDMulDupInst>;
+
+TEST_P(InstructionSelectorSimdF32x4MulWithDupTest, MulWithDup) {
+ const SIMDMulDupInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+
+ // Multiplication operator should be commutative, so test shuffle op as lhs.
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), shuffle, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSimdF32x4MulWithDupTest,
+ ::testing::ValuesIn(kSIMDF32x4MulDuplInstructions));
+
+TEST_F(InstructionSelectorTest, SimdF32x4MulWithDupNegativeTest) {
+ const MachineType type = MachineType::Simd128();
+ // Check that optimization does not match when the shuffle is not a f32x4.dup.
+ const uint8_t mask[kSimd128Size] = {0};
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode((m.machine()->I8x16Shuffle(mask)), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F32x4Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ // The shuffle is a i8x16.dup of lane 0.
+ EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kArm64F32x4Mul, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
+const SIMDMulDupInst kSIMDF64x2MulDuplInstructions[] = {
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7},
+ 0,
+ 0,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15},
+ 1,
+ 0,
+ },
+ {
+ {16, 17, 18, 19, 20, 21, 22, 23, 16, 17, 18, 19, 20, 21, 22, 23},
+ 0,
+ 1,
+ },
+ {
+ {24, 25, 26, 27, 28, 29, 30, 31, 24, 25, 26, 27, 28, 29, 30, 31},
+ 1,
+ 1,
+ },
+};
+
+using InstructionSelectorSimdF64x2MulWithDupTest =
+ InstructionSelectorTestWithParam<SIMDMulDupInst>;
+
+TEST_P(InstructionSelectorSimdF64x2MulWithDupTest, MulWithDup) {
+ const SIMDMulDupInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+
+ // Multiplication operator should be commutative, so test shuffle op as lhs.
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode(m.machine()->I8x16Shuffle(param.shuffle),
+ m.Parameter(0), m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), shuffle, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(param.shuffle_input_index)),
+ s.ToVreg(s[0]->InputAt(1)));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSimdF64x2MulWithDupTest,
+ ::testing::ValuesIn(kSIMDF64x2MulDuplInstructions));
+
+TEST_F(InstructionSelectorTest, SimdF64x2MulWithDupNegativeTest) {
+ const MachineType type = MachineType::Simd128();
+ // Check that optimization does not match when the shuffle is not a f64x2.dup.
+ const uint8_t mask[kSimd128Size] = {0};
+ {
+ StreamBuilder m(this, type, type, type, type);
+ Node* shuffle = m.AddNode((m.machine()->I8x16Shuffle(mask)), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode(m.machine()->F64x2Mul(), m.Parameter(2), shuffle));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ // The shuffle is a i8x16.dup of lane 0.
+ EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kArm64F64x2Mul, s[1]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
@@ -2564,6 +2760,32 @@ TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
::testing::ValuesIn(kFPCmpInstructions));
+TEST_F(InstructionSelectorTest, Float32SelectWithRegisters) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* cond = m.Int32Constant(1);
+ m.Return(m.Float32Select(cond, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_select, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+TEST_F(InstructionSelectorTest, Float64SelectWithRegisters) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* cond = m.Int32Constant(1);
+ m.Return(m.Float64Select(cond, m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_select, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
// -----------------------------------------------------------------------------
// Conversions.
@@ -4543,6 +4765,42 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32Abd) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const fsub = m.Float32Sub(p0, p1);
+ Node* const fabs = m.Float32Abs(fsub);
+ m.Return(fabs);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Abd, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(fabs), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Abd) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const fsub = m.Float64Sub(p0, p1);
+ Node* const fabs = m.Float64Abs(fsub);
+ m.Return(fabs);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Abd, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(fabs), s.ToVreg(s[0]->Output()));
+}
+
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 0c0214ce43..60ba115713 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -90,7 +90,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
if (input->IsImmediate()) {
auto imm = ImmediateOperand::cast(input);
- if (imm->type() == ImmediateOperand::INDEXED) {
+ if (imm->type() == ImmediateOperand::INDEXED_IMM) {
int index = imm->indexed_value();
s.immediates_.insert(
std::make_pair(index, sequence.GetImmediate(imm)));
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
index 05c4d04eac..203daca69f 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
@@ -267,8 +267,10 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
} else {
EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
auto imm = ImmediateOperand::cast(operand);
- if (imm->type() == ImmediateOperand::INLINE) {
- return Constant(imm->inline_value());
+ if (imm->type() == ImmediateOperand::INLINE_INT32) {
+ return Constant(imm->inline_int32_value());
+ } else if (imm->type() == ImmediateOperand::INLINE_INT64) {
+ return Constant(imm->inline_int64_value());
}
i = immediates_.find(imm->indexed_value());
EXPECT_EQ(imm->indexed_value(), i->first);
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index c66685b710..3cfb050c79 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -344,7 +344,7 @@ InstructionOperand* InstructionSequenceTest::ConvertInputs(
InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
- return ImmediateOperand(ImmediateOperand::INLINE, op.value_);
+ return ImmediateOperand(ImmediateOperand::INLINE_INT32, op.value_);
}
CHECK_NE(op.vreg_.value_, kNoValue);
switch (op.type_) {
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 97ddd8ee52..cacff09652 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
-
#include "src/compiler/bytecode-analysis.h"
+
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
diff --git a/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
new file mode 100644
index 0000000000..87bbdf4041
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/csa-load-elimination-unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/csa-load-elimination.h"
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CsaLoadEliminationTest : public GraphTest {
+ public:
+ CsaLoadEliminationTest()
+ : GraphTest(3),
+ simplified_(zone()),
+ machine_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, simplified(),
+ machine()),
+ reducer_(zone(), graph(), tick_counter(), broker()),
+ csa_(reducer(), jsgraph(), zone()),
+ mcr_(reducer(), jsgraph()) {
+ reducer()->AddReducer(&csa_);
+ reducer()->AddReducer(&mcr_);
+ }
+
+ ~CsaLoadEliminationTest() override = default;
+
+ protected:
+ JSGraph* jsgraph() { return &jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ GraphReducer* reducer() { return &reducer_; }
+ Node* param1() {
+ return graph()->NewNode(common()->Parameter(1), graph()->start());
+ }
+ Node* constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+ }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+ MachineOperatorBuilder machine_;
+ JSGraph jsgraph_;
+ GraphReducer reducer_;
+ CsaLoadElimination csa_;
+ MachineOperatorReducer mcr_;
+};
+
+#define SETUP_SIMPLE_TEST(store_type, load_type, value_) \
+ Node* object = graph()->NewNode(common()->Parameter(0), graph()->start()); \
+ Node* offset = graph()->NewNode(common()->Int32Constant(5)); \
+ Node* value = value_; \
+ Node* control = graph()->start(); \
+ \
+ ObjectAccess store_access(MachineType::store_type(), kNoWriteBarrier); \
+ ObjectAccess load_access(MachineType::load_type(), kNoWriteBarrier); \
+ \
+ Node* store = \
+ graph()->NewNode(simplified()->StoreToObject(store_access), object, \
+ offset, value, graph()->start(), control); \
+ \
+ Node* load = graph()->NewNode(simplified()->LoadFromObject(load_access), \
+ object, offset, store, control); \
+ \
+ Node* ret = graph()->NewNode(common()->Return(0), load, load, control); \
+ \
+ graph()->end()->InsertInput(zone(), 0, ret); \
+ \
+ reducer()->ReduceGraph();
+
+TEST_F(CsaLoadEliminationTest, Int32) {
+ SETUP_SIMPLE_TEST(Int32, Int32, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kParameter);
+}
+
+TEST_F(CsaLoadEliminationTest, Int64) {
+ SETUP_SIMPLE_TEST(Int64, Int64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kParameter);
+}
+
+TEST_F(CsaLoadEliminationTest, Int64_to_Int32) {
+ SETUP_SIMPLE_TEST(Int64, Int32, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kTruncateInt64ToInt32);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_to_Int16) {
+ SETUP_SIMPLE_TEST(Int16, Int16, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kWord32Sar);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_to_Uint8) {
+ SETUP_SIMPLE_TEST(Int16, Uint8, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kWord32And);
+}
+
+TEST_F(CsaLoadEliminationTest, Int8_to_Uint16) {
+ SETUP_SIMPLE_TEST(Int8, Uint16, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int8_to_Uint64) {
+ SETUP_SIMPLE_TEST(Int8, Uint64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int32_to_Int64) {
+ SETUP_SIMPLE_TEST(Int32, Int64, param1())
+
+ EXPECT_EQ(ret->InputAt(0)->opcode(), IrOpcode::kLoadFromObject);
+}
+
+TEST_F(CsaLoadEliminationTest, Int16_constant) {
+ SETUP_SIMPLE_TEST(Int32, Int16, constant(0xfedcba98))
+
+ Int32Matcher m(ret->InputAt(0));
+
+ EXPECT_TRUE(m.HasResolvedValue());
+ EXPECT_EQ(m.ResolvedValue(), int32_t(0xffffba98));
+}
+
+TEST_F(CsaLoadEliminationTest, Uint8_constant) {
+ SETUP_SIMPLE_TEST(Int32, Uint8, constant(0xfedcba98))
+
+ Uint32Matcher m(ret->InputAt(0));
+
+ EXPECT_TRUE(m.HasResolvedValue());
+ EXPECT_EQ(m.ResolvedValue(), uint32_t(0x98));
+}
+
+#undef SETUP_SIMPLE_TEST
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/frame-unittest.cc b/deps/v8/test/unittests/compiler/frame-unittest.cc
new file mode 100644
index 0000000000..f74e4d34ec
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/frame-unittest.cc
@@ -0,0 +1,242 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame.h"
+
+#include "src/codegen/aligned-slot-allocator.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+constexpr int kSlotSize = AlignedSlotAllocator::kSlotSize;
+
+constexpr int kFixed1 = 1;
+constexpr int kFixed3 = 3;
+} // namespace
+
+class FrameTest : public ::testing::Test {
+ public:
+ FrameTest() = default;
+ ~FrameTest() override = default;
+};
+
+TEST_F(FrameTest, Constructor) {
+ Frame frame(kFixed3);
+ EXPECT_EQ(kFixed3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, ReserveSpillSlots) {
+ Frame frame(kFixed3);
+ constexpr int kReserve2 = 2;
+
+ frame.ReserveSpillSlots(kReserve2);
+ EXPECT_EQ(kFixed3 + kReserve2, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(kReserve2, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, EnsureReturnSlots) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+ constexpr int kReturn5 = 5;
+ constexpr int kReturn2 = 2;
+
+ frame.EnsureReturnSlots(kReturn3);
+ EXPECT_EQ(kFixed3 + kReturn3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3, frame.GetReturnSlotCount());
+
+ // Returns should grow by 2 slots.
+ frame.EnsureReturnSlots(kReturn5);
+ EXPECT_EQ(kFixed3 + kReturn5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+
+ // Returns shouldn't grow.
+ frame.EnsureReturnSlots(kReturn2);
+ EXPECT_EQ(kFixed3 + kReturn5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSavedCalleeRegisterSlots) {
+ Frame frame(kFixed3);
+ constexpr int kFirstSlots = 2;
+ constexpr int kSecondSlots = 3;
+
+ frame.AllocateSavedCalleeRegisterSlots(kFirstSlots);
+ EXPECT_EQ(kFixed3 + kFirstSlots, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ frame.AllocateSavedCalleeRegisterSlots(kSecondSlots);
+ EXPECT_EQ(kFixed3 + kFirstSlots + kSecondSlots,
+ frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(0, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AlignSavedCalleeRegisterSlots) {
+ Frame frame(kFixed3);
+ constexpr int kSlots = 2; // An even number leaves the slots misaligned.
+
+ frame.AllocateSavedCalleeRegisterSlots(kSlots);
+
+ // Align, which should add 1 padding slot.
+ frame.AlignSavedCalleeRegisterSlots(2 * kSlotSize);
+ EXPECT_EQ(kFixed3 + kSlots + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(1, frame.GetSpillSlotCount()); // padding
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Align again, which should not add a padding slot.
+ frame.AlignSavedCalleeRegisterSlots(2 * kSlotSize);
+ EXPECT_EQ(kFixed3 + kSlots + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(1, frame.GetSpillSlotCount()); // padding
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a quad slot, which must add 3 padding slots. Frame returns the
+ // last index of the 4 slot allocation.
+ int end = kFixed1 + 3 + 4;
+ int slot = kFixed1 + 3 + 4 - 1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(4 * kSlotSize, 4 * kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a double slot, which should leave the first padding slot and
+ // take the last two slots of padding.
+ slot = kFixed1 + 1 + 2 - 1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot, which should take the last padding slot.
+ slot = kFixed1;
+ EXPECT_EQ(slot, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(end, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(end - kFixed1, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAlignedWithReturns) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+ constexpr int kReturn5 = 5;
+
+ frame.EnsureReturnSlots(kReturn3);
+
+ // Allocate a double slot, which must add 1 padding slot. This should occupy
+ // slots 4 and 5, and AllocateSpillSlot returns the last slot index.
+ EXPECT_EQ(kFixed3 + 2, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+ EXPECT_EQ(kFixed3 + kReturn3 + 3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3, frame.GetReturnSlotCount());
+
+ frame.EnsureReturnSlots(kReturn5);
+
+ // Allocate a single slot, which should take the padding slot.
+ EXPECT_EQ(kFixed3, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + kReturn5 + 3, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn5, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotAndEndSpillArea) {
+ Frame frame(kFixed3);
+
+ // Allocate a double slot, which must add 1 padding slot.
+ EXPECT_EQ(kFixed3 + 2, frame.AllocateSpillSlot(2 * kSlotSize, 2 * kSlotSize));
+
+ // Allocate an unaligned double slot. This should be at the end.
+ EXPECT_EQ(kFixed3 + 4, frame.AllocateSpillSlot(2 * kSlotSize));
+ EXPECT_EQ(kFixed3 + 5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(5, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot. This should not be the padding slot, since that
+ // area has been closed by the unaligned allocation.
+ EXPECT_EQ(kFixed3 + 5, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + 6, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(6, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotOverAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a 4-aligned double slot, which must add 3 padding slots. This
+ // also terminates the slot area. Returns the starting slot in this case.
+ EXPECT_EQ(kFixed1 + 4, frame.AllocateSpillSlot(2 * kSlotSize, 4 * kSlotSize));
+ EXPECT_EQ(kFixed1 + 5, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(5, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+
+ // Allocate a single slot. This should not use any padding slot.
+ EXPECT_EQ(kFixed1 + 5, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed1 + 6, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(6, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AllocateSpillSlotUnderAligned) {
+ Frame frame(kFixed1);
+
+ // Allocate a 1-aligned double slot. This also terminates the slot area.
+ EXPECT_EQ(kFixed1 + 1, frame.AllocateSpillSlot(2 * kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed1 + 2, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed1, frame.GetFixedSlotCount());
+ EXPECT_EQ(2, frame.GetSpillSlotCount());
+ EXPECT_EQ(0, frame.GetReturnSlotCount());
+}
+
+TEST_F(FrameTest, AlignFrame) {
+ Frame frame(kFixed3);
+ constexpr int kReturn3 = 3;
+
+ frame.EnsureReturnSlots(kReturn3);
+
+ // Allocate two single slots, which leaves spill slots not 2-aligned.
+ EXPECT_EQ(kFixed3, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+ EXPECT_EQ(kFixed3 + 1, frame.AllocateSpillSlot(kSlotSize, kSlotSize));
+
+ // Align to 2 slots. This should pad the spill and return slot areas.
+ frame.AlignFrame(2 * kSlotSize);
+
+ EXPECT_EQ(kFixed3 + 3 + kReturn3 + 1, frame.GetTotalFrameSlotCount());
+ EXPECT_EQ(kFixed3, frame.GetFixedSlotCount());
+ EXPECT_EQ(3, frame.GetSpillSlotCount());
+ EXPECT_EQ(kReturn3 + 1, frame.GetReturnSlotCount());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 1a153eff9a..4478b36047 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -886,6 +886,50 @@ TEST_F(InstructionSelectorTest, SIMDSplatZero) {
}
}
+struct SwizzleConstants {
+ uint8_t shuffle[kSimd128Size];
+ bool omit_add;
+};
+
+static constexpr SwizzleConstants kSwizzleConstants[] = {
+ {
+ // all lanes < kSimd128Size
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ true,
+ },
+ {
+ // lanes that are >= kSimd128Size have top bit set
+ {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC,
+ 0xFD, 0xFE, 0xFF},
+ true,
+ },
+ {
+ {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27},
+ false,
+ },
+};
+
+using InstructionSelectorSIMDSwizzleConstantTest =
+ InstructionSelectorTestWithParam<SwizzleConstants>;
+
+TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) {
+ // Test optimization of swizzle with constant indices.
+ auto param = GetParam();
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* const c = m.S128Const(param.shuffle);
+ Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c);
+ m.Return(swizzle);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(kIA32I8x16Swizzle, s[1]->arch_opcode());
+ ASSERT_EQ(param.omit_add, s[1]->misc());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDSwizzleConstantTest,
+ ::testing::ValuesIn(kSwizzleConstants));
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index e6660b7823..8eafd4fe15 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -32,13 +32,15 @@ class Int64LoweringTest : public GraphTest {
Int64LoweringTest()
: GraphTest(),
machine_(zone(), MachineRepresentation::kWord32,
- MachineOperatorBuilder::Flag::kAllOptionalOps) {
+ MachineOperatorBuilder::Flag::kAllOptionalOps),
+ simplified_(zone()) {
value_[0] = 0x1234567890ABCDEF;
value_[1] = 0x1EDCBA098765432F;
value_[2] = 0x1133557799886644;
}
MachineOperatorBuilder* machine() { return &machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
@@ -46,7 +48,8 @@ class Int64LoweringTest : public GraphTest {
graph()->start(), graph()->start());
NodeProperties::MergeControlToEnd(graph(), common(), ret);
- Int64Lowering lowering(graph(), machine(), common(), zone(), signature);
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
+ signature);
lowering.LowerGraph();
}
@@ -64,7 +67,7 @@ class Int64LoweringTest : public GraphTest {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(rep);
- Int64Lowering lowering(graph(), machine(), common(), zone(),
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build(), std::move(special_case));
lowering.LowerGraph();
}
@@ -134,6 +137,7 @@ class Int64LoweringTest : public GraphTest {
private:
MachineOperatorBuilder machine_;
+ SimplifiedOperatorBuilder simplified_;
int64_t value_[3];
};
@@ -177,22 +181,64 @@ TEST_F(Int64LoweringTest, Int64Constant) {
start()));
#endif
-#define INT64_LOAD_LOWERING(kLoad) \
- int32_t base = 0x1234; \
- int32_t index = 0x5678; \
- \
- LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \
- Int32Constant(base), Int32Constant(index), \
- start(), start()), \
- MachineRepresentation::kWord64); \
- \
- Capture<Node*> high_word_load; \
+#define INT64_LOAD_LOWERING(kLoad, param, builder) \
+ int32_t base = 0x1234; \
+ int32_t index = 0x5678; \
+ \
+ LowerGraph(graph()->NewNode(builder()->kLoad(param), Int32Constant(base), \
+ Int32Constant(index), start(), start()), \
+ MachineRepresentation::kWord64); \
+ \
+ Capture<Node*> high_word_load; \
LOAD_VERIFY(kLoad)
-TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); }
+TEST_F(Int64LoweringTest, Int64Load) {
+ INT64_LOAD_LOWERING(Load, MachineType::Int64(), machine);
+}
TEST_F(Int64LoweringTest, UnalignedInt64Load) {
- INT64_LOAD_LOWERING(UnalignedLoad);
+ INT64_LOAD_LOWERING(UnalignedLoad, MachineType::Int64(), machine);
+}
+
+TEST_F(Int64LoweringTest, Int64LoadFromObject) {
+ INT64_LOAD_LOWERING(LoadFromObject,
+ ObjectAccess(MachineType::Int64(), kNoWriteBarrier),
+ simplified);
+}
+
+TEST_F(Int64LoweringTest, Int64LoadImmutable) {
+ int32_t base = 0x1234;
+ int32_t index = 0x5678;
+
+ LowerGraph(graph()->NewNode(machine()->LoadImmutable(MachineType::Int64()),
+ Int32Constant(base), Int32Constant(index)),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> high_word_load;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ Matcher<Node*> high_word_load_matcher =
+ IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Constant(index)),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start(), start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ Matcher<Node*> high_word_load_matcher = IsLoadImmutable(
+ MachineType::Int32(), IsInt32Constant(base), IsInt32Constant(index));
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoadImmutable(
+ MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4))),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start(), start()));
+#endif
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -225,7 +271,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
start()));
#endif
-#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \
+#define INT64_STORE_LOWERING(kStore, kRep32, kRep64, builder) \
int32_t base = 1111; \
int32_t index = 2222; \
int32_t return_value = 0x5555; \
@@ -233,7 +279,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
sig_builder.AddReturn(MachineRepresentation::kWord32); \
\
- Node* store = graph()->NewNode(machine()->kStore(kRep64), \
+ Node* store = graph()->NewNode(builder()->kStore(kRep64), \
Int32Constant(base), Int32Constant(index), \
Int64Constant(value(0)), start(), start()); \
\
@@ -243,7 +289,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
\
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
- Int64Lowering lowering(graph(), machine(), common(), zone(), \
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
sig_builder.Build()); \
lowering.LowerGraph(); \
\
@@ -254,7 +300,7 @@ TEST_F(Int64LoweringTest, Int64Store) {
WriteBarrierKind::kNoWriteBarrier);
const StoreRepresentation rep32(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier);
- INT64_STORE_LOWERING(Store, rep32, rep64);
+ INT64_STORE_LOWERING(Store, rep32, rep64, machine);
}
TEST_F(Int64LoweringTest, Int32Store) {
@@ -277,7 +323,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
- Int64Lowering lowering(graph(), machine(), common(), zone(),
+ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build());
lowering.LowerGraph();
@@ -292,7 +338,13 @@ TEST_F(Int64LoweringTest, Int32Store) {
TEST_F(Int64LoweringTest, Int64UnalignedStore) {
const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
- INT64_STORE_LOWERING(UnalignedStore, rep32, rep64);
+ INT64_STORE_LOWERING(UnalignedStore, rep32, rep64, machine);
+}
+
+TEST_F(Int64LoweringTest, Int64StoreToObject) {
+ const ObjectAccess access64(MachineType::Int64(), kNoWriteBarrier);
+ const ObjectAccess access32(MachineType::Int32(), kNoWriteBarrier);
+ INT64_STORE_LOWERING(StoreToObject, access32, access64, simplified);
}
TEST_F(Int64LoweringTest, Int64And) {
@@ -988,6 +1040,22 @@ TEST_F(Int64LoweringTest, LoopCycle) {
LowerGraph(load, MachineRepresentation::kWord64);
}
+TEST_F(Int64LoweringTest, LoopExitValue) {
+ Node* loop_header = graph()->NewNode(common()->Loop(1), graph()->start());
+ Node* loop_exit =
+ graph()->NewNode(common()->LoopExit(), loop_header, loop_header);
+ Node* exit =
+ graph()->NewNode(common()->LoopExitValue(MachineRepresentation::kWord64),
+ Int64Constant(value(2)), loop_exit);
+ LowerGraph(exit, MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsLoopExitValue(MachineRepresentation::kWord32,
+ IsInt32Constant(low_word_value(2))),
+ IsLoopExitValue(MachineRepresentation::kWord32,
+ IsInt32Constant(high_word_value(2))),
+ start(), start()));
+}
+
TEST_F(Int64LoweringTest, WasmBigIntSpecialCaseBigIntToI64) {
Node* target = Int32Constant(1);
Node* context = Int32Constant(2);
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index ceed584d85..5a1f1ac8ab 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -169,8 +169,8 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to add one slot of padding to the callee arguments.
- int expected = kPadArguments ? 2 : 1;
+ // We might need to add padding slots to the callee arguments.
+ int expected = 1 + ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
@@ -192,8 +192,8 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to drop one slot of padding from the caller's arguments.
- int expected = kPadArguments ? -2 : -1;
+ // We might need to drop padding slots from the caller's arguments.
+ int expected = -1 - ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
@@ -329,8 +329,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to add one slot of padding to the callee arguments.
- int expected = kPadArguments ? 0 : -1;
+ // We might need to add padding slots to the callee arguments.
+ int expected = ArgumentPaddingSlots(1) - 1;
EXPECT_EQ(expected, stack_param_delta);
}
@@ -359,8 +359,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
EXPECT_TRUE(desc1->CanTailCall(CallDescriptorOf(node->op())));
int stack_param_delta = desc2->GetStackParameterDelta(desc1);
- // We might need to drop one slot of padding from the caller's arguments.
- int expected = kPadArguments ? 0 : 1;
+ // We might need to drop padding slots from the caller's arguments.
+ int expected = 1 - ArgumentPaddingSlots(1);
EXPECT_EQ(expected, stack_param_delta);
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index e53050ad55..edb2942084 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -306,6 +306,8 @@ const OptionalOperatorEntry kOptionalOperators[] = {
OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float64Select, 3, 0, 1), // --
+ OPTIONAL_ENTRY(Float32Select, 3, 0, 1), // --
#undef OPTIONAL_ENTRY
};
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index aeceabeffa..5305fef574 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -102,6 +102,36 @@ class IsBranchMatcher final : public TestNodeMatcher {
const Matcher<Node*> control_matcher_;
};
+class IsLoopExitValueMatcher final : public TestNodeMatcher {
+ public:
+ IsLoopExitValueMatcher(const Matcher<MachineRepresentation>& rep_matcher,
+ const Matcher<Node*>& value_matcher)
+ : TestNodeMatcher(IrOpcode::kLoopExitValue),
+ rep_matcher_(rep_matcher),
+ value_matcher_(value_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << ") whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << " and value (";
+ value_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(LoopExitValueRepresentationOf(node->op()),
+ "representation", rep_matcher_, listener)) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "value",
+ value_matcher_, listener);
+ }
+
+ private:
+ const Matcher<MachineRepresentation> rep_matcher_;
+ const Matcher<Node*> value_matcher_;
+};
+
class IsSwitchMatcher final : public TestNodeMatcher {
public:
IsSwitchMatcher(const Matcher<Node*>& value_matcher,
@@ -1123,10 +1153,47 @@ LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad)
LOAD_MATCHER(LoadFromObject)
-#define STORE_MATCHER(kStore) \
+class IsLoadImmutableMatcher final : public TestNodeMatcher {
+ public:
+ IsLoadImmutableMatcher(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher)
+ : TestNodeMatcher(IrOpcode::kLoadImmutable),
+ rep_matcher_(rep_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ TestNodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << ") and index (";
+ index_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ LoadRepresentation rep = LoadRepresentationOf(node->op());
+ return TestNodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(rep, "rep", rep_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "index",
+ index_matcher_, listener);
+ }
+
+ private:
+ const Matcher<LoadRepresentation> rep_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+};
+
+#define STORE_MATCHER(kStore, representation) \
class Is##kStore##Matcher final : public TestNodeMatcher { \
public: \
- Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
+ Is##kStore##Matcher(const Matcher<representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \
const Matcher<Node*>& index_matcher, \
const Matcher<Node*>& value_matcher, \
@@ -1168,9 +1235,8 @@ LOAD_MATCHER(LoadFromObject)
control_node = NodeProperties::GetControlInput(node); \
} \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \
- PrintMatchAndExplain( \
- OpParameter<kStore##Representation>(node->op()), "rep", \
- rep_matcher_, listener) && \
+ PrintMatchAndExplain(OpParameter<representation>(node->op()), \
+ "rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
@@ -1184,7 +1250,7 @@ LOAD_MATCHER(LoadFromObject)
} \
\
private: \
- const Matcher<kStore##Representation> rep_matcher_; \
+ const Matcher<representation> rep_matcher_; \
const Matcher<Node*> base_matcher_; \
const Matcher<Node*> index_matcher_; \
const Matcher<Node*> value_matcher_; \
@@ -1192,8 +1258,9 @@ LOAD_MATCHER(LoadFromObject)
const Matcher<Node*> control_matcher_; \
};
-STORE_MATCHER(Store)
-STORE_MATCHER(UnalignedStore)
+STORE_MATCHER(Store, StoreRepresentation)
+STORE_MATCHER(UnalignedStore, UnalignedStoreRepresentation)
+STORE_MATCHER(StoreToObject, ObjectAccess)
class IsStackSlotMatcher final : public TestNodeMatcher {
public:
@@ -1556,6 +1623,10 @@ Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
control1_matcher, control2_matcher));
}
+Matcher<Node*> IsLoopExitValue(const Matcher<MachineRepresentation> rep_matcher,
+ const Matcher<Node*>& value_matcher) {
+ return MakeMatcher(new IsLoopExitValueMatcher(rep_matcher, value_matcher));
+}
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
@@ -2062,6 +2133,13 @@ Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
control_matcher));
}
+Matcher<Node*> IsLoadImmutable(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher) {
+ return MakeMatcher(
+ new IsLoadImmutableMatcher(rep_matcher, base_matcher, index_matcher));
+}
+
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -2083,6 +2161,17 @@ Matcher<Node*> IsUnalignedStore(
control_matcher));
}
+Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsStoreToObjectMatcher(
+ rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStackSlot(
const Matcher<StackSlotRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 42d6db82cf..d9afb36963 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -58,6 +58,8 @@ Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher,
const Matcher<Node*>& control2_matcher);
+Matcher<Node*> IsLoopExitValue(const Matcher<MachineRepresentation> rep_matcher,
+ const Matcher<Node*>& value_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher);
@@ -341,6 +343,9 @@ Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadImmutable(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -352,6 +357,12 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index fc04f419a0..4dcbbf2eb8 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -4,10 +4,15 @@
#include <limits>
+#include "src/common/globals.h"
#include "src/compiler/node-matchers.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/simd-shuffle.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -1888,6 +1893,9 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
}
}
+// -----------------------------------------------------------------------------
+// SIMD.
+
TEST_F(InstructionSelectorTest, SIMDSplatZero) {
// Test optimization for splat of contant 0.
// {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero().
@@ -1935,6 +1943,286 @@ TEST_F(InstructionSelectorTest, SIMDSplatZero) {
}
}
+#if V8_ENABLE_WEBASSEMBLY
+struct ArchShuffle {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode arch_opcode;
+ size_t input_count;
+};
+
+static constexpr ArchShuffle kArchShuffles[] = {
+ // These are architecture specific shuffles defined in
+ // instruction-selector-x64.cc arch_shuffles.
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23},
+ kX64S64x2UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31},
+ kX64S64x2UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kX64S32x4UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kX64S32x4UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kX64S16x8UnpackLow,
+ 2,
+ },
+ {
+ {8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kX64S16x8UnpackHigh,
+ 2,
+ },
+ {
+ {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kX64S8x16UnpackLow,
+ 2,
+ },
+ {
+ {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kX64S8x16UnpackHigh,
+ 2,
+ },
+ {
+ {0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kX64S16x8UnzipLow,
+ 2,
+ },
+ {
+ {2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kX64S16x8UnzipHigh,
+ 2,
+ },
+ {
+ {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kX64S8x16UnzipLow,
+ 2,
+ },
+ {
+ {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kX64S8x16UnzipHigh,
+ 2,
+ },
+ {
+ {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kX64S8x16TransposeLow,
+ 2,
+ },
+ {
+ {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kX64S8x16TransposeHigh,
+ 2,
+ },
+ {
+ {7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kX64S8x8Reverse,
+ 1,
+ },
+ {
+ {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kX64S8x4Reverse,
+ 1,
+ },
+ {
+ {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kX64S8x2Reverse,
+ 1,
+ },
+ // These are matched by TryMatchConcat && TryMatch32x4Rotate.
+ {
+ {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3},
+ kX64S32x4Rotate,
+ 2,
+ },
+ {
+ {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7},
+ kX64S32x4Rotate,
+ 2,
+ },
+ {
+ {12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+ kX64S32x4Rotate,
+ 2,
+ },
+ // These are matched by TryMatchConcat && !TryMatch32x4Rotate.
+ {
+ {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2},
+ kX64S8x16Alignr,
+ 3,
+ },
+ {
+ {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1},
+ kX64S8x16Alignr,
+ 3,
+ },
+ {
+ {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
+ kX64S8x16Alignr,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && is_swizzle.
+ {
+ {0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15},
+ kX64S32x4Swizzle,
+ 2,
+ },
+ {
+ {0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 8, 9, 10, 11},
+ kX64S32x4Swizzle,
+ 2,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle && TryMatchBlend.
+ {
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ kX64S16x8Blend,
+ 3,
+ },
+ {
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ kX64S16x8Blend,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle &&
+ // TryMatchShufps.
+ {
+ {0, 1, 2, 3, 8, 9, 10, 11, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64Shufps,
+ 3,
+ },
+ {
+ {8, 9, 10, 11, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64Shufps,
+ 3,
+ },
+ // These are matched by TryMatch32x4Shuffle && !is_swizzle.
+ {
+ {28, 29, 30, 31, 0, 1, 2, 3, 28, 29, 30, 31, 28, 29, 30, 31},
+ kX64S32x4Shuffle,
+ 4,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatchBlend.
+ {
+ {16, 17, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ kX64S16x8Blend,
+ 3,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatchSplat<8>.
+ {
+ {2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3},
+ kX64S16x8Dup,
+ 2,
+ },
+ // These are matched by TryMatch16x8Shuffle && TryMatch16x8HalfShuffle.
+ {
+ {6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kX64S16x8HalfShuffle1,
+ 3,
+ },
+ {
+ {6, 7, 4, 5, 2, 3, 0, 1, 30, 31, 28, 29, 26, 27, 24, 25},
+ kX64S16x8HalfShuffle2,
+ 5,
+ },
+ // These are matched by TryMatchSplat<16>.
+ {
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ kX64S8x16Dup,
+ 2,
+ },
+ // Generic shuffle that only uses 1 input.
+ {
+ {1, 15, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8},
+ kX64I8x16Shuffle,
+ 5,
+ },
+ // Generic shuffle that uses both inputs.
+ {
+ {1, 31, 2, 14, 3, 13, 4, 12, 5, 11, 6, 10, 7, 9, 8},
+ kX64I8x16Shuffle,
+ 6,
+ },
+};
+
+using InstructionSelectorSIMDArchShuffleTest =
+ InstructionSelectorTestWithParam<ArchShuffle>;
+
+TEST_P(InstructionSelectorSIMDArchShuffleTest, SIMDArchShuffle) {
+ MachineType type = MachineType::Simd128();
+ {
+ // Tests various shuffle optimizations
+ StreamBuilder m(this, type, type, type);
+ auto param = GetParam();
+ auto shuffle = param.shuffle;
+ const Operator* op = m.machine()->I8x16Shuffle(shuffle);
+ Node* n = m.AddNode(op, m.Parameter(0), m.Parameter(1));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(param.arch_opcode, s[0]->arch_opcode());
+ ASSERT_EQ(param.input_count, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDArchShuffleTest,
+ ::testing::ValuesIn(kArchShuffles));
+#endif // V8_ENABLE_WEBASSEMBLY
+
+struct SwizzleConstants {
+ uint8_t shuffle[kSimd128Size];
+ bool omit_add;
+};
+
+static constexpr SwizzleConstants kSwizzleConstants[] = {
+ {
+ // all lanes < kSimd128Size
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ true,
+ },
+ {
+ // lanes that are >= kSimd128Size have top bit set
+ {12, 13, 14, 15, 0x90, 0x91, 0x92, 0x93, 0xA0, 0xA1, 0xA2, 0xA3, 0xFC,
+ 0xFD, 0xFE, 0xFF},
+ true,
+ },
+ {
+ {12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27},
+ false,
+ },
+};
+
+using InstructionSelectorSIMDSwizzleConstantTest =
+ InstructionSelectorTestWithParam<SwizzleConstants>;
+
+TEST_P(InstructionSelectorSIMDSwizzleConstantTest, SimdSwizzleConstant) {
+ // Test optimization of swizzle with constant indices.
+ auto param = GetParam();
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* const c = m.S128Const(param.shuffle);
+ Node* swizzle = m.AddNode(m.machine()->I8x16Swizzle(), m.Parameter(0), c);
+ m.Return(swizzle);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(kX64I8x16Swizzle, s[1]->arch_opcode());
+ ASSERT_EQ(param.omit_add, s[1]->misc());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDSwizzleConstantTest,
+ ::testing::ValuesIn(kSwizzleConstants));
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 84e29f2596..7db9261179 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -41,6 +41,7 @@ class WithFinalizationRegistryMixin : public TMixin {
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
+ FLAG_harmony_weak_refs = true;
FLAG_expose_gc = true;
FLAG_allow_natives_syntax = true;
TMixin::SetUpTestCase();
diff --git a/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
index 9298a77541..893b55f24b 100644
--- a/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
@@ -76,13 +76,6 @@ class CompactorTest : public testing::TestWithPlatform {
EXPECT_TRUE(compactor().IsEnabledForTesting());
}
- void CancelCompaction() {
- bool cancelled = compactor().CancelIfShouldNotCompact(
- GarbageCollector::Config::MarkingType::kAtomic,
- GarbageCollector::Config::StackState::kMayContainHeapPointers);
- EXPECT_TRUE(cancelled);
- }
-
void FinishCompaction() { compactor().CompactSpacesIfEnabled(); }
void StartGC() {
@@ -134,11 +127,6 @@ TEST_F(CompactorTest, NothingToCompact) {
heap()->stats_collector()->NotifySweepingCompleted();
}
-TEST_F(CompactorTest, CancelledNothingToCompact) {
- StartCompaction();
- CancelCompaction();
-}
-
TEST_F(CompactorTest, NonEmptySpaceAllLive) {
static constexpr int kNumObjects = 10;
Persistent<CompactableHolder<kNumObjects>> holder =
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
index c4aea68f15..3a8d0307d0 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
@@ -46,11 +46,6 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
return marker->IncrementalMarkingStepForTesting(stack_state);
}
- void FinishSteps(Config::StackState stack_state) {
- while (!SingleStep(stack_state)) {
- }
- }
-
void FinishGC() {
Heap* heap = Heap::From(GetHeap());
heap->marker()->SetMainThreadMarkingDisabledForTesting(false);
@@ -114,7 +109,7 @@ TEST_F(ConcurrentMarkingTest, MarkingObjects) {
*last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
@@ -133,7 +128,7 @@ TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
last_object = &(*last_object)->child_;
});
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
@@ -149,7 +144,7 @@ TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
*last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
- // Use SignleStep to re-post concurrent jobs.
+ // Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index 4541216b1f..b03f3388d5 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -132,7 +132,7 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
// The corresponding page could be removed.
if (!backend->Lookup(static_cast<ConstAddress>(object))) continue;
- if (!freelist.Contains({object, 0})) return false;
+ if (!freelist.ContainsForTesting({object, 0})) return false;
}
return true;
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index 33adc71ca6..32a5929fe4 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -5,6 +5,7 @@
#include "include/cppgc/ephemeron-pair.h"
#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marking-visitor.h"
@@ -21,7 +22,7 @@ class GCed : public GarbageCollected<GCed> {
void Trace(cppgc::Visitor*) const {}
};
-class EphemeronHolder : public GarbageCollected<GCed> {
+class EphemeronHolder : public GarbageCollected<EphemeronHolder> {
public:
EphemeronHolder(GCed* key, GCed* value) : ephemeron_pair_(key, value) {}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
@@ -168,5 +169,75 @@ TEST_F(EphemeronPairGCTest, EphemeronPairValueIsCleared) {
EXPECT_EQ(nullptr, holder->ephemeron_pair().value.Get());
}
+namespace {
+
+class Mixin : public GarbageCollectedMixin {
+ public:
+ void Trace(Visitor* v) const override {}
+};
+
+class OtherMixin : public GarbageCollectedMixin {
+ public:
+ void Trace(Visitor* v) const override {}
+};
+
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>,
+ public OtherMixin,
+ public Mixin {
+ public:
+ void Trace(Visitor* v) const override {
+ OtherMixin::Trace(v);
+ Mixin::Trace(v);
+ }
+};
+
+class EphemeronHolderWithMixins
+ : public GarbageCollected<EphemeronHolderWithMixins> {
+ public:
+ EphemeronHolderWithMixins(Mixin* key, Mixin* value)
+ : ephemeron_pair_(key, value) {}
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
+
+ const EphemeronPair<Mixin, Mixin>& ephemeron_pair() const {
+ return ephemeron_pair_;
+ }
+
+ private:
+ EphemeronPair<Mixin, Mixin> ephemeron_pair_;
+};
+
+} // namespace
+
+TEST_F(EphemeronPairTest, EphemeronPairWithMixinKey) {
+ GCedWithMixin* key =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ GCedWithMixin* value =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ Persistent<EphemeronHolderWithMixins> holder =
+ MakeGarbageCollected<EphemeronHolderWithMixins>(GetAllocationHandle(),
+ key, value);
+ EXPECT_NE(static_cast<void*>(key), holder->ephemeron_pair().key.Get());
+ EXPECT_NE(static_cast<void*>(value), holder->ephemeron_pair().value.Get());
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishSteps();
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(key).TryMarkAtomic());
+ FinishMarking();
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+TEST_F(EphemeronPairTest, EphemeronPairWithEmptyMixinValue) {
+ GCedWithMixin* key =
+ MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
+ Persistent<EphemeronHolderWithMixins> holder =
+ MakeGarbageCollected<EphemeronHolderWithMixins>(GetAllocationHandle(),
+ key, nullptr);
+ EXPECT_NE(static_cast<void*>(key), holder->ephemeron_pair().key.Get());
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(key).TryMarkAtomic());
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishSteps();
+ FinishMarking();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
new file mode 100644
index 0000000000..6ca8569b3f
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/explicit-management-unittest.cc
@@ -0,0 +1,194 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/explicit-management.h"
+
+#include "include/cppgc/garbage-collected.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/sweeper.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+class ExplicitManagementTest : public testing::TestWithHeap {
+ public:
+ size_t AllocatedObjectSize() const {
+ auto* heap = Heap::From(GetHeap());
+ heap->stats_collector()->NotifySafePointForTesting();
+ return heap->stats_collector()->allocated_object_size();
+ }
+
+ void ResetLinearAllocationBuffers() const {
+ return Heap::From(GetHeap())
+ ->object_allocator()
+ .ResetLinearAllocationBuffers();
+ }
+
+ void TearDown() override {
+ PreciseGC();
+ TestWithHeap::TearDown();
+ }
+};
+
+namespace {
+
+class DynamicallySized final : public GarbageCollected<DynamicallySized> {
+ public:
+ void Trace(Visitor*) const {}
+};
+
+} // namespace
+
+TEST_F(ExplicitManagementTest, FreeRegularObjectToLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ const auto& lab = space->linear_allocation_buffer();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ const size_t size = header.GetSize();
+ Address needle = reinterpret_cast<Address>(&header);
+ // Test checks freeing to LAB.
+ ASSERT_EQ(lab.start(), header.PayloadEnd());
+ const size_t lab_size_before_free = lab.size();
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(lab.start(), reinterpret_cast<Address>(needle));
+ EXPECT_EQ(lab_size_before_free + size, lab.size());
+ // LAB is included in allocated object size, so no change is expected.
+ EXPECT_EQ(allocated_size_before, AllocatedObjectSize());
+ EXPECT_FALSE(space->free_list().ContainsForTesting({needle, size}));
+}
+
+TEST_F(ExplicitManagementTest, FreeRegularObjectToFreeList) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ const auto& lab = space->linear_allocation_buffer();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ const size_t size = header.GetSize();
+ Address needle = reinterpret_cast<Address>(&header);
+ // Test checks freeing to free list.
+ ResetLinearAllocationBuffers();
+ ASSERT_EQ(lab.start(), nullptr);
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(lab.start(), nullptr);
+ EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
+ EXPECT_TRUE(space->free_list().ContainsForTesting({needle, size}));
+}
+
+TEST_F(ExplicitManagementTest, FreeLargeObject) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(kLargeObjectSizeThreshold));
+ const auto* page = BasePage::FromPayload(o);
+ auto* heap = page->heap();
+ ASSERT_TRUE(page->is_large());
+ ConstAddress needle = reinterpret_cast<ConstAddress>(o);
+ const size_t size = LargePage::From(page)->PayloadSize();
+ EXPECT_TRUE(heap->page_backend()->Lookup(needle));
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_FALSE(heap->page_backend()->Lookup(needle));
+ EXPECT_EQ(allocated_size_before - size, AllocatedObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, FreeBailsOutDuringGC) {
+ const size_t snapshot_before = AllocatedObjectSize();
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto* heap = BasePage::FromPayload(o)->heap();
+ heap->SetInAtomicPauseForTesting(true);
+ const size_t allocated_size_before = AllocatedObjectSize();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(allocated_size_before, AllocatedObjectSize());
+ heap->SetInAtomicPauseForTesting(false);
+ ResetLinearAllocationBuffers();
+ subtle::FreeUnreferencedObject(o);
+ EXPECT_EQ(snapshot_before, AllocatedObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, FreeNull) {
+ DynamicallySized* o = nullptr;
+ // Noop.
+ subtle::FreeUnreferencedObject(o);
+}
+
+TEST_F(ExplicitManagementTest, GrowAtLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ constexpr size_t kFirstDelta = 8;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kFirstDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kFirstDelta),
+ header.ObjectSize());
+ constexpr size_t kSecondDelta = 9;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kSecondDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kSecondDelta),
+ header.ObjectSize());
+ // Second round didn't actually grow object because alignment restrictions
+ // already forced it to be large enough on the first Grow().
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kFirstDelta),
+ RoundUp<kAllocationGranularity>(size_of_o + kSecondDelta));
+ constexpr size_t kThirdDelta = 16;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kThirdDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kThirdDelta),
+ header.ObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, GrowShrinkAtLAB) {
+ auto* o =
+ MakeGarbageCollected<DynamicallySized>(GetHeap()->GetAllocationHandle());
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ constexpr size_t kDelta = 27;
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(kDelta)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o + kDelta),
+ header.ObjectSize());
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o), header.ObjectSize());
+}
+
+TEST_F(ExplicitManagementTest, ShrinkFreeList) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(ObjectAllocator::kSmallestSpaceSize));
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ // Force returning to free list by removing the LAB.
+ ResetLinearAllocationBuffers();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(size_of_o), header.ObjectSize());
+ EXPECT_TRUE(space->free_list().ContainsForTesting(
+ {header.PayloadEnd(), ObjectAllocator::kSmallestSpaceSize}));
+}
+
+TEST_F(ExplicitManagementTest, ShrinkFreeListBailoutAvoidFragmentation) {
+ auto* o = MakeGarbageCollected<DynamicallySized>(
+ GetHeap()->GetAllocationHandle(),
+ AdditionalBytes(ObjectAllocator::kSmallestSpaceSize - 1));
+ const auto* space = NormalPageSpace::From(BasePage::FromPayload(o)->space());
+ // Force returning to free list by removing the LAB.
+ ResetLinearAllocationBuffers();
+ auto& header = HeapObjectHeader::FromPayload(o);
+ constexpr size_t size_of_o = sizeof(DynamicallySized);
+ EXPECT_TRUE(subtle::Resize(*o, AdditionalBytes(0)));
+ EXPECT_EQ(RoundUp<kAllocationGranularity>(
+ size_of_o + ObjectAllocator::kSmallestSpaceSize - 1),
+ header.ObjectSize());
+ EXPECT_FALSE(space->free_list().ContainsForTesting(
+ {header.Payload() + RoundUp<kAllocationGranularity>(size_of_o),
+ ObjectAllocator::kSmallestSpaceSize - 1}));
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
index e059734cf9..c134877a2a 100644
--- a/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/free-list-unittest.cc
@@ -151,7 +151,7 @@ TEST(FreeListTest, Contains) {
FreeList list = CreatePopulatedFreeList(blocks);
for (const auto& block : blocks) {
- EXPECT_TRUE(list.Contains({block.Address(), block.Size()}));
+ EXPECT_TRUE(list.ContainsForTesting({block.Address(), block.Size()}));
}
}
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
index 3d951dc6cf..da02212176 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
@@ -4,6 +4,8 @@
#include "include/cppgc/internal/gc-info.h"
+#include <type_traits>
+
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
@@ -18,56 +20,72 @@ namespace {
constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
+class GCInfoTableTest : public ::testing::Test {
+ public:
+ GCInfoIndex RegisterNewGCInfoForTesting(const GCInfo& info) {
+ // Unused registered index will result in registering a new index.
+ std::atomic<GCInfoIndex> registered_index{0};
+ return table().RegisterNewGCInfo(registered_index, info);
+ }
+
+ void SetUp() override {
+ table_ = std::make_unique<GCInfoTable>(&page_allocator_);
+ }
+
+ void TearDown() override { table_.reset(); }
+
+ GCInfoTable& table() { return *table_; }
+ const GCInfoTable& table() const { return *table_; }
+
+ private:
+ v8::base::PageAllocator page_allocator_;
+ std::unique_ptr<GCInfoTable> table_;
+};
+
+using GCInfoTableDeathTest = GCInfoTableTest;
+
} // namespace
-TEST(GCInfoTableTest, InitialEmpty) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
- EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfos());
+TEST_F(GCInfoTableTest, InitialEmpty) {
+ EXPECT_EQ(GCInfoTable::kMinIndex, table().NumberOfGCInfos());
}
-TEST(GCInfoTableTest, ResizeToMaxIndex) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableTest, ResizeToMaxIndex) {
GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
- GCInfoIndex index = table.RegisterNewGCInfo(info);
+ GCInfoIndex index = RegisterNewGCInfoForTesting(info);
EXPECT_EQ(i, index);
}
}
-TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
GCInfo info = GetEmptyGCInfo();
// Create GCInfoTable::kMaxIndex entries.
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
- EXPECT_DEATH_IF_SUPPORTED(table.RegisterNewGCInfo(info), "");
+ EXPECT_DEATH_IF_SUPPORTED(RegisterNewGCInfoForTesting(info), "");
}
-TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
+TEST_F(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
GCInfo info = GetEmptyGCInfo();
// Use up all slots until limit.
- GCInfoIndex limit = table.LimitForTesting();
+ GCInfoIndex limit = table().LimitForTesting();
// Bail out if initial limit is already the maximum because of large committed
// pages. In this case, nothing can be comitted as read-only.
if (limit == GCInfoTable::kMaxIndex) {
return;
}
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < limit; i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
- EXPECT_EQ(limit, table.LimitForTesting());
- table.RegisterNewGCInfo(info);
- EXPECT_NE(limit, table.LimitForTesting());
+ EXPECT_EQ(limit, table().LimitForTesting());
+ RegisterNewGCInfoForTesting(info);
+ EXPECT_NE(limit, table().LimitForTesting());
// Old area is now read-only.
- auto& first_slot = table.TableSlotForTesting(GCInfoTable::kMinIndex);
+ auto& first_slot = table().TableSlotForTesting(GCInfoTable::kMinIndex);
EXPECT_DEATH_IF_SUPPORTED(first_slot.finalize = nullptr, "");
}
@@ -75,27 +93,27 @@ namespace {
class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
public:
- ThreadRegisteringGCInfoObjects(GCInfoTable* table,
+ ThreadRegisteringGCInfoObjects(GCInfoTableTest* test,
GCInfoIndex num_registrations)
: v8::base::Thread(Options("Thread registering GCInfo objects.")),
- table_(table),
+ test_(test),
num_registrations_(num_registrations) {}
void Run() final {
GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = 0; i < num_registrations_; i++) {
- table_->RegisterNewGCInfo(info);
+ test_->RegisterNewGCInfoForTesting(info);
}
}
private:
- GCInfoTable* table_;
+ GCInfoTableTest* test_;
GCInfoIndex num_registrations_;
};
} // namespace
-TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
+TEST_F(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
constexpr size_t num_threads = 4;
constexpr size_t main_thread_initialized = 2;
constexpr size_t gc_infos_to_register =
@@ -105,17 +123,14 @@ TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
"must sum up to kMaxIndex");
constexpr size_t gc_infos_per_thread = gc_infos_to_register / num_threads;
- v8::base::PageAllocator page_allocator;
- GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
for (size_t i = 0; i < main_thread_initialized; i++) {
- table.RegisterNewGCInfo(info);
+ RegisterNewGCInfoForTesting(info);
}
v8::base::Thread* threads[num_threads];
for (size_t i = 0; i < num_threads; i++) {
- threads[i] =
- new ThreadRegisteringGCInfoObjects(&table, gc_infos_per_thread);
+ threads[i] = new ThreadRegisteringGCInfoObjects(this, gc_infos_per_thread);
}
for (size_t i = 0; i < num_threads; i++) {
CHECK(threads[i]->Start());
@@ -161,5 +176,110 @@ TEST_F(GCInfoTraitTest, TraitReturnsDifferentIndexForDifferentTypes) {
EXPECT_NE(index1, index2);
}
+namespace {
+
+struct Dummy {};
+
+class BaseWithVirtualDestructor
+ : public GarbageCollected<BaseWithVirtualDestructor> {
+ public:
+ virtual ~BaseWithVirtualDestructor() = default;
+ void Trace(Visitor*) const {}
+
+ private:
+ std::unique_ptr<Dummy> non_trivially_destructible_;
+};
+
+class ChildOfBaseWithVirtualDestructor : public BaseWithVirtualDestructor {
+ public:
+ ~ChildOfBaseWithVirtualDestructor() override = default;
+};
+
+static_assert(std::has_virtual_destructor<BaseWithVirtualDestructor>::value,
+ "Must have virtual destructor.");
+static_assert(!std::is_trivially_destructible<BaseWithVirtualDestructor>::value,
+ "Must not be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfBaseWithVirtualDestructor,
+ ChildOfBaseWithVirtualDestructor::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfBaseWithVirtualDestructor>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfBaseWithVirtualDestructor,
+ ChildOfBaseWithVirtualDestructor::
+ ParentMostGarbageCollectedType>::ResultType,
+ BaseWithVirtualDestructor>::value,
+ "Must fold into base as base has virtual destructor.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+class TriviallyDestructibleBase
+ : public GarbageCollected<TriviallyDestructibleBase> {
+ public:
+ virtual void Trace(Visitor*) const {}
+};
+
+class ChildOfTriviallyDestructibleBase : public TriviallyDestructibleBase {};
+
+static_assert(!std::has_virtual_destructor<TriviallyDestructibleBase>::value,
+ "Must not have virtual destructor.");
+static_assert(std::is_trivially_destructible<TriviallyDestructibleBase>::value,
+ "Must be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTriviallyDestructibleBase,
+ ChildOfTriviallyDestructibleBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfTriviallyDestructibleBase>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTriviallyDestructibleBase,
+ ChildOfTriviallyDestructibleBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ TriviallyDestructibleBase>::value,
+ "Must fold into base as both are trivially destructible.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+class TypeWithCustomFinalizationMethodAtBase
+ : public GarbageCollected<TypeWithCustomFinalizationMethodAtBase> {
+ public:
+ void FinalizeGarbageCollectedObject() {}
+ void Trace(Visitor*) const {}
+
+ private:
+ std::unique_ptr<Dummy> non_trivially_destructible_;
+};
+
+class ChildOfTypeWithCustomFinalizationMethodAtBase
+ : public TypeWithCustomFinalizationMethodAtBase {};
+
+static_assert(
+ !std::has_virtual_destructor<TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must not have virtual destructor.");
+static_assert(!std::is_trivially_destructible<
+ TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must not be trivially destructible");
+#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(
+ std::is_same<typename internal::GCInfoFolding<
+ ChildOfTypeWithCustomFinalizationMethodAtBase,
+ ChildOfTypeWithCustomFinalizationMethodAtBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ ChildOfTypeWithCustomFinalizationMethodAtBase>::value,
+ "No folding to preserve object names");
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+static_assert(std::is_same<typename internal::GCInfoFolding<
+ ChildOfTypeWithCustomFinalizationMethodAtBase,
+ ChildOfTypeWithCustomFinalizationMethodAtBase::
+ ParentMostGarbageCollectedType>::ResultType,
+ TypeWithCustomFinalizationMethodAtBase>::value,
+ "Must fold into base as base has custom finalizer dispatch.");
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+
+} // namespace
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
index 2621af2891..11f4498aa0 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
@@ -35,6 +35,14 @@ TEST(HeapObjectHeaderTest, Payload) {
header.Payload());
}
+TEST(HeapObjectHeaderTest, PayloadEnd) {
+ constexpr GCInfoIndex kGCInfoIndex = 17;
+ constexpr size_t kSize = kAllocationGranularity;
+ HeapObjectHeader header(kSize, kGCInfoIndex);
+ EXPECT_EQ(reinterpret_cast<ConstAddress>(&header) + kSize,
+ header.PayloadEnd());
+}
+
TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
constexpr GCInfoIndex kGCInfoIndex = 17;
constexpr size_t kSize = kAllocationGranularity;
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
index 7ccabd00e3..acfcd2d401 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-page-unittest.cc
@@ -195,12 +195,12 @@ TEST_F(PageTest, NormalPageCreationDestruction) {
EXPECT_NE(space->end(), std::find(space->begin(), space->end(), page));
space->free_list().Add({page->PayloadStart(), page->PayloadSize()});
- EXPECT_TRUE(
- space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ EXPECT_TRUE(space->free_list().ContainsForTesting(
+ {page->PayloadStart(), page->PayloadSize()}));
space->free_list().Clear();
- EXPECT_FALSE(
- space->free_list().Contains({page->PayloadStart(), page->PayloadSize()}));
+ EXPECT_FALSE(space->free_list().ContainsForTesting(
+ {page->PayloadStart(), page->PayloadSize()}));
space->RemovePage(page);
EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
NormalPage::Destroy(page);
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index eeb4b74b6d..8f8191c6d0 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -4,10 +4,13 @@
#include "src/heap/cppgc/marker.h"
+#include <memory>
+
#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
+#include "include/cppgc/trace-trait.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -212,6 +215,7 @@ TEST_F(MarkerTest, NestedObjectsOnStackAreMarked) {
}
namespace {
+
class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
public:
template <typename Callback>
@@ -219,8 +223,19 @@ class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
callback(this);
}
- void Trace(Visitor*) const {}
+ template <typename Callback>
+ GCedWithCallback(Callback callback, GCed* gced) : gced_(gced) {
+ callback(this);
+ }
+
+ void Trace(Visitor* visitor) const { visitor->Trace(gced_); }
+
+ GCed* gced() const { return gced_; }
+
+ private:
+ Member<GCed> gced_;
};
+
} // namespace
TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedEmptyStack) {
@@ -254,6 +269,63 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedNonEmptyStack) {
});
}
+namespace {
+
+// Storage that can be used to hide a pointer from the GC. Only useful when
+// dealing with the stack separately.
+class GCObliviousObjectStorage final {
+ public:
+ GCObliviousObjectStorage()
+ : storage_(std::make_unique<const void*>(nullptr)) {}
+
+ template <typename T>
+ void set_object(T* t) {
+ *storage_.get() = TraceTrait<T>::GetTraceDescriptor(t).base_object_payload;
+ }
+
+ const void* object() const { return *storage_; }
+
+ private:
+ std::unique_ptr<const void*> storage_;
+};
+
+V8_NOINLINE void RegisterInConstructionObject(
+ AllocationHandle& allocation_handle, Visitor& v,
+ GCObliviousObjectStorage& storage) {
+ // Create deeper stack to avoid finding any temporary reference in the caller.
+ char space[500];
+ USE(space);
+ MakeGarbageCollected<GCedWithCallback>(
+ allocation_handle,
+ [&visitor = v, &storage](GCedWithCallback* obj) {
+ Member<GCedWithCallback> member(obj);
+ // Adds GCedWithCallback to in-construction objects.
+ visitor.Trace(member);
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(obj).IsMarked());
+ // The inner object GCed is only found if GCedWithCallback is processed.
+ storage.set_object(obj->gced());
+ },
+ // Initializing store does not trigger a write barrier.
+ MakeGarbageCollected<GCed>(allocation_handle));
+}
+
+} // namespace
+
+TEST_F(MarkerTest,
+ InConstructionObjectIsEventuallyMarkedDifferentNonEmptyStack) {
+ static const Marker::MarkingConfig config = {
+ MarkingConfig::CollectionType::kMajor,
+ MarkingConfig::StackState::kMayContainHeapPointers};
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+
+ GCObliviousObjectStorage storage;
+ RegisterInConstructionObject(GetAllocationHandle(), marker()->Visitor(),
+ storage);
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(storage.object()).IsMarked());
+ marker()->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(storage.object()).IsMarked());
+}
+
TEST_F(MarkerTest, SentinelNotClearedOnWeakPersistentHandling) {
static const Marker::MarkingConfig config = {
MarkingConfig::CollectionType::kMajor,
@@ -290,7 +362,8 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
MarkingConfig::MarkingType::kIncremental};
void FinishSteps(MarkingConfig::StackState stack_state) {
- while (!SingleStep(stack_state)) {}
+ while (!SingleStep(stack_state)) {
+ }
}
void FinishMarking() {
@@ -384,7 +457,7 @@ TEST_F(IncrementalMarkingTest, IncrementalStepDuringAllocation) {
TEST_F(IncrementalMarkingTest, MarkingRunsOutOfWorkEventually) {
InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(),
- IncrementalPreciseMarkingConfig);
+ IncrementalPreciseMarkingConfig);
FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
FinishMarking();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index 603a47399b..c4e34655fe 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -7,6 +7,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
+#include "include/cppgc/prefinalizer.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "test/unittests/heap/cppgc/tests.h"
@@ -145,7 +146,11 @@ TEST_F(MarkingVerifierTest, DoesntDieOnInConstructionObjectWithWriteBarrier) {
namespace {
-class MarkingVerifierDeathTest : public MarkingVerifierTest {};
+class MarkingVerifierDeathTest : public MarkingVerifierTest {
+ protected:
+ template <template <typename T> class Reference>
+ void TestResurrectingPreFinalizer();
+};
} // namespace
@@ -175,5 +180,58 @@ TEST_F(MarkingVerifierDeathTest, DieOnUnmarkedWeakMember) {
"");
}
+namespace {
+
+template <template <typename T> class Reference>
+class ResurrectingPreFinalizer
+ : public GarbageCollected<ResurrectingPreFinalizer<Reference>> {
+ CPPGC_USING_PRE_FINALIZER(ResurrectingPreFinalizer<Reference>, Dispose);
+
+ public:
+ class Storage : public GarbageCollected<Storage> {
+ public:
+ void Trace(Visitor* visitor) const { visitor->Trace(ref); }
+
+ Reference<GCed> ref;
+ };
+
+ ResurrectingPreFinalizer(Storage* storage, GCed* object_that_dies)
+ : storage_(storage), object_that_dies_(object_that_dies) {}
+
+ void Trace(Visitor* visitor) const {
+ visitor->Trace(storage_);
+ visitor->Trace(object_that_dies_);
+ }
+
+ private:
+ void Dispose() { storage_->ref = object_that_dies_; }
+
+ Member<Storage> storage_;
+ Member<GCed> object_that_dies_;
+};
+
+} // namespace
+
+template <template <typename T> class Reference>
+void MarkingVerifierDeathTest::TestResurrectingPreFinalizer() {
+ Persistent<typename ResurrectingPreFinalizer<Reference>::Storage> storage(
+ MakeGarbageCollected<
+ typename ResurrectingPreFinalizer<Reference>::Storage>(
+ GetAllocationHandle()));
+ MakeGarbageCollected<ResurrectingPreFinalizer<Reference>>(
+ GetAllocationHandle(), storage.Get(),
+ MakeGarbageCollected<GCed>(GetAllocationHandle()));
+ EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
+}
+#if DEBUG
+TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedMember) {
+ TestResurrectingPreFinalizer<Member>();
+}
+
+TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedWeakMember) {
+ TestResurrectingPreFinalizer<WeakMember>();
+}
+#endif // DEBUG
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
index 65c3e897ee..1fff7e2c11 100644
--- a/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
@@ -52,20 +52,20 @@ struct PersistentRegionTrait<WeakPersistent> {
template <>
struct PersistentRegionTrait<subtle::CrossThreadPersistent> {
- static PersistentRegion& Get(cppgc::Heap* heap) {
+ static CrossThreadPersistentRegion& Get(cppgc::Heap* heap) {
return internal::Heap::From(heap)->GetStrongCrossThreadPersistentRegion();
}
};
template <>
struct PersistentRegionTrait<subtle::WeakCrossThreadPersistent> {
- static PersistentRegion& Get(cppgc::Heap* heap) {
+ static CrossThreadPersistentRegion& Get(cppgc::Heap* heap) {
return internal::Heap::From(heap)->GetWeakCrossThreadPersistentRegion();
}
};
template <template <typename> class PersistentType>
-PersistentRegion& GetRegion(cppgc::Heap* heap) {
+auto& GetRegion(cppgc::Heap* heap) {
return PersistentRegionTrait<PersistentType>::Get(heap);
}
@@ -114,31 +114,31 @@ class PersistentTest : public testing::TestSupportingAllocationOnly {};
template <template <typename> class PersistentType>
void NullStateCtor(cppgc::Heap* heap) {
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
{
PersistentType<GCed> empty;
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
PersistentType<GCed> empty = nullptr;
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
PersistentType<GCed> empty = kSentinelPointer;
EXPECT_EQ(kSentinelPointer, empty);
EXPECT_EQ(kSentinelPointer, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
{
// Runtime null must not allocated associated node.
PersistentType<GCed> empty = static_cast<GCed*>(nullptr);
EXPECT_EQ(nullptr, empty.Get());
EXPECT_EQ(nullptr, empty.Release());
- EXPECT_EQ(0u, GetRegion<Persistent>(heap).NodesInUse());
+ EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
}
diff --git a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
index 5c8044db7e..8641922adb 100644
--- a/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stats-collector-unittest.cc
@@ -227,5 +227,13 @@ TEST_F(StatsCollectorTest, ObserverTriggersGC) {
stats.UnregisterObserver(&mock_observer);
}
+TEST_F(StatsCollectorTest, AllocatedMemorySize) {
+ EXPECT_EQ(0u, stats.allocated_memory_size());
+ stats.NotifyAllocatedMemory(1024);
+ EXPECT_EQ(1024u, stats.allocated_memory_size());
+ stats.NotifyFreedMemory(1024);
+ EXPECT_EQ(0u, stats.allocated_memory_size());
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
index 94c3479d3a..932ff9bb96 100644
--- a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -208,12 +208,12 @@ TEST_F(SweeperTest, CoalesceFreeListEntries) {
object2_start, static_cast<size_t>(object3_end - object2_start)};
EXPECT_EQ(0u, g_destructor_callcount);
- EXPECT_FALSE(freelist.Contains(coalesced_block));
+ EXPECT_FALSE(freelist.ContainsForTesting(coalesced_block));
Sweep();
EXPECT_EQ(2u, g_destructor_callcount);
- EXPECT_TRUE(freelist.Contains(coalesced_block));
+ EXPECT_TRUE(freelist.ContainsForTesting(coalesced_block));
}
namespace {
diff --git a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
index 1aa9bd15bb..cf45fe0248 100644
--- a/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/testing-unittest.cc
@@ -51,5 +51,13 @@ TEST_F(TestingTest, OverrideEmbeddertackStateScope) {
}
}
+TEST_F(TestingTest, StandaloneTestingHeap) {
+ // Perform garbage collection through the StandaloneTestingHeap API.
+ cppgc::testing::StandaloneTestingHeap heap(GetHeap()->GetHeapHandle());
+ heap.StartGarbageCollection();
+ heap.PerformMarkingStep(EmbedderStackState::kNoHeapPointers);
+ heap.FinalizeGarbageCollection(EmbedderStackState::kNoHeapPointers);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index c091c7f6ec..d367e45dad 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -100,7 +100,7 @@ class TestWithHeap : public TestWithPlatform {
// Restrictive test fixture that supports allocation but will make sure no
// garbage collection is triggered. This is useful for writing idiomatic
// tests where object are allocated on the managed heap while still avoiding
-// far reaching test consquences of full garbage collection calls.
+// far reaching test consequences of full garbage collection calls.
class TestSupportingAllocationOnly : public TestWithHeap {
protected:
TestSupportingAllocationOnly();
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index c46ee35095..55b8718675 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -21,8 +21,6 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
GCIdleTimeHeapState DefaultHeapState() {
GCIdleTimeHeapState result;
- result.contexts_disposed = 0;
- result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
result.incremental_marking_stopped = false;
result.size_of_objects = kSizeOfObjects;
return result;
@@ -72,80 +70,6 @@ TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
step_size);
}
-
-TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kDone,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate - 1;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kFullGC,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kFullGC,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = kMarkCompactSpeed;
- double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate =
- GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = kMarkCompactSpeed;
- double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
- EXPECT_EQ(GCIdleTimeAction::kIncrementalStep,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeHeap) {
- if (!handler()->Enabled()) return;
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- heap_state.size_of_objects = 101 * MB;
- double idle_time_ms = 0;
- EXPECT_EQ(GCIdleTimeAction::kDone,
- handler()->Compute(idle_time_ms, heap_state));
-}
-
TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
if (!handler()->Enabled()) return;
GCIdleTimeHeapState heap_state = DefaultHeapState();
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
deleted file mode 100644
index 7883283766..0000000000
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/item-parallel-job.h"
-
-#include "src/execution/isolate.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-
-class ItemParallelJobTest : public TestWithIsolate {
- public:
- ItemParallelJobTest() : parallel_job_semaphore_(0) {}
- ItemParallelJobTest(const ItemParallelJobTest&) = delete;
- ItemParallelJobTest& operator=(const ItemParallelJobTest&) = delete;
-
- base::Semaphore* parallel_job_semaphore() { return &parallel_job_semaphore_; }
-
- private:
- base::Semaphore parallel_job_semaphore_;
-};
-
-namespace {
-
-class SimpleTask : public ItemParallelJob::Task {
- public:
- SimpleTask(Isolate* isolate, bool* did_run)
- : ItemParallelJob::Task(isolate), did_run_(did_run) {}
-
- void RunInParallel(Runner runner) override {
- ItemParallelJob::Item* item = nullptr;
- while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
- item->MarkFinished();
- }
- *did_run_ = true;
- }
-
- private:
- bool* did_run_;
-};
-
-// A simple work item which sets |was_processed| to true, if non-null, when it
-// is processed.
-class SimpleItem : public ItemParallelJob::Item {
- public:
- explicit SimpleItem(bool* was_processed = nullptr)
- : ItemParallelJob::Item(), was_processed_(was_processed) {}
- void Process() {
- if (was_processed_) *was_processed_ = true;
- }
-
- private:
- bool* was_processed_;
-};
-
-class EagerTask : public ItemParallelJob::Task {
- public:
- explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
-
- void RunInParallel(Runner runner) override {
- SimpleItem* item = nullptr;
- while ((item = GetItem<SimpleItem>()) != nullptr) {
- item->Process();
- item->MarkFinished();
- }
- }
-};
-
-// A OneShotBarrier is meant to be passed to |counter| users. Users should
-// either Signal() or Wait() when done (based on whether they want to return
-// immediately or wait until others are also done).
-class OneShotBarrier {
- public:
- explicit OneShotBarrier(size_t counter) : counter_(counter) {
- DCHECK_GE(counter_, 0);
- }
-
- void Wait() {
- DCHECK_NE(counter_, 0);
- mutex_.Lock();
- counter_--;
- if (counter_ == 0) {
- condition_.NotifyAll();
- } else {
- while (counter_ > 0) {
- condition_.Wait(&mutex_);
- }
- }
- mutex_.Unlock();
- }
-
- void Signal() {
- mutex_.Lock();
- counter_--;
- if (counter_ == 0) {
- condition_.NotifyAll();
- }
- mutex_.Unlock();
- }
-
- private:
- base::Mutex mutex_;
- base::ConditionVariable condition_;
- size_t counter_;
-};
-
-// A task that only processes a single item. Signals |barrier| when done; if
-// |wait_when_done|, will blocks until all other tasks have signaled |barrier|.
-// If |did_process_an_item| is non-null, will set it to true if it does process
-// an item. Otherwise, it will expect to get an item to process (and will report
-// a failure if it doesn't).
-class TaskProcessingOneItem : public ItemParallelJob::Task {
- public:
- TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier,
- bool wait_when_done,
- bool* did_process_an_item = nullptr)
- : ItemParallelJob::Task(isolate),
- barrier_(barrier),
- wait_when_done_(wait_when_done),
- did_process_an_item_(did_process_an_item) {}
-
- void RunInParallel(Runner runner) override {
- SimpleItem* item = GetItem<SimpleItem>();
-
- if (did_process_an_item_) {
- *did_process_an_item_ = item != nullptr;
- } else {
- EXPECT_NE(nullptr, item);
- }
-
- if (item) {
- item->Process();
- item->MarkFinished();
- }
-
- if (wait_when_done_) {
- barrier_->Wait();
- } else {
- barrier_->Signal();
- }
- }
-
- private:
- OneShotBarrier* barrier_;
- bool wait_when_done_;
- bool* did_process_an_item_;
-};
-
-class TaskForDifferentItems;
-
-class BaseItem : public ItemParallelJob::Item {
- public:
- ~BaseItem() override = default;
- virtual void ProcessItem(TaskForDifferentItems* task) = 0;
-};
-
-class TaskForDifferentItems : public ItemParallelJob::Task {
- public:
- explicit TaskForDifferentItems(Isolate* isolate, bool* processed_a,
- bool* processed_b)
- : ItemParallelJob::Task(isolate),
- processed_a_(processed_a),
- processed_b_(processed_b) {}
- ~TaskForDifferentItems() override = default;
-
- void RunInParallel(Runner runner) override {
- BaseItem* item = nullptr;
- while ((item = GetItem<BaseItem>()) != nullptr) {
- item->ProcessItem(this);
- item->MarkFinished();
- }
- }
-
- void ProcessA() { *processed_a_ = true; }
- void ProcessB() { *processed_b_ = true; }
-
- private:
- bool* processed_a_;
- bool* processed_b_;
-};
-
-class ItemA : public BaseItem {
- public:
- ~ItemA() override = default;
- void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
-};
-
-class ItemB : public BaseItem {
- public:
- ~ItemB() override = default;
- void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
-};
-
-} // namespace
-
-// ItemParallelJob runs tasks even without work items (as requested tasks may be
-// responsible for post-processing).
-TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
- bool did_run = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
- job.Run();
- EXPECT_TRUE(did_run);
-}
-
-TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
- bool did_run = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
- job.AddItem(new ItemParallelJob::Item);
-
- job.Run();
- EXPECT_TRUE(did_run);
-}
-
-TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
- const int kNumTasks = 128;
- const int kNumItems = kNumTasks - 4;
-
- TaskProcessingOneItem* tasks[kNumTasks] = {};
- bool did_process_an_item[kNumTasks] = {};
-
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
-
- // The barrier ensures that all tasks run. But only the first kNumItems tasks
- // should be assigned an item to execute.
- OneShotBarrier barrier(kNumTasks);
- for (int i = 0; i < kNumTasks; i++) {
- // Block the main thread when done to prevent it from returning control to
- // the job (which could cancel tasks that have yet to be scheduled).
- const bool wait_when_done = i == 0;
- tasks[i] = new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done,
- &did_process_an_item[i]);
- job.AddTask(tasks[i]);
- }
-
- for (int i = 0; i < kNumItems; i++) {
- job.AddItem(new SimpleItem);
- }
-
- job.Run();
-
- for (int i = 0; i < kNumTasks; i++) {
- // Only the first kNumItems tasks should have been assigned a work item.
- EXPECT_EQ(i < kNumItems, did_process_an_item[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
- const int kItems = 111;
- bool was_processed[kItems] = {};
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddTask(new EagerTask(i_isolate()));
- for (int i = 0; i < kItems; i++) {
- job.AddItem(new SimpleItem(&was_processed[i]));
- }
- job.Run();
- for (int i = 0; i < kItems; i++) {
- EXPECT_TRUE(was_processed[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
- const int kItemsAndTasks = 256;
- bool was_processed[kItemsAndTasks] = {};
- OneShotBarrier barrier(kItemsAndTasks);
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- for (int i = 0; i < kItemsAndTasks; i++) {
- job.AddItem(new SimpleItem(&was_processed[i]));
-
- // Block the main thread when done to prevent it from returning control to
- // the job (which could cancel tasks that have yet to be scheduled).
- const bool wait_when_done = i == 0;
- job.AddTask(
- new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
- }
- job.Run();
- for (int i = 0; i < kItemsAndTasks; i++) {
- EXPECT_TRUE(was_processed[i]);
- }
-}
-
-TEST_F(ItemParallelJobTest, DifferentItems) {
- bool item_a = false;
- bool item_b = false;
- ItemParallelJob job(i_isolate()->cancelable_task_manager(),
- parallel_job_semaphore());
- job.AddItem(new ItemA());
- job.AddItem(new ItemB());
- job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
- job.Run();
- EXPECT_TRUE(item_a);
- EXPECT_TRUE(item_b);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 919578f2fb..92b5eef8dc 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -170,8 +170,11 @@ TEST_F(LocalHeapTest, GCEpilogue) {
CHECK(thread2->Start());
epilogue[1].WaitUntilStarted();
epilogue[2].WaitUntilStarted();
- heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kTesting);
+ {
+ UnparkedScope scope(&lh);
+ heap->PreciseCollectAllGarbage(Heap::kNoGCFlags,
+ GarbageCollectionReason::kTesting);
+ }
epilogue[1].RequestStop();
epilogue[2].RequestStop();
thread1->Join();
diff --git a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
index 848def9e21..f58569eb10 100644
--- a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
@@ -384,14 +384,21 @@ TEST_F(UnifiedHeapSnapshotTest, MergedWrapperNode) {
// GCedWithJSRef is merged into MergedObject, replacing its name.
"NextObject" // NOLINT
}));
+ const size_t js_size = Utils::OpenHandle(*wrapper_object)->Size();
+#if CPPGC_SUPPORTS_OBJECT_NAMES
const size_t cpp_size =
cppgc::internal::HeapObjectHeader::FromPayload(gc_w_js_ref.Get())
.GetSize();
- const size_t js_size = Utils::OpenHandle(*wrapper_object)->Size();
ForEachEntryWithName(snapshot, GetExpectedName<GCedWithJSRef>(),
[cpp_size, js_size](const HeapEntry& entry) {
EXPECT_EQ(cpp_size + js_size, entry.self_size());
});
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ ForEachEntryWithName(snapshot, GetExpectedName<GCedWithJSRef>(),
+ [js_size](const HeapEntry& entry) {
+ EXPECT_EQ(js_size, entry.self_size());
+ });
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
}
namespace {
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index 597cbcf2cf..404cf2e1a0 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -6,6 +6,7 @@
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/persistent.h"
#include "include/cppgc/platform.h"
+#include "include/cppgc/testing.h"
#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "src/api/api-inl.h"
@@ -140,6 +141,7 @@ TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
cpp_heap.AsBase().sweeper().FinishIfRunning();
EXPECT_TRUE(weak_holder);
}
+ USE(object);
{
js_heap.SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
@@ -149,5 +151,41 @@ TEST_F(UnifiedHeapDetachedTest, AllocationBeforeConfigureHeap) {
}
}
+TEST_F(UnifiedHeapDetachedTest, StandAloneCppGC) {
+ // Test ensures that stand-alone C++ GC are possible when using CppHeap. This
+ // works even in the presence of wrappables using TracedReference as long
+ // as the reference is empty.
+ auto heap = v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()});
+ auto* object =
+ cppgc::MakeGarbageCollected<Wrappable>(heap->GetAllocationHandle());
+ cppgc::WeakPersistent<Wrappable> weak_holder{object};
+
+ heap->EnableDetachedGarbageCollectionsForTesting();
+ {
+ heap->CollectGarbageForTesting(
+ cppgc::EmbedderStackState::kMayContainHeapPointers);
+ EXPECT_TRUE(weak_holder);
+ }
+ USE(object);
+ {
+ heap->CollectGarbageForTesting(cppgc::EmbedderStackState::kNoHeapPointers);
+ EXPECT_FALSE(weak_holder);
+ }
+}
+
+TEST_F(UnifiedHeapDetachedTest, StandaloneTestingHeap) {
+ // Perform garbage collection through the StandaloneTestingHeap API.
+ auto cpp_heap = v8::CppHeap::Create(
+ V8::GetCurrentPlatform(),
+ CppHeapCreateParams{{}, WrapperHelper::DefaultWrapperDescriptor()});
+ cpp_heap->EnableDetachedGarbageCollectionsForTesting();
+ cppgc::testing::StandaloneTestingHeap heap(cpp_heap->GetHeapHandle());
+ heap.StartGarbageCollection();
+ heap.PerformMarkingStep(cppgc::EmbedderStackState::kNoHeapPointers);
+ heap.FinalizeGarbageCollection(cppgc::EmbedderStackState::kNoHeapPointers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index d2beba0fbc..e027f12b78 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <limits>
+#include "src/interpreter/bytecode-array-builder.h"
-#include "src/init/v8.h"
+#include <limits>
#include "src/ast/scopes.h"
-#include "src/interpreter/bytecode-array-builder.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index ea60664bea..88e87f7e94 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
-#include "src/interpreter/bytecode-array-iterator.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 735ecf4d2f..619d270452 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -15,7 +15,6 @@
using ::testing::_;
using ::testing::Eq;
-using v8::internal::compiler::Node;
namespace c = v8::internal::compiler;
@@ -53,10 +52,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
}
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
+Matcher<c::Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<c::LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
- LoadSensitivity needs_poisoning) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher, LoadSensitivity needs_poisoning) {
CHECK_NE(LoadSensitivity::kUnsafe, needs_poisoning);
CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
if (poisoning_level() == PoisoningMitigationLevel::kPoisonCriticalOnly &&
@@ -67,31 +66,35 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
const Matcher<c::LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher) {
CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
index_matcher, _, _);
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
+Matcher<c::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<c::StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher) {
+ const Matcher<c::Node*>& base_matcher,
+ const Matcher<c::Node*>& index_matcher,
+ const Matcher<c::Node*>& value_matcher) {
return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
value_matcher, _, _);
}
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
- const Matcher<Node*>& value_matcher) {
+Matcher<c::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
+ const Matcher<c::Node*>& value_matcher) {
return kSystemPointerSize == 8
? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
: IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset, LoadSensitivity needs_poisoning) {
return IsLoad(
@@ -103,7 +106,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
needs_poisoning);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
int offset, LoadSensitivity needs_poisoning) {
return IsLoad(
@@ -115,7 +118,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
needs_poisoning);
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -136,7 +139,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[2];
+ Matcher<c::Node*> bytes[2];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
@@ -151,7 +154,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -172,7 +175,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[2];
+ Matcher<c::Node*> bytes[2];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
@@ -187,7 +190,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -208,7 +211,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[4];
+ Matcher<c::Node*> bytes[4];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
@@ -228,7 +231,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
int offset, LoadSensitivity needs_poisoning) {
if (TargetSupportsUnalignedAccess()) {
@@ -249,7 +252,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
#else
#error "Unknown Architecture"
#endif
- Matcher<Node*> bytes[4];
+ Matcher<c::Node*> bytes[4];
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
@@ -269,7 +272,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
}
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
switch (operand_size) {
@@ -285,7 +288,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
return nullptr;
}
-Matcher<Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
switch (operand_size) {
@@ -301,10 +304,10 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
return nullptr;
}
-Matcher<compiler::Node*>
+Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
int offset, OperandSize operand_size) {
- Matcher<compiler::Node*> reg_operand = IsChangeInt32ToIntPtr(
+ Matcher<c::Node*> reg_operand = IsChangeInt32ToIntPtr(
IsSignedOperand(offset, operand_size, LoadSensitivity::kSafe));
return IsBitcastWordToTagged(IsLoad(
MachineType::Pointer(), c::IsLoadParentFramePointer(),
@@ -409,36 +412,35 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
{
TNode<IntPtrT> index = m.IntPtrConstant(2);
TNode<Object> load_constant = m.LoadConstantPoolEntry(index);
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag),
- LoadSensitivity::kCritical));
+ Matcher<c::Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag),
+ LoadSensitivity::kCritical));
}
{
- Node* index = m.UntypedParameter(2);
+ c::Node* index = m.UntypedParameter(2);
TNode<Object> load_constant =
m.LoadConstantPoolEntry(m.ReinterpretCast<IntPtrT>(index));
- Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
- MachineType::AnyTagged(),
- c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
- c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(
- MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrAdd(
- c::IsIntPtrConstant(FixedArray::kHeaderSize -
- kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
- LoadSensitivity::kCritical));
+ Matcher<c::Node*> constant_pool_matcher = m.IsLoadFromObject(
+ MachineType::AnyTagged(),
+ c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
+ kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(
+ MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrAdd(
+ c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
+ LoadSensitivity::kCritical));
}
}
}
@@ -488,19 +490,19 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
m.Int32Constant(2));
TNode<Context> context = m.ReinterpretCast<Context>(m.Int32Constant(4));
- Matcher<Node*> function_table = c::IsExternalConstant(
+ Matcher<c::Node*> function_table = c::IsExternalConstant(
ExternalReference::runtime_function_table_address_for_unittests(
isolate()));
- Matcher<Node*> function =
+ Matcher<c::Node*> function =
c::IsIntPtrAdd(function_table,
c::IsChangeUint32ToWord(c::IsInt32Mul(
Eq(function_id),
c::IsInt32Constant(sizeof(Runtime::Function)))));
- Matcher<Node*> function_entry =
+ Matcher<c::Node*> function_entry =
m.IsLoad(MachineType::Pointer(), function,
c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
- Node* call_runtime =
+ c::Node* call_runtime =
m.CallRuntimeN(function_id, context, registers, result_size);
EXPECT_THAT(call_runtime,
c::IsCall(_, c::IsHeapConstant(builtin.code()),
@@ -521,30 +523,28 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
// Feedback vector is a phi node with two inputs. One of them is loading the
// feedback vector and the other is undefined constant (when feedback
// vectors aren't allocated). Find the input that loads feedback vector.
- CHECK_EQ(static_cast<Node*>(feedback_vector)->opcode(),
+ CHECK_EQ(static_cast<c::Node*>(feedback_vector)->opcode(),
i::compiler::IrOpcode::kPhi);
- Node* value0 =
+ c::Node* value0 =
i::compiler::NodeProperties::GetValueInput(feedback_vector, 0);
- Node* value1 =
+ c::Node* value1 =
i::compiler::NodeProperties::GetValueInput(feedback_vector, 1);
- Node* load_feedback_vector = value0;
+ c::Node* load_feedback_vector = value0;
if (value0->opcode() == i::compiler::IrOpcode::kHeapConstant) {
load_feedback_vector = value1;
}
- Matcher<Node*> load_function_matcher = IsBitcastWordToTagged(
+ Matcher<c::Node*> load_function_matcher = IsBitcastWordToTagged(
m.IsLoad(MachineType::Pointer(), c::IsLoadParentFramePointer(),
c::IsIntPtrConstant(Register::function_closure().ToOperand() *
kSystemPointerSize)));
- Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag));
- EXPECT_THAT(
- load_feedback_vector,
- m.IsLoadFromObject(
- MachineType::TaggedPointer(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ Matcher<c::Node*> load_vector_cell_matcher = m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
+ EXPECT_THAT(load_feedback_vector,
+ m.IsLoadFromObject(
+ MachineType::TaggedPointer(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 828af4ade4..c2539d8a28 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -57,7 +57,8 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
- Matcher<Node*> IsWordNot(const Matcher<Node*>& value_matcher);
+ Matcher<compiler::Node*> IsWordNot(
+ const Matcher<compiler::Node*>& value_matcher);
Matcher<compiler::Node*> IsUnsignedByteOperand(
int offset, LoadSensitivity needs_poisoning);
diff --git a/deps/v8/test/unittests/numbers/conversions-unittest.cc b/deps/v8/test/unittests/numbers/conversions-unittest.cc
index e0c1c55aae..43b761ac67 100644
--- a/deps/v8/test/unittests/numbers/conversions-unittest.cc
+++ b/deps/v8/test/unittests/numbers/conversions-unittest.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
-
#include "src/numbers/conversions.h"
+
+#include "src/codegen/source-position.h"
+#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/objects/object-unittest.cc b/deps/v8/test/unittests/objects/object-unittest.cc
index eb666ebca8..d959c3ff08 100644
--- a/deps/v8/test/unittests/objects/object-unittest.cc
+++ b/deps/v8/test/unittests/objects/object-unittest.cc
@@ -95,11 +95,7 @@ TEST(Object, StructListOrder) {
<< " vs. current = " << current_type; \
prev = current;
- // Only test the _BASE portion (the hand-coded part). Note that the values are
- // not necessarily consecutive because some Structs that need special
- // handling, such as those that have multiple Map instances associated, are
- // omitted from this list.
- STRUCT_LIST_GENERATOR_BASE(STRUCT_LIST_ADAPTER, TEST_STRUCT)
+ STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, TEST_STRUCT)
#undef TEST_STRUCT
}
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index afefdc1f45..8cbce76b76 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -12,12 +12,16 @@
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-result.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace {
@@ -2043,6 +2047,7 @@ class ValueSerializerTestWithSharedArrayBufferClone
Local<SharedArrayBuffer> NewSharedArrayBuffer(void* data, size_t byte_length,
bool is_wasm_memory) {
+#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory) {
// TODO(titzer): there is no way to create Wasm memory backing stores
// through the API, or to create a shared array buffer whose backing
@@ -2057,17 +2062,19 @@ class ValueSerializerTestWithSharedArrayBufferClone
i_isolate->factory()->NewJSSharedArrayBuffer(
std::move(backing_store));
return Utils::ToLocalShared(buffer);
- } else {
- std::unique_ptr<v8::BackingStore> backing_store =
- SharedArrayBuffer::NewBackingStore(
- data, byte_length,
- [](void*, size_t, void*) {
- // Leak the buffer as it has the
- // lifetime of the test.
- },
- nullptr);
- return SharedArrayBuffer::New(isolate(), std::move(backing_store));
}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CHECK(!is_wasm_memory);
+ std::unique_ptr<v8::BackingStore> backing_store =
+ SharedArrayBuffer::NewBackingStore(
+ data, byte_length,
+ [](void*, size_t, void*) {
+ // Leak the buffer as it has the
+ // lifetime of the test.
+ },
+ nullptr);
+ return SharedArrayBuffer::New(isolate(), std::move(backing_store));
}
static void SetUpTestCase() {
@@ -2173,6 +2180,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
ExpectScriptTrue("new Uint8Array(result.a).toString() === '0,1,128,255'");
}
+#if V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
RoundTripWebAssemblyMemory) {
bool flag_was_enabled = i::FLAG_experimental_wasm_threads;
@@ -2205,6 +2213,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
i::FLAG_experimental_wasm_threads = flag_was_enabled;
}
+#endif // V8_ENABLE_WEBASSEMBLY
TEST_F(ValueSerializerTest, UnsupportedHostObject) {
InvalidEncodeTest("new ExampleHostObject()");
diff --git a/deps/v8/test/unittests/objects/backing-store-unittest.cc b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
index b31669a79c..e1e70f5f8d 100644
--- a/deps/v8/test/unittests/objects/backing-store-unittest.cc
+++ b/deps/v8/test/unittests/objects/wasm-backing-store-unittest.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects/backing-store.h"
#include "src/base/platform/platform.h"
+#include "src/objects/backing-store.h"
#include "test/unittests/test-utils.h"
-
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 124d3bb1b6..5dff8b6b87 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -68,6 +68,9 @@ constexpr size_t kMaxByteSizedLeb128 = 127;
using F = std::pair<ValueType, bool>;
+// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
+using MakeSig = FixedSizeSignature<ValueType>;
+
enum MemoryType { kMemory32, kMemory64 };
// A helper for tests that require a module environment for functions,
@@ -1303,7 +1306,7 @@ TEST_F(FunctionBodyDecoderTest, TypeConversions) {
TestUnop(kExprF32ConvertF64, kWasmF32, kWasmF64);
}
-TEST_F(FunctionBodyDecoderTest, MacrosStmt) {
+TEST_F(FunctionBodyDecoderTest, MacrosVoid) {
builder.InitializeMemory();
ExpectValidates(sigs.v_i(), {WASM_LOCAL_SET(0, WASM_I32V_3(87348))});
ExpectValidates(
@@ -1743,7 +1746,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithMismatchedSigs3) {
WASM_FEATURE_SCOPE(return_call);
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1786,7 +1789,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsWithoutTableCrash) {
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectReturnCall) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprReturnCallIndirect};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1866,7 +1869,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
TEST_F(FunctionBodyDecoderTest, IndirectCallsWithMismatchedSigs1) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte sig0 = builder.AddSignature(sigs.i_f());
@@ -1928,7 +1931,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectCallsWithoutTableCrash) {
TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
const FunctionSig* sig = sigs.i_i();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprCallIndirect};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1937,7 +1940,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteIndirectCall) {
TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
const FunctionSig* sig = sigs.i_i();
builder.InitializeMemory();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kExprI32StoreMem};
ExpectFailure(sig, ArrayVector(code), kOmitEnd);
@@ -1947,7 +1950,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteI8x16Shuffle) {
WASM_FEATURE_SCOPE(simd);
const FunctionSig* sig = sigs.i_i();
builder.InitializeMemory();
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
static byte code[] = {kSimdPrefix,
static_cast<byte>(kExprI8x16Shuffle & 0xff)};
@@ -2239,14 +2242,6 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
ExpectFailure(sigs.i_d(), code);
}
-TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
- module->origin = kAsmJsSloppyOrigin;
- builder.InitializeMemory();
-
- byte code[] = {WASM_LOCAL_GET(0), kExprMemoryGrow, 0};
- ExpectFailure(sigs.i_i(), code);
-}
-
TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
ValueType float32int32float32[] = {kWasmF32, kWasmI32, kWasmF32};
FunctionSig sig_f_if(1, 2, float32int32float32);
@@ -2939,6 +2934,11 @@ TEST_F(FunctionBodyDecoderTest, TryDelegate) {
WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
kAppendEnd,
"cannot delegate inside the catch handler of the target");
+ ExpectFailure(sigs.v_v(),
+ {WASM_TRY_OP, kExprUnwind,
+ WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
+ kAppendEnd,
+ "cannot delegate inside the catch handler of the target");
ExpectFailure(
sigs.v_v(),
{WASM_BLOCK(WASM_TRY_OP, WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 3),
@@ -3325,7 +3325,7 @@ TEST_F(FunctionBodyDecoderTest, DeclarativeElemDrop) {
}
TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i());
ExpectFailure(sigs.a_v(), {WASM_REF_FUNC(function_index)});
@@ -3334,7 +3334,7 @@ TEST_F(FunctionBodyDecoderTest, RefFuncDeclared) {
}
TEST_F(FunctionBodyDecoderTest, RefFuncUndeclared) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
byte function_index = builder.AddFunction(sigs.v_i(), false);
WASM_FEATURE_SCOPE(reftypes);
@@ -3355,7 +3355,7 @@ TEST_F(FunctionBodyDecoderTest, ElemSegmentIndexUnsigned) {
}
TEST_F(FunctionBodyDecoderTest, TableCopy) {
- builder.InitializeTable(wasm::kWasmStmt);
+ builder.InitializeTable(wasm::kWasmVoid);
ExpectValidates(sigs.v_v(),
{WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
@@ -4269,17 +4269,18 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
HeapType::Representation func_heap_2 =
static_cast<HeapType::Representation>(builder.AddSignature(sigs.i_v()));
- // Passing/failing tests due to static subtyping.
std::tuple<HeapType::Representation, HeapType::Representation, bool> tests[] =
{std::make_tuple(HeapType::kData, array_heap, true),
std::make_tuple(HeapType::kData, super_struct_heap, true),
std::make_tuple(HeapType::kFunc, func_heap_1, true),
std::make_tuple(func_heap_1, func_heap_1, true),
- std::make_tuple(func_heap_1, func_heap_2, false),
+ std::make_tuple(func_heap_1, func_heap_2, true),
std::make_tuple(super_struct_heap, sub_struct_heap, true),
- std::make_tuple(sub_struct_heap, super_struct_heap, false),
- std::make_tuple(sub_struct_heap, array_heap, false),
- std::make_tuple(HeapType::kFunc, array_heap, false)};
+ std::make_tuple(array_heap, sub_struct_heap, true),
+ std::make_tuple(super_struct_heap, func_heap_1, true),
+ std::make_tuple(HeapType::kEq, super_struct_heap, false),
+ std::make_tuple(HeapType::kAny, func_heap_1, false),
+ std::make_tuple(HeapType::kI31, array_heap, false)};
for (auto test : tests) {
HeapType from_heap = HeapType(std::get<0>(test));
@@ -4308,10 +4309,10 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
ExpectValidates(&cast_sig,
{WASM_REF_CAST(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1))});
} else {
- std::string error_message = "[0] expected supertype of type " +
- std::to_string(to_heap.ref_index()) +
- ", found local.get of type " +
- test_reps[1].name();
+ std::string error_message =
+ "[0] expected subtype of (ref null func) or (ref null data), found "
+ "local.get of type " +
+ test_reps[1].name();
ExpectFailure(&test_sig,
{WASM_REF_TEST(WASM_LOCAL_GET(0),
WASM_RTT_CANON(WASM_HEAP_TYPE(to_heap)))},
@@ -4339,20 +4340,27 @@ TEST_F(FunctionBodyDecoderTest, RefTestCast) {
kAppendEnd,
"ref.cast[0] expected subtype of (ref null func) or (ref null data), "
"found i32.const of type i32");
+}
+
+TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+
+ TestModuleBuilder builder;
+ module = builder.module();
+ byte array_type = builder.AddArray(kWasmI8, true);
+
+ ValueType types[] = {ValueType::Ref(array_type, kNonNullable)};
+ FunctionSig sig(1, 0, types);
+
+ AddLocals(ValueType::Ref(array_type, kNullable), 1);
- // Trivial type error.
- ExpectFailure(
- sigs.v_v(),
- {WASM_REF_TEST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
- kAppendEnd,
- "ref.test[0] expected subtype of (ref null func) or (ref null data), "
- "found i32.const of type i32");
ExpectFailure(
- sigs.v_v(),
- {WASM_REF_CAST(WASM_I32V(1), WASM_RTT_CANON(array_heap)), kExprDrop},
- kAppendEnd,
- "ref.cast[0] expected subtype of (ref null func) or (ref null data), "
- "found i32.const of type i32");
+ &sig,
+ {WASM_LOCAL_TEE(0, WASM_ARRAY_NEW_DEFAULT(array_type, WASM_I32V(5),
+ WASM_RTT_CANON(array_type)))},
+ kAppendEnd, "expected (ref 0), got (ref null 0)");
}
// This tests that num_locals_ in decoder remains consistent, even if we fail
@@ -4365,6 +4373,16 @@ TEST_F(FunctionBodyDecoderTest, Regress_1154439) {
ExpectFailure(sigs.v_v(), {}, kAppendEnd, "local count too large");
}
+TEST_F(FunctionBodyDecoderTest, DropOnEmptyStack) {
+ // Valid code:
+ ExpectValidates(sigs.v_v(), {kExprI32Const, 1, kExprDrop}, kAppendEnd);
+ // Invalid code (dropping from empty stack):
+ ExpectFailure(sigs.v_v(), {kExprDrop}, kAppendEnd,
+ "not enough arguments on the stack for drop");
+ // Valid code (dropping from empty stack in unreachable code):
+ ExpectValidates(sigs.v_v(), {kExprUnreachable, kExprDrop}, kAppendEnd);
+}
+
class BranchTableIteratorTest : public TestWithZone {
public:
BranchTableIteratorTest() : TestWithZone() {}
@@ -5000,6 +5018,19 @@ TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, MemorySize) {
{WASM_MEMORY_SIZE, kExprI64Eqz, kExprDrop});
}
+TEST_P(FunctionBodyDecoderTestOnBothMemoryTypes, MemoryGrow) {
+ builder.InitializeMemory(GetParam());
+ // memory.grow is i32->i32 memory32.
+ Validate(!is_memory64(), sigs.i_i(), {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ // memory.grow is i64->i64 memory32.
+ Validate(is_memory64(), sigs.l_l(), {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ // any other combination always fails.
+ auto sig_l_i = MakeSig::Returns(kWasmI64).Params(kWasmI32);
+ ExpectFailure(&sig_l_i, {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+ auto sig_i_l = MakeSig::Returns(kWasmI32).Params(kWasmI64);
+ ExpectFailure(&sig_i_l, {WASM_MEMORY_GROW(WASM_LOCAL_GET(0))});
+}
+
#undef B1
#undef B2
#undef B3
diff --git a/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
new file mode 100644
index 0000000000..84f5908768
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
@@ -0,0 +1,41 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#if V8_TARGET_ARCH_IA32
+#include "src/execution/ia32/frame-constants-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/execution/x64/frame-constants-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/execution/mips/frame-constants-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/execution/arm/frame-constants-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/execution/arm64/frame-constants-arm64.h"
+#elif V8_TARGET_ARCH_S390X
+#include "src/execution/s390/frame-constants-s390.h"
+#elif V8_TARGET_ARCH_PPC64
+#include "src/execution/ppc/frame-constants-ppc.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/execution/riscv64/frame-constants-riscv64.h"
+#endif
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The registers used by Liftoff and the registers spilled by the
+// WasmDebugBreak builtin should match.
+STATIC_ASSERT(kLiftoffAssemblerGpCacheRegs ==
+ WasmDebugBreakFrameConstants::kPushedGpRegs);
+
+STATIC_ASSERT(kLiftoffAssemblerFpCacheRegs ==
+ WasmDebugBreakFrameConstants::kPushedFpRegs);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
index edd12b022a..1109200a9a 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-memory64-unittest.cc
@@ -4,6 +4,7 @@
#include "src/objects/objects-inl.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "test/common/wasm/wasm-macro-gen.h"
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index f721dc33d3..3a9fec0c99 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/test-utils.h"
+#include "src/wasm/module-decoder.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
using testing::HasSubstr;
@@ -495,7 +496,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
1) // mutable
};
EXPECT_FAILURE_WITH_MSG(no_initializer_no_end,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte no_initializer[] = {
SECTION(Global, //--
@@ -505,7 +506,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
kExprEnd) // --
};
EXPECT_FAILURE_WITH_MSG(no_initializer,
- "Found 'end' in global initalizer, but no "
+ "Found 'end' in initializer expression, but no "
"expressions were found on the stack");
static const byte too_many_initializers_no_end[] = {
@@ -517,7 +518,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(43)) // another value is too much
};
EXPECT_FAILURE_WITH_MSG(too_many_initializers_no_end,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte too_many_initializers[] = {
SECTION(Global, // --
@@ -528,8 +529,8 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(43), // another value is too much
kExprEnd)};
EXPECT_FAILURE_WITH_MSG(too_many_initializers,
- "Found 'end' in global initalizer, but more than one "
- "expressions were found on the stack");
+ "Found 'end' in initializer expression, but more than"
+ " one expressions were found on the stack");
static const byte missing_end_opcode[] = {
SECTION(Global, // --
@@ -539,7 +540,7 @@ TEST_F(WasmModuleVerifyTest, GlobalInitializer) {
WASM_I32V_1(42)) // init value
};
EXPECT_FAILURE_WITH_MSG(missing_end_opcode,
- "Global initializer is missing 'end'");
+ "Initializer expression is missing 'end'");
static const byte referencing_out_of_bounds_global[] = {
SECTION(Global, ENTRY_COUNT(1), // --
@@ -1971,6 +1972,24 @@ TEST_F(WasmModuleVerifyTest, TypedFunctionTable) {
EXPECT_EQ(ValueType::Ref(0, kNullable), result.value()->tables[0].type);
}
+TEST_F(WasmModuleVerifyTest, NullableTableIllegalInitializer) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v), // type section
+ ONE_EMPTY_FUNCTION(0), // function section
+ SECTION(Table, // table section
+ ENTRY_COUNT(1), // 1 table
+ kOptRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kExprRefFunc, 0, kExprEnd)}; // table 0: initializer
+
+ EXPECT_FAILURE_WITH_MSG(
+ data,
+ "section was shorter than expected size (8 bytes expected, 5 decoded)");
+}
+
TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
@@ -1999,13 +2018,47 @@ TEST_F(WasmModuleVerifyTest, IllegalTableTypes) {
auto result = DecodeModule(data.data(), data.data() + data.size());
- EXPECT_NOT_OK(
- result,
- "Currently, only externref and function references are allowed "
- "as table types");
+ EXPECT_NOT_OK(result,
+ "Currently, only externref and function references are "
+ "allowed as table types");
}
}
+TEST_F(WasmModuleVerifyTest, NonNullableTable) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v), // type section
+ ONE_EMPTY_FUNCTION(0), // function section
+ SECTION(Table, // table section
+ ENTRY_COUNT(1), // 1 table
+ kRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kExprRefFunc, 0, kExprEnd), // table 0: init. expression
+ SECTION(Code, ENTRY_COUNT(1), NOP_BODY)};
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(ValueType::Ref(0, kNonNullable), result.value()->tables[0].type);
+}
+
+TEST_F(WasmModuleVerifyTest, NonNullableTableNoInitializer) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ static const byte data[] = {
+ SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_x(kI32Code)),
+ SECTION(Table, // table section
+ ENTRY_COUNT(2), // 2 tables
+ kRefCode, 0, // table 0: type
+ 0, 10, // table 0: limits
+ kRefCode, 0, // table 1: type
+ 5, 6)}; // table 1: limits
+
+ EXPECT_FAILURE_WITH_MSG(data,
+ "invalid opcode 0x6b in initializer expression");
+}
+
TEST_F(WasmModuleVerifyTest, TieringCompilationHints) {
WASM_FEATURE_SCOPE(compilation_hints);
static const byte data[] = {
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index 9689a15eb4..b9970cc097 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/test-utils.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/signature.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-linkage.h"
+#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
@@ -66,6 +67,45 @@ TEST_F(WasmCallDescriptorTest, TestExternRefIsGrouped) {
}
}
+TEST_F(WasmCallDescriptorTest, Regress_1174500) {
+ // Our test signature should have just enough params and returns to force
+ // 1 param and 1 return to be allocated as stack slots. Use FP registers to
+ // avoid interference with implicit parameters, like the Wasm Instance.
+ constexpr int kParamRegisters = arraysize(kFpParamRegisters);
+ constexpr int kParams = kParamRegisters + 1;
+ constexpr int kReturnRegisters = arraysize(kFpReturnRegisters);
+ constexpr int kReturns = kReturnRegisters + 1;
+ ValueType types[kReturns + kParams];
+ // One S128 return slot which shouldn't be padded unless the arguments area
+ // of the frame requires it.
+ for (int i = 0; i < kReturnRegisters; ++i) types[i] = kWasmF32;
+ types[kReturnRegisters] = kWasmS128;
+ // One F32 parameter slot to misalign the parameter area.
+ for (int i = 0; i < kParamRegisters; ++i) types[kReturns + i] = kWasmF32;
+ types[kReturns + kParamRegisters] = kWasmF32;
+
+ FunctionSig sig(kReturns, kParams, types);
+ compiler::CallDescriptor* desc =
+ compiler::GetWasmCallDescriptor(zone(), &sig);
+
+ // Get the location of our stack parameter slot. Skip the implicit Wasm
+ // instance parameter.
+ compiler::LinkageLocation last_param = desc->GetInputLocation(kParams + 1);
+ EXPECT_TRUE(last_param.IsCallerFrameSlot());
+ EXPECT_EQ(MachineType::Float32(), last_param.GetType());
+ EXPECT_EQ(-1, last_param.GetLocation());
+
+ // The stack return slot should be right above our last parameter, and any
+ // argument padding slots. The return slot itself should not be padded.
+ const int padding = ShouldPadArguments(1);
+ const int first_return_slot = -1 - (padding + 1);
+ compiler::LinkageLocation return_location =
+ desc->GetReturnLocation(kReturns - 1);
+ EXPECT_TRUE(return_location.IsCallerFrameSlot());
+ EXPECT_EQ(MachineType::Simd128(), return_location.GetType());
+ EXPECT_EQ(first_return_slot, return_location.GetLocation());
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/BUILD.gn b/deps/v8/test/wasm-api-tests/BUILD.gn
index 5bc48f57da..acb3f0c27c 100644
--- a/deps/v8/test/wasm-api-tests/BUILD.gn
+++ b/deps/v8/test/wasm-api-tests/BUILD.gn
@@ -8,6 +8,8 @@ v8_executable("wasm_api_tests") {
testonly = true
deps = [
+ "../:common_test_headers",
+ "../..:v8_internal_headers",
"../..:v8_maybe_icu",
"../..:wee8",
"//build/win:default_exe_manifest",
@@ -15,9 +17,7 @@ v8_executable("wasm_api_tests") {
"//testing/gtest",
]
- data_deps = [
- "../../tools:v8_testrunner",
- ]
+ data_deps = [ "../../tools:v8_testrunner" ]
data = [
"testcfg.py",
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-tests.status b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
index 05488c1711..6aa0f51011 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-tests.status
+++ b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
@@ -4,10 +4,10 @@
[
-['lite_mode or variant == jitless', {
- # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+# TODO(v8:7777): Change this once wasm is supported in jitless mode.
+['not has_webassembly or variant == jitless', {
'*': [SKIP],
-}], # lite_mode or variant == jitless
+}], # not has_webassembly or variant == jitless
################################################################################
['variant == stress_snapshot', {
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 2788ed5fce..6a99554898 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -14,17 +14,6 @@ META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
proposal_flags = [{
- 'name': 'reference-types',
- 'flags': ['--experimental-wasm-reftypes',
- '--no-experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
- 'name': 'bulk-memory-operations',
- 'flags': ['--experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index 287917ecda..63fbde0778 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-ef30002bb06bd09b91b62d3fa152d1af94b28eaf \ No newline at end of file
+50b01d97338b464df8daa56355f83011930ec678 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 9f8d54442d..39d9b86b95 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -15,18 +15,13 @@
'prototypes': [FAIL],
- # Outdated proposals, will work after rebasing.
- 'proposals/reference-types/global/value-get-set': [FAIL],
- 'proposals/reference-types/global/constructor': [FAIL],
- 'proposals/bulk-memory-operations/global/value-get-set': [FAIL],
- 'proposals/bulk-memory-operations/global/constructor': [FAIL],
-
# These are slow, and not useful to run for the proposals:
- 'proposals/reference-types/limits': [SKIP],
- 'proposals/bulk-memory-operations/limits': [SKIP],
'proposals/js-types/limits': [SKIP],
- # TODO(wasm): Update memory limit.
- 'limits': [FAIL],
+ 'proposals/simd/limits': [SKIP],
+ 'proposals/memory64/limits': [SKIP],
+
+ # TODO(v8:11577): investigate this failure.
+ 'limits': [SKIP],
}], # ALWAYS
['arch == s390 or arch == s390x or system == aix', {
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 908ce09d4e..c3e57ce6c5 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -8,16 +8,6 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
proposal_flags = [{
- 'name': 'reference-types',
- 'flags': ['--experimental-wasm-reftypes',
- '--wasm-staging']
- },
- {
- 'name': 'bulk-memory-operations',
- 'flags': ['--experimental-wasm-bulk-memory',
- '--wasm-staging']
- },
- {
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
@@ -62,7 +52,7 @@ class TestCase(testcase.D8TestCase):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
- return []
+ return ['--experimental-wasm-reftypes']
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index ee7a50fdd9..8390796a1a 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-4db01ba8549a087ae9adaa8540cec2689c7dad64 \ No newline at end of file
+38fd550b9d30afab338b1902dbb78ce86500ad0f \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 38ac495bea..17e2d00c59 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -6,16 +6,14 @@
[ALWAYS, {
'skip-stack-guard-page': [PASS, ['((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
# TODO(v8:10994): Failing spec test after update.
- 'proposals/simd/imports': [FAIL],
- 'proposals/simd/data': [FAIL],
'proposals/js-types/data': [FAIL],
- # TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
- # the bulk-memory proposal. Since we've enabled bulk-memory by default, we
- # need to update to use its testsuite.
- 'linking': [FAIL],
- 'elem': [FAIL],
- 'data': [FAIL],
+ # Missing rebase in the proposal repository.
+ 'proposals/js-types/table': [FAIL],
+ 'proposals/js-types/unreached-invalid': [FAIL],
+ 'proposals/memory64/linking': [FAIL],
+ 'proposals/memory64/table': [FAIL],
+ 'proposals/memory64/unreached-invalid': [FAIL],
# TODO(wasm): Roll newest tests into "js-types" repository.
'proposals/js-types/elem': [FAIL],
@@ -44,9 +42,7 @@
'proposals/memory64/elem': [FAIL],
'proposals/memory64/float_memory64': [FAIL],
'proposals/memory64/imports': [FAIL],
- 'proposals/memory64/load64': [FAIL],
'proposals/memory64/memory64': [FAIL],
- 'proposals/memory64/memory_grow64': [FAIL],
'proposals/memory64/memory_trap64': [FAIL],
}], # ALWAYS